PostgreSQL Source Code git master
trigger.c
Go to the documentation of this file.
/*-------------------------------------------------------------------------
 *
 * trigger.c
 *	  PostgreSQL TRIGGERs support code.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/commands/trigger.c
 *
 *-------------------------------------------------------------------------
 */
14#include "postgres.h"
15
16#include "access/genam.h"
17#include "access/htup_details.h"
18#include "access/relation.h"
19#include "access/sysattr.h"
20#include "access/table.h"
21#include "access/tableam.h"
22#include "access/xact.h"
23#include "catalog/catalog.h"
24#include "catalog/dependency.h"
25#include "catalog/indexing.h"
27#include "catalog/partition.h"
29#include "catalog/pg_inherits.h"
30#include "catalog/pg_proc.h"
31#include "catalog/pg_trigger.h"
32#include "catalog/pg_type.h"
33#include "commands/dbcommands.h"
34#include "commands/trigger.h"
35#include "executor/executor.h"
36#include "miscadmin.h"
37#include "nodes/bitmapset.h"
38#include "nodes/makefuncs.h"
39#include "optimizer/optimizer.h"
40#include "parser/parse_clause.h"
42#include "parser/parse_func.h"
45#include "pgstat.h"
48#include "storage/lmgr.h"
49#include "utils/acl.h"
50#include "utils/builtins.h"
51#include "utils/fmgroids.h"
52#include "utils/guc_hooks.h"
53#include "utils/inval.h"
54#include "utils/lsyscache.h"
55#include "utils/memutils.h"
56#include "utils/plancache.h"
57#include "utils/rel.h"
58#include "utils/snapmgr.h"
59#include "utils/syscache.h"
60#include "utils/tuplestore.h"
61
62
63/* GUC variables */
65
66/* How many levels deep into trigger execution are we? */
67static int MyTriggerDepth = 0;
68
69/* Local function prototypes */
70static void renametrig_internal(Relation tgrel, Relation targetrel,
71 HeapTuple trigtup, const char *newname,
72 const char *expected_name);
73static void renametrig_partition(Relation tgrel, Oid partitionId,
74 Oid parentTriggerOid, const char *newname,
75 const char *expected_name);
76static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
77static bool GetTupleForTrigger(EState *estate,
78 EPQState *epqstate,
79 ResultRelInfo *relinfo,
80 ItemPointer tid,
81 LockTupleMode lockmode,
82 TupleTableSlot *oldslot,
83 TupleTableSlot **epqslot,
84 TM_Result *tmresultp,
85 TM_FailureData *tmfdp);
86static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
87 Trigger *trigger, TriggerEvent event,
88 Bitmapset *modifiedCols,
89 TupleTableSlot *oldslot, TupleTableSlot *newslot);
91 int tgindx,
92 FmgrInfo *finfo,
93 Instrumentation *instr,
94 MemoryContext per_tuple_context);
95static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
96 ResultRelInfo *src_partinfo,
97 ResultRelInfo *dst_partinfo,
98 int event, bool row_trigger,
99 TupleTableSlot *oldslot, TupleTableSlot *newslot,
100 List *recheckIndexes, Bitmapset *modifiedCols,
101 TransitionCaptureState *transition_capture,
102 bool is_crosspart_update);
103static void AfterTriggerEnlargeQueryState(void);
104static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
106
107
108/*
109 * Create a trigger. Returns the address of the created trigger.
110 *
111 * queryString is the source text of the CREATE TRIGGER command.
112 * This must be supplied if a whenClause is specified, else it can be NULL.
113 *
114 * relOid, if nonzero, is the relation on which the trigger should be
115 * created. If zero, the name provided in the statement will be looked up.
116 *
117 * refRelOid, if nonzero, is the relation to which the constraint trigger
118 * refers. If zero, the constraint relation name provided in the statement
119 * will be looked up as needed.
120 *
121 * constraintOid, if nonzero, says that this trigger is being created
122 * internally to implement that constraint. A suitable pg_depend entry will
123 * be made to link the trigger to that constraint. constraintOid is zero when
124 * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
125 * TRIGGER, we build a pg_constraint entry internally.)
126 *
127 * indexOid, if nonzero, is the OID of an index associated with the constraint.
128 * We do nothing with this except store it into pg_trigger.tgconstrindid;
129 * but when creating a trigger for a deferrable unique constraint on a
130 * partitioned table, its children are looked up. Note we don't cope with
131 * invalid indexes in that case.
132 *
133 * funcoid, if nonzero, is the OID of the function to invoke. When this is
134 * given, stmt->funcname is ignored.
135 *
136 * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
137 * if that trigger is dropped, this one should be too. There are two cases
138 * when a nonzero value is passed for this: 1) when this function recurses to
139 * create the trigger on partitions, 2) when creating child foreign key
140 * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
141 *
142 * If whenClause is passed, it is an already-transformed expression for
143 * WHEN. In this case, we ignore any that may come in stmt->whenClause.
144 *
145 * If isInternal is true then this is an internally-generated trigger.
146 * This argument sets the tgisinternal field of the pg_trigger entry, and
147 * if true causes us to modify the given trigger name to ensure uniqueness.
148 *
149 * When isInternal is not true we require ACL_TRIGGER permissions on the
150 * relation, as well as ACL_EXECUTE on the trigger function. For internal
151 * triggers the caller must apply any required permission checks.
152 *
153 * When called on partitioned tables, this function recurses to create the
154 * trigger on all the partitions, except if isInternal is true, in which
155 * case caller is expected to execute recursion on its own. in_partition
156 * indicates such a recursive call; outside callers should pass "false"
157 * (but see CloneRowTriggersToPartition).
158 */
160CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
161 Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
162 Oid funcoid, Oid parentTriggerOid, Node *whenClause,
163 bool isInternal, bool in_partition)
164{
165 return
166 CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
167 constraintOid, indexOid, funcoid,
168 parentTriggerOid, whenClause, isInternal,
169 in_partition, TRIGGER_FIRES_ON_ORIGIN);
170}
171
172/*
173 * Like the above; additionally the firing condition
174 * (always/origin/replica/disabled) can be specified.
175 */
177CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
178 Oid relOid, Oid refRelOid, Oid constraintOid,
179 Oid indexOid, Oid funcoid, Oid parentTriggerOid,
180 Node *whenClause, bool isInternal, bool in_partition,
181 char trigger_fires_when)
182{
183 int16 tgtype;
184 int ncolumns;
185 int16 *columns;
186 int2vector *tgattr;
187 List *whenRtable;
188 char *qual;
189 Datum values[Natts_pg_trigger];
190 bool nulls[Natts_pg_trigger];
191 Relation rel;
192 AclResult aclresult;
193 Relation tgrel;
194 Relation pgrel;
195 HeapTuple tuple = NULL;
196 Oid funcrettype;
197 Oid trigoid = InvalidOid;
198 char internaltrigname[NAMEDATALEN];
199 char *trigname;
200 Oid constrrelid = InvalidOid;
201 ObjectAddress myself,
202 referenced;
203 char *oldtablename = NULL;
204 char *newtablename = NULL;
205 bool partition_recurse;
206 bool trigger_exists = false;
207 Oid existing_constraint_oid = InvalidOid;
208 bool existing_isInternal = false;
209 bool existing_isClone = false;
210
211 if (OidIsValid(relOid))
212 rel = table_open(relOid, ShareRowExclusiveLock);
213 else
214 rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
215
216 /*
217 * Triggers must be on tables or views, and there are additional
218 * relation-type-specific restrictions.
219 */
220 if (rel->rd_rel->relkind == RELKIND_RELATION)
221 {
222 /* Tables can't have INSTEAD OF triggers */
223 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
224 stmt->timing != TRIGGER_TYPE_AFTER)
226 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
227 errmsg("\"%s\" is a table",
229 errdetail("Tables cannot have INSTEAD OF triggers.")));
230 }
231 else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
232 {
233 /* Partitioned tables can't have INSTEAD OF triggers */
234 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
235 stmt->timing != TRIGGER_TYPE_AFTER)
237 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
238 errmsg("\"%s\" is a table",
240 errdetail("Tables cannot have INSTEAD OF triggers.")));
241
242 /*
243 * FOR EACH ROW triggers have further restrictions
244 */
245 if (stmt->row)
246 {
247 /*
248 * Disallow use of transition tables.
249 *
250 * Note that we have another restriction about transition tables
251 * in partitions; search for 'has_superclass' below for an
252 * explanation. The check here is just to protect from the fact
253 * that if we allowed it here, the creation would succeed for a
254 * partitioned table with no partitions, but would be blocked by
255 * the other restriction when the first partition was created,
256 * which is very unfriendly behavior.
257 */
258 if (stmt->transitionRels != NIL)
260 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
261 errmsg("\"%s\" is a partitioned table",
263 errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
264 }
265 }
266 else if (rel->rd_rel->relkind == RELKIND_VIEW)
267 {
268 /*
269 * Views can have INSTEAD OF triggers (which we check below are
270 * row-level), or statement-level BEFORE/AFTER triggers.
271 */
272 if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
274 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
275 errmsg("\"%s\" is a view",
277 errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
278 /* Disallow TRUNCATE triggers on VIEWs */
279 if (TRIGGER_FOR_TRUNCATE(stmt->events))
281 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
282 errmsg("\"%s\" is a view",
284 errdetail("Views cannot have TRUNCATE triggers.")));
285 }
286 else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
287 {
288 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
289 stmt->timing != TRIGGER_TYPE_AFTER)
291 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
292 errmsg("\"%s\" is a foreign table",
294 errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
295
296 /*
297 * We disallow constraint triggers to protect the assumption that
298 * triggers on FKs can't be deferred. See notes with AfterTriggers
299 * data structures, below.
300 */
301 if (stmt->isconstraint)
303 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
304 errmsg("\"%s\" is a foreign table",
306 errdetail("Foreign tables cannot have constraint triggers.")));
307 }
308 else
310 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
311 errmsg("relation \"%s\" cannot have triggers",
314
317 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
318 errmsg("permission denied: \"%s\" is a system catalog",
320
321 if (stmt->isconstraint)
322 {
323 /*
324 * We must take a lock on the target relation to protect against
325 * concurrent drop. It's not clear that AccessShareLock is strong
326 * enough, but we certainly need at least that much... otherwise, we
327 * might end up creating a pg_constraint entry referencing a
328 * nonexistent table.
329 */
330 if (OidIsValid(refRelOid))
331 {
333 constrrelid = refRelOid;
334 }
335 else if (stmt->constrrel != NULL)
336 constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
337 false);
338 }
339
340 /* permission checks */
341 if (!isInternal)
342 {
343 aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
345 if (aclresult != ACLCHECK_OK)
346 aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
348
349 if (OidIsValid(constrrelid))
350 {
351 aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
353 if (aclresult != ACLCHECK_OK)
354 aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
355 get_rel_name(constrrelid));
356 }
357 }
358
359 /*
360 * When called on a partitioned table to create a FOR EACH ROW trigger
361 * that's not internal, we create one trigger for each partition, too.
362 *
363 * For that, we'd better hold lock on all of them ahead of time.
364 */
365 partition_recurse = !isInternal && stmt->row &&
366 rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
367 if (partition_recurse)
369 ShareRowExclusiveLock, NULL));
370
371 /* Compute tgtype */
372 TRIGGER_CLEAR_TYPE(tgtype);
373 if (stmt->row)
374 TRIGGER_SETT_ROW(tgtype);
375 tgtype |= stmt->timing;
376 tgtype |= stmt->events;
377
378 /* Disallow ROW-level TRUNCATE triggers */
379 if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
381 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
382 errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
383
384 /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
385 if (TRIGGER_FOR_INSTEAD(tgtype))
386 {
387 if (!TRIGGER_FOR_ROW(tgtype))
389 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
390 errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
391 if (stmt->whenClause)
393 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
394 errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
395 if (stmt->columns != NIL)
397 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
398 errmsg("INSTEAD OF triggers cannot have column lists")));
399 }
400
401 /*
402 * We don't yet support naming ROW transition variables, but the parser
403 * recognizes the syntax so we can give a nicer message here.
404 *
405 * Per standard, REFERENCING TABLE names are only allowed on AFTER
406 * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
407 * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
408 * only allowed once. Per standard, OLD may not be specified when
409 * creating a trigger only for INSERT, and NEW may not be specified when
410 * creating a trigger only for DELETE.
411 *
412 * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
413 * reference both ROW and TABLE transition data.
414 */
415 if (stmt->transitionRels != NIL)
416 {
417 List *varList = stmt->transitionRels;
418 ListCell *lc;
419
420 foreach(lc, varList)
421 {
423
424 if (!(tt->isTable))
426 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
427 errmsg("ROW variable naming in the REFERENCING clause is not supported"),
428 errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
429
430 /*
431 * Because of the above test, we omit further ROW-related testing
432 * below. If we later allow naming OLD and NEW ROW variables,
433 * adjustments will be needed below.
434 */
435
436 if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
438 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
439 errmsg("\"%s\" is a foreign table",
441 errdetail("Triggers on foreign tables cannot have transition tables.")));
442
443 if (rel->rd_rel->relkind == RELKIND_VIEW)
445 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
446 errmsg("\"%s\" is a view",
448 errdetail("Triggers on views cannot have transition tables.")));
449
450 /*
451 * We currently don't allow row-level triggers with transition
452 * tables on partition or inheritance children. Such triggers
453 * would somehow need to see tuples converted to the format of the
454 * table they're attached to, and it's not clear which subset of
455 * tuples each child should see. See also the prohibitions in
456 * ATExecAttachPartition() and ATExecAddInherit().
457 */
458 if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
459 {
460 /* Use appropriate error message. */
461 if (rel->rd_rel->relispartition)
463 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
464 errmsg("ROW triggers with transition tables are not supported on partitions")));
465 else
467 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
468 errmsg("ROW triggers with transition tables are not supported on inheritance children")));
469 }
470
471 if (stmt->timing != TRIGGER_TYPE_AFTER)
473 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
474 errmsg("transition table name can only be specified for an AFTER trigger")));
475
476 if (TRIGGER_FOR_TRUNCATE(tgtype))
478 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
479 errmsg("TRUNCATE triggers with transition tables are not supported")));
480
481 /*
482 * We currently don't allow multi-event triggers ("INSERT OR
483 * UPDATE") with transition tables, because it's not clear how to
484 * handle INSERT ... ON CONFLICT statements which can fire both
485 * INSERT and UPDATE triggers. We show the inserted tuples to
486 * INSERT triggers and the updated tuples to UPDATE triggers, but
487 * it's not yet clear what INSERT OR UPDATE trigger should see.
488 * This restriction could be lifted if we can decide on the right
489 * semantics in a later release.
490 */
491 if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
492 (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
493 (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
495 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
496 errmsg("transition tables cannot be specified for triggers with more than one event")));
497
498 /*
499 * We currently don't allow column-specific triggers with
500 * transition tables. Per spec, that seems to require
501 * accumulating separate transition tables for each combination of
502 * columns, which is a lot of work for a rather marginal feature.
503 */
504 if (stmt->columns != NIL)
506 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
507 errmsg("transition tables cannot be specified for triggers with column lists")));
508
509 /*
510 * We disallow constraint triggers with transition tables, to
511 * protect the assumption that such triggers can't be deferred.
512 * See notes with AfterTriggers data structures, below.
513 *
514 * Currently this is enforced by the grammar, so just Assert here.
515 */
516 Assert(!stmt->isconstraint);
517
518 if (tt->isNew)
519 {
520 if (!(TRIGGER_FOR_INSERT(tgtype) ||
521 TRIGGER_FOR_UPDATE(tgtype)))
523 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
524 errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
525
526 if (newtablename != NULL)
528 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
529 errmsg("NEW TABLE cannot be specified multiple times")));
530
531 newtablename = tt->name;
532 }
533 else
534 {
535 if (!(TRIGGER_FOR_DELETE(tgtype) ||
536 TRIGGER_FOR_UPDATE(tgtype)))
538 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
539 errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
540
541 if (oldtablename != NULL)
543 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
544 errmsg("OLD TABLE cannot be specified multiple times")));
545
546 oldtablename = tt->name;
547 }
548 }
549
550 if (newtablename != NULL && oldtablename != NULL &&
551 strcmp(newtablename, oldtablename) == 0)
553 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
554 errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
555 }
556
557 /*
558 * Parse the WHEN clause, if any and we weren't passed an already
559 * transformed one.
560 *
561 * Note that as a side effect, we fill whenRtable when parsing. If we got
562 * an already parsed clause, this does not occur, which is what we want --
563 * no point in adding redundant dependencies below.
564 */
565 if (!whenClause && stmt->whenClause)
566 {
567 ParseState *pstate;
568 ParseNamespaceItem *nsitem;
569 List *varList;
570 ListCell *lc;
571
572 /* Set up a pstate to parse with */
573 pstate = make_parsestate(NULL);
574 pstate->p_sourcetext = queryString;
575
576 /*
577 * Set up nsitems for OLD and NEW references.
578 *
579 * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
580 */
581 nsitem = addRangeTableEntryForRelation(pstate, rel,
583 makeAlias("old", NIL),
584 false, false);
585 addNSItemToQuery(pstate, nsitem, false, true, true);
586 nsitem = addRangeTableEntryForRelation(pstate, rel,
588 makeAlias("new", NIL),
589 false, false);
590 addNSItemToQuery(pstate, nsitem, false, true, true);
591
592 /* Transform expression. Copy to be sure we don't modify original */
593 whenClause = transformWhereClause(pstate,
594 copyObject(stmt->whenClause),
596 "WHEN");
597 /* we have to fix its collations too */
598 assign_expr_collations(pstate, whenClause);
599
600 /*
601 * Check for disallowed references to OLD/NEW.
602 *
603 * NB: pull_var_clause is okay here only because we don't allow
604 * subselects in WHEN clauses; it would fail to examine the contents
605 * of subselects.
606 */
607 varList = pull_var_clause(whenClause, 0);
608 foreach(lc, varList)
609 {
610 Var *var = (Var *) lfirst(lc);
611
612 switch (var->varno)
613 {
614 case PRS2_OLD_VARNO:
615 if (!TRIGGER_FOR_ROW(tgtype))
617 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
618 errmsg("statement trigger's WHEN condition cannot reference column values"),
619 parser_errposition(pstate, var->location)));
620 if (TRIGGER_FOR_INSERT(tgtype))
622 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
623 errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
624 parser_errposition(pstate, var->location)));
625 /* system columns are okay here */
626 break;
627 case PRS2_NEW_VARNO:
628 if (!TRIGGER_FOR_ROW(tgtype))
630 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
631 errmsg("statement trigger's WHEN condition cannot reference column values"),
632 parser_errposition(pstate, var->location)));
633 if (TRIGGER_FOR_DELETE(tgtype))
635 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
636 errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
637 parser_errposition(pstate, var->location)));
638 if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
640 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
641 errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
642 parser_errposition(pstate, var->location)));
643 if (TRIGGER_FOR_BEFORE(tgtype) &&
644 var->varattno == 0 &&
645 RelationGetDescr(rel)->constr &&
646 (RelationGetDescr(rel)->constr->has_generated_stored ||
647 RelationGetDescr(rel)->constr->has_generated_virtual))
649 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
650 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
651 errdetail("A whole-row reference is used and the table contains generated columns."),
652 parser_errposition(pstate, var->location)));
653 if (TRIGGER_FOR_BEFORE(tgtype) &&
654 var->varattno > 0 &&
655 TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
657 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
658 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
659 errdetail("Column \"%s\" is a generated column.",
660 NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
661 parser_errposition(pstate, var->location)));
662 break;
663 default:
664 /* can't happen without add_missing_from, so just elog */
665 elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
666 break;
667 }
668 }
669
670 /* we'll need the rtable for recordDependencyOnExpr */
671 whenRtable = pstate->p_rtable;
672
673 qual = nodeToString(whenClause);
674
675 free_parsestate(pstate);
676 }
677 else if (!whenClause)
678 {
679 whenClause = NULL;
680 whenRtable = NIL;
681 qual = NULL;
682 }
683 else
684 {
685 qual = nodeToString(whenClause);
686 whenRtable = NIL;
687 }
688
689 /*
690 * Find and validate the trigger function.
691 */
692 if (!OidIsValid(funcoid))
693 funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
694 if (!isInternal)
695 {
696 aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
697 if (aclresult != ACLCHECK_OK)
699 NameListToString(stmt->funcname));
700 }
701 funcrettype = get_func_rettype(funcoid);
702 if (funcrettype != TRIGGEROID)
704 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
705 errmsg("function %s must return type %s",
706 NameListToString(stmt->funcname), "trigger")));
707
708 /*
709 * Scan pg_trigger to see if there is already a trigger of the same name.
710 * Skip this for internally generated triggers, since we'll modify the
711 * name to be unique below.
712 *
713 * NOTE that this is cool only because we have ShareRowExclusiveLock on
714 * the relation, so the trigger set won't be changing underneath us.
715 */
716 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
717 if (!isInternal)
718 {
719 ScanKeyData skeys[2];
720 SysScanDesc tgscan;
721
722 ScanKeyInit(&skeys[0],
723 Anum_pg_trigger_tgrelid,
724 BTEqualStrategyNumber, F_OIDEQ,
726
727 ScanKeyInit(&skeys[1],
728 Anum_pg_trigger_tgname,
729 BTEqualStrategyNumber, F_NAMEEQ,
730 CStringGetDatum(stmt->trigname));
731
732 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
733 NULL, 2, skeys);
734
735 /* There should be at most one matching tuple */
736 if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
737 {
738 Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
739
740 trigoid = oldtrigger->oid;
741 existing_constraint_oid = oldtrigger->tgconstraint;
742 existing_isInternal = oldtrigger->tgisinternal;
743 existing_isClone = OidIsValid(oldtrigger->tgparentid);
744 trigger_exists = true;
745 /* copy the tuple to use in CatalogTupleUpdate() */
746 tuple = heap_copytuple(tuple);
747 }
748 systable_endscan(tgscan);
749 }
750
751 if (!trigger_exists)
752 {
753 /* Generate the OID for the new trigger. */
754 trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
755 Anum_pg_trigger_oid);
756 }
757 else
758 {
759 /*
760 * If OR REPLACE was specified, we'll replace the old trigger;
761 * otherwise complain about the duplicate name.
762 */
763 if (!stmt->replace)
766 errmsg("trigger \"%s\" for relation \"%s\" already exists",
767 stmt->trigname, RelationGetRelationName(rel))));
768
769 /*
770 * An internal trigger or a child trigger (isClone) cannot be replaced
771 * by a user-defined trigger. However, skip this test when
772 * in_partition, because then we're recursing from a partitioned table
773 * and the check was made at the parent level.
774 */
775 if ((existing_isInternal || existing_isClone) &&
776 !isInternal && !in_partition)
779 errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
780 stmt->trigname, RelationGetRelationName(rel))));
781
782 /*
783 * It is not allowed to replace with a constraint trigger; gram.y
784 * should have enforced this already.
785 */
786 Assert(!stmt->isconstraint);
787
788 /*
789 * It is not allowed to replace an existing constraint trigger,
790 * either. (The reason for these restrictions is partly that it seems
791 * difficult to deal with pending trigger events in such cases, and
792 * partly that the command might imply changing the constraint's
793 * properties as well, which doesn't seem nice.)
794 */
795 if (OidIsValid(existing_constraint_oid))
798 errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
799 stmt->trigname, RelationGetRelationName(rel))));
800 }
801
802 /*
803 * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
804 * corresponding pg_constraint entry.
805 */
806 if (stmt->isconstraint && !OidIsValid(constraintOid))
807 {
808 /* Internal callers should have made their own constraints */
809 Assert(!isInternal);
810 constraintOid = CreateConstraintEntry(stmt->trigname,
812 CONSTRAINT_TRIGGER,
813 stmt->deferrable,
814 stmt->initdeferred,
815 true, /* Is Enforced */
816 true,
817 InvalidOid, /* no parent */
818 RelationGetRelid(rel),
819 NULL, /* no conkey */
820 0,
821 0,
822 InvalidOid, /* no domain */
823 InvalidOid, /* no index */
824 InvalidOid, /* no foreign key */
825 NULL,
826 NULL,
827 NULL,
828 NULL,
829 0,
830 ' ',
831 ' ',
832 NULL,
833 0,
834 ' ',
835 NULL, /* no exclusion */
836 NULL, /* no check constraint */
837 NULL,
838 true, /* islocal */
839 0, /* inhcount */
840 true, /* noinherit */
841 false, /* conperiod */
842 isInternal); /* is_internal */
843 }
844
845 /*
846 * If trigger is internally generated, modify the provided trigger name to
847 * ensure uniqueness by appending the trigger OID. (Callers will usually
848 * supply a simple constant trigger name in these cases.)
849 */
850 if (isInternal)
851 {
852 snprintf(internaltrigname, sizeof(internaltrigname),
853 "%s_%u", stmt->trigname, trigoid);
854 trigname = internaltrigname;
855 }
856 else
857 {
858 /* user-defined trigger; use the specified trigger name as-is */
859 trigname = stmt->trigname;
860 }
861
862 /*
863 * Build the new pg_trigger tuple.
864 */
865 memset(nulls, false, sizeof(nulls));
866
867 values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
868 values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
869 values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
870 values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
871 CStringGetDatum(trigname));
872 values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
873 values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
874 values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
875 values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
876 values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
877 values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
878 values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
879 values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
880 values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
881
882 if (stmt->args)
883 {
884 ListCell *le;
885 char *args;
886 int16 nargs = list_length(stmt->args);
887 int len = 0;
888
889 foreach(le, stmt->args)
890 {
891 char *ar = strVal(lfirst(le));
892
893 len += strlen(ar) + 4;
894 for (; *ar; ar++)
895 {
896 if (*ar == '\\')
897 len++;
898 }
899 }
900 args = (char *) palloc(len + 1);
901 args[0] = '\0';
902 foreach(le, stmt->args)
903 {
904 char *s = strVal(lfirst(le));
905 char *d = args + strlen(args);
906
907 while (*s)
908 {
909 if (*s == '\\')
910 *d++ = '\\';
911 *d++ = *s++;
912 }
913 strcpy(d, "\\000");
914 }
915 values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
916 values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
918 }
919 else
920 {
921 values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
922 values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
923 CStringGetDatum(""));
924 }
925
926 /* build column number array if it's a column-specific trigger */
927 ncolumns = list_length(stmt->columns);
928 if (ncolumns == 0)
929 columns = NULL;
930 else
931 {
932 ListCell *cell;
933 int i = 0;
934
935 columns = (int16 *) palloc(ncolumns * sizeof(int16));
936 foreach(cell, stmt->columns)
937 {
938 char *name = strVal(lfirst(cell));
940 int j;
941
942 /* Lookup column name. System columns are not allowed */
943 attnum = attnameAttNum(rel, name, false);
946 (errcode(ERRCODE_UNDEFINED_COLUMN),
947 errmsg("column \"%s\" of relation \"%s\" does not exist",
949
950 /* Check for duplicates */
951 for (j = i - 1; j >= 0; j--)
952 {
953 if (columns[j] == attnum)
955 (errcode(ERRCODE_DUPLICATE_COLUMN),
956 errmsg("column \"%s\" specified more than once",
957 name)));
958 }
959
960 columns[i++] = attnum;
961 }
962 }
963 tgattr = buildint2vector(columns, ncolumns);
964 values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
965
966 /* set tgqual if trigger has WHEN clause */
967 if (qual)
968 values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
969 else
970 nulls[Anum_pg_trigger_tgqual - 1] = true;
971
972 if (oldtablename)
973 values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
974 CStringGetDatum(oldtablename));
975 else
976 nulls[Anum_pg_trigger_tgoldtable - 1] = true;
977 if (newtablename)
978 values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
979 CStringGetDatum(newtablename));
980 else
981 nulls[Anum_pg_trigger_tgnewtable - 1] = true;
982
983 /*
984 * Insert or replace tuple in pg_trigger.
985 */
986 if (!trigger_exists)
987 {
988 tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
989 CatalogTupleInsert(tgrel, tuple);
990 }
991 else
992 {
993 HeapTuple newtup;
994
995 newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
996 CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
997 heap_freetuple(newtup);
998 }
999
1000 heap_freetuple(tuple); /* free either original or new tuple */
1002
1003 pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1004 pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1005 pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1006 if (oldtablename)
1007 pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1008 if (newtablename)
1009 pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1010
1011 /*
1012 * Update relation's pg_class entry; if necessary; and if not, send an SI
1013 * message to make other backends (and this one) rebuild relcache entries.
1014 */
1015 pgrel = table_open(RelationRelationId, RowExclusiveLock);
1016 tuple = SearchSysCacheCopy1(RELOID,
1018 if (!HeapTupleIsValid(tuple))
1019 elog(ERROR, "cache lookup failed for relation %u",
1020 RelationGetRelid(rel));
1021 if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1022 {
1023 ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1024
1025 CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1026
1028 }
1029 else
1031
1032 heap_freetuple(tuple);
1034
1035 /*
1036 * If we're replacing a trigger, flush all the old dependencies before
1037 * recording new ones.
1038 */
1039 if (trigger_exists)
1040 deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1041
1042 /*
1043 * Record dependencies for trigger. Always place a normal dependency on
1044 * the function.
1045 */
1046 myself.classId = TriggerRelationId;
1047 myself.objectId = trigoid;
1048 myself.objectSubId = 0;
1049
1050 referenced.classId = ProcedureRelationId;
1051 referenced.objectId = funcoid;
1052 referenced.objectSubId = 0;
1053 recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1054
1055 if (isInternal && OidIsValid(constraintOid))
1056 {
1057 /*
1058 * Internally-generated trigger for a constraint, so make it an
1059 * internal dependency of the constraint. We can skip depending on
1060 * the relation(s), as there'll be an indirect dependency via the
1061 * constraint.
1062 */
1063 referenced.classId = ConstraintRelationId;
1064 referenced.objectId = constraintOid;
1065 referenced.objectSubId = 0;
1066 recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1067 }
1068 else
1069 {
1070 /*
1071 * User CREATE TRIGGER, so place dependencies. We make trigger be
1072 * auto-dropped if its relation is dropped or if the FK relation is
1073 * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1074 */
1075 referenced.classId = RelationRelationId;
1076 referenced.objectId = RelationGetRelid(rel);
1077 referenced.objectSubId = 0;
1078 recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1079
1080 if (OidIsValid(constrrelid))
1081 {
1082 referenced.classId = RelationRelationId;
1083 referenced.objectId = constrrelid;
1084 referenced.objectSubId = 0;
1085 recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1086 }
1087 /* Not possible to have an index dependency in this case */
1088 Assert(!OidIsValid(indexOid));
1089
1090 /*
1091 * If it's a user-specified constraint trigger, make the constraint
1092 * internally dependent on the trigger instead of vice versa.
1093 */
1094 if (OidIsValid(constraintOid))
1095 {
1096 referenced.classId = ConstraintRelationId;
1097 referenced.objectId = constraintOid;
1098 referenced.objectSubId = 0;
1099 recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1100 }
1101
1102 /*
1103 * If it's a partition trigger, create the partition dependencies.
1104 */
1105 if (OidIsValid(parentTriggerOid))
1106 {
1107 ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1108 recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1109 ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1110 recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1111 }
1112 }
1113
1114 /* If column-specific trigger, add normal dependencies on columns */
1115 if (columns != NULL)
1116 {
1117 int i;
1118
1119 referenced.classId = RelationRelationId;
1120 referenced.objectId = RelationGetRelid(rel);
1121 for (i = 0; i < ncolumns; i++)
1122 {
1123 referenced.objectSubId = columns[i];
1124 recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1125 }
1126 }
1127
1128 /*
1129 * If it has a WHEN clause, add dependencies on objects mentioned in the
1130 * expression (eg, functions, as well as any columns used).
1131 */
1132 if (whenRtable != NIL)
1133 recordDependencyOnExpr(&myself, whenClause, whenRtable,
1135
1136 /* Post creation hook for new trigger */
1137 InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1138 isInternal);
1139
1140 /*
1141 * Lastly, create the trigger on child relations, if needed.
1142 */
1143 if (partition_recurse)
1144 {
1145 PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1146 int i;
1147 MemoryContext oldcxt,
1148 perChildCxt;
1149
1151 "part trig clone",
1153
1154 /*
1155 * We don't currently expect to be called with a valid indexOid. If
1156 * that ever changes then we'll need to write code here to find the
1157 * corresponding child index.
1158 */
1159 Assert(!OidIsValid(indexOid));
1160
1161 oldcxt = MemoryContextSwitchTo(perChildCxt);
1162
1163 /* Iterate to create the trigger on each existing partition */
1164 for (i = 0; i < partdesc->nparts; i++)
1165 {
1166 CreateTrigStmt *childStmt;
1167 Relation childTbl;
1168 Node *qual;
1169
1170 childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1171
1172 /*
1173 * Initialize our fabricated parse node by copying the original
1174 * one, then resetting fields that we pass separately.
1175 */
1176 childStmt = copyObject(stmt);
1177 childStmt->funcname = NIL;
1178 childStmt->whenClause = NULL;
1179
1180 /* If there is a WHEN clause, create a modified copy of it */
1181 qual = copyObject(whenClause);
1182 qual = (Node *)
1184 childTbl, rel);
1185 qual = (Node *)
1187 childTbl, rel);
1188
1189 CreateTriggerFiringOn(childStmt, queryString,
1190 partdesc->oids[i], refRelOid,
1192 funcoid, trigoid, qual,
1193 isInternal, true, trigger_fires_when);
1194
1195 table_close(childTbl, NoLock);
1196
1197 MemoryContextReset(perChildCxt);
1198 }
1199
1200 MemoryContextSwitchTo(oldcxt);
1201 MemoryContextDelete(perChildCxt);
1202 }
1203
1204 /* Keep lock on target rel until end of xact */
1205 table_close(rel, NoLock);
1206
1207 return myself;
1208}
1209
1210/*
1211 * TriggerSetParentTrigger
1212 * Set a partition's trigger as child of its parent trigger,
1213 * or remove the linkage if parentTrigId is InvalidOid.
1214 *
1215 * This updates the constraint's pg_trigger row to show it as inherited, and
1216 * adds PARTITION dependencies to prevent the trigger from being deleted
1217 * on its own. Alternatively, reverse that.
1218 */
1219void
1221 Oid childTrigId,
1222 Oid parentTrigId,
1223 Oid childTableId)
1224{
1225 SysScanDesc tgscan;
1226 ScanKeyData skey[1];
1227 Form_pg_trigger trigForm;
1228 HeapTuple tuple,
1229 newtup;
1230 ObjectAddress depender;
1231 ObjectAddress referenced;
1232
1233 /*
1234 * Find the trigger to delete.
1235 */
1236 ScanKeyInit(&skey[0],
1237 Anum_pg_trigger_oid,
1238 BTEqualStrategyNumber, F_OIDEQ,
1239 ObjectIdGetDatum(childTrigId));
1240
1241 tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1242 NULL, 1, skey);
1243
1244 tuple = systable_getnext(tgscan);
1245 if (!HeapTupleIsValid(tuple))
1246 elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1247 newtup = heap_copytuple(tuple);
1248 trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1249 if (OidIsValid(parentTrigId))
1250 {
1251 /* don't allow setting parent for a constraint that already has one */
1252 if (OidIsValid(trigForm->tgparentid))
1253 elog(ERROR, "trigger %u already has a parent trigger",
1254 childTrigId);
1255
1256 trigForm->tgparentid = parentTrigId;
1257
1258 CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1259
1260 ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1261
1262 ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1263 recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1264
1265 ObjectAddressSet(referenced, RelationRelationId, childTableId);
1266 recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1267 }
1268 else
1269 {
1270 trigForm->tgparentid = InvalidOid;
1271
1272 CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1273
1274 deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1275 TriggerRelationId,
1277 deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1278 RelationRelationId,
1280 }
1281
1282 heap_freetuple(newtup);
1283 systable_endscan(tgscan);
1284}
1285
1286
1287/*
1288 * Guts of trigger deletion.
1289 */
1290void
1292{
1293 Relation tgrel;
1294 SysScanDesc tgscan;
1295 ScanKeyData skey[1];
1296 HeapTuple tup;
1297 Oid relid;
1298 Relation rel;
1299
1300 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1301
1302 /*
1303 * Find the trigger to delete.
1304 */
1305 ScanKeyInit(&skey[0],
1306 Anum_pg_trigger_oid,
1307 BTEqualStrategyNumber, F_OIDEQ,
1308 ObjectIdGetDatum(trigOid));
1309
1310 tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1311 NULL, 1, skey);
1312
1313 tup = systable_getnext(tgscan);
1314 if (!HeapTupleIsValid(tup))
1315 elog(ERROR, "could not find tuple for trigger %u", trigOid);
1316
1317 /*
1318 * Open and exclusive-lock the relation the trigger belongs to.
1319 */
1320 relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1321
1322 rel = table_open(relid, AccessExclusiveLock);
1323
1324 if (rel->rd_rel->relkind != RELKIND_RELATION &&
1325 rel->rd_rel->relkind != RELKIND_VIEW &&
1326 rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1327 rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1328 ereport(ERROR,
1329 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1330 errmsg("relation \"%s\" cannot have triggers",
1332 errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1333
1335 ereport(ERROR,
1336 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1337 errmsg("permission denied: \"%s\" is a system catalog",
1339
1340 /*
1341 * Delete the pg_trigger tuple.
1342 */
1343 CatalogTupleDelete(tgrel, &tup->t_self);
1344
1345 systable_endscan(tgscan);
1347
1348 /*
1349 * We do not bother to try to determine whether any other triggers remain,
1350 * which would be needed in order to decide whether it's safe to clear the
1351 * relation's relhastriggers. (In any case, there might be a concurrent
1352 * process adding new triggers.) Instead, just force a relcache inval to
1353 * make other backends (and this one too!) rebuild their relcache entries.
1354 * There's no great harm in leaving relhastriggers true even if there are
1355 * no triggers left.
1356 */
1358
1359 /* Keep lock on trigger's rel until end of xact */
1360 table_close(rel, NoLock);
1361}
1362
1363/*
1364 * get_trigger_oid - Look up a trigger by name to find its OID.
1365 *
1366 * If missing_ok is false, throw an error if trigger not found. If
1367 * true, just return InvalidOid.
1368 */
1369Oid
1370get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1371{
1372 Relation tgrel;
1373 ScanKeyData skey[2];
1374 SysScanDesc tgscan;
1375 HeapTuple tup;
1376 Oid oid;
1377
1378 /*
1379 * Find the trigger, verify permissions, set up object address
1380 */
1381 tgrel = table_open(TriggerRelationId, AccessShareLock);
1382
1383 ScanKeyInit(&skey[0],
1384 Anum_pg_trigger_tgrelid,
1385 BTEqualStrategyNumber, F_OIDEQ,
1386 ObjectIdGetDatum(relid));
1387 ScanKeyInit(&skey[1],
1388 Anum_pg_trigger_tgname,
1389 BTEqualStrategyNumber, F_NAMEEQ,
1390 CStringGetDatum(trigname));
1391
1392 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1393 NULL, 2, skey);
1394
1395 tup = systable_getnext(tgscan);
1396
1397 if (!HeapTupleIsValid(tup))
1398 {
1399 if (!missing_ok)
1400 ereport(ERROR,
1401 (errcode(ERRCODE_UNDEFINED_OBJECT),
1402 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1403 trigname, get_rel_name(relid))));
1404 oid = InvalidOid;
1405 }
1406 else
1407 {
1408 oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1409 }
1410
1411 systable_endscan(tgscan);
1413 return oid;
1414}
1415
1416/*
1417 * Perform permissions and integrity checks before acquiring a relation lock.
1418 */
1419static void
1421 void *arg)
1422{
1423 HeapTuple tuple;
1424 Form_pg_class form;
1425
1426 tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1427 if (!HeapTupleIsValid(tuple))
1428 return; /* concurrently dropped */
1429 form = (Form_pg_class) GETSTRUCT(tuple);
1430
1431 /* only tables and views can have triggers */
1432 if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1433 form->relkind != RELKIND_FOREIGN_TABLE &&
1434 form->relkind != RELKIND_PARTITIONED_TABLE)
1435 ereport(ERROR,
1436 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1437 errmsg("relation \"%s\" cannot have triggers",
1438 rv->relname),
1439 errdetail_relkind_not_supported(form->relkind)));
1440
1441 /* you must own the table to rename one of its triggers */
1442 if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1444 if (!allowSystemTableMods && IsSystemClass(relid, form))
1445 ereport(ERROR,
1446 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1447 errmsg("permission denied: \"%s\" is a system catalog",
1448 rv->relname)));
1449
1450 ReleaseSysCache(tuple);
1451}
1452
1453/*
1454 * renametrig - changes the name of a trigger on a relation
1455 *
1456 * trigger name is changed in trigger catalog.
1457 * No record of the previous name is kept.
1458 *
1459 * get proper relrelation from relation catalog (if not arg)
1460 * scan trigger catalog
1461 * for name conflict (within rel)
1462 * for original trigger (if not arg)
1463 * modify tgname in trigger tuple
1464 * update row in catalog
1465 */
1468{
1469 Oid tgoid;
1470 Relation targetrel;
1471 Relation tgrel;
1472 HeapTuple tuple;
1473 SysScanDesc tgscan;
1474 ScanKeyData key[2];
1475 Oid relid;
1476 ObjectAddress address;
1477
1478 /*
1479 * Look up name, check permissions, and acquire lock (which we will NOT
1480 * release until end of transaction).
1481 */
1483 0,
1485 NULL);
1486
1487 /* Have lock already, so just need to build relcache entry. */
1488 targetrel = relation_open(relid, NoLock);
1489
1490 /*
1491 * On partitioned tables, this operation recurses to partitions. Lock all
1492 * tables upfront.
1493 */
1494 if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1495 (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1496
1497 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1498
1499 /*
1500 * Search for the trigger to modify.
1501 */
1502 ScanKeyInit(&key[0],
1503 Anum_pg_trigger_tgrelid,
1504 BTEqualStrategyNumber, F_OIDEQ,
1505 ObjectIdGetDatum(relid));
1506 ScanKeyInit(&key[1],
1507 Anum_pg_trigger_tgname,
1508 BTEqualStrategyNumber, F_NAMEEQ,
1509 PointerGetDatum(stmt->subname));
1510 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1511 NULL, 2, key);
1512 if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1513 {
1514 Form_pg_trigger trigform;
1515
1516 trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1517 tgoid = trigform->oid;
1518
1519 /*
1520 * If the trigger descends from a trigger on a parent partitioned
1521 * table, reject the rename. We don't allow a trigger in a partition
1522 * to differ in name from that of its parent: that would lead to an
1523 * inconsistency that pg_dump would not reproduce.
1524 */
1525 if (OidIsValid(trigform->tgparentid))
1526 ereport(ERROR,
1527 errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1528 errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1529 stmt->subname, RelationGetRelationName(targetrel)),
1530 errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1531 get_rel_name(get_partition_parent(relid, false))));
1532
1533
1534 /* Rename the trigger on this relation ... */
1535 renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1536 stmt->subname);
1537
1538 /* ... and if it is partitioned, recurse to its partitions */
1539 if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1540 {
1541 PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1542
1543 for (int i = 0; i < partdesc->nparts; i++)
1544 {
1545 Oid partitionId = partdesc->oids[i];
1546
1547 renametrig_partition(tgrel, partitionId, trigform->oid,
1548 stmt->newname, stmt->subname);
1549 }
1550 }
1551 }
1552 else
1553 {
1554 ereport(ERROR,
1555 (errcode(ERRCODE_UNDEFINED_OBJECT),
1556 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1557 stmt->subname, RelationGetRelationName(targetrel))));
1558 }
1559
1560 ObjectAddressSet(address, TriggerRelationId, tgoid);
1561
1562 systable_endscan(tgscan);
1563
1565
1566 /*
1567 * Close rel, but keep exclusive lock!
1568 */
1569 relation_close(targetrel, NoLock);
1570
1571 return address;
1572}
1573
1574/*
1575 * Subroutine for renametrig -- perform the actual work of renaming one
1576 * trigger on one table.
1577 *
1578 * If the trigger has a name different from the expected one, raise a
1579 * NOTICE about it.
1580 */
1581static void
1583 const char *newname, const char *expected_name)
1584{
1585 HeapTuple tuple;
1586 Form_pg_trigger tgform;
1587 ScanKeyData key[2];
1588 SysScanDesc tgscan;
1589
1590 /* If the trigger already has the new name, nothing to do. */
1591 tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1592 if (strcmp(NameStr(tgform->tgname), newname) == 0)
1593 return;
1594
1595 /*
1596 * Before actually trying the rename, search for triggers with the same
1597 * name. The update would fail with an ugly message in that case, and it
1598 * is better to throw a nicer error.
1599 */
1600 ScanKeyInit(&key[0],
1601 Anum_pg_trigger_tgrelid,
1602 BTEqualStrategyNumber, F_OIDEQ,
1604 ScanKeyInit(&key[1],
1605 Anum_pg_trigger_tgname,
1606 BTEqualStrategyNumber, F_NAMEEQ,
1607 PointerGetDatum(newname));
1608 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1609 NULL, 2, key);
1610 if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1611 ereport(ERROR,
1613 errmsg("trigger \"%s\" for relation \"%s\" already exists",
1614 newname, RelationGetRelationName(targetrel))));
1615 systable_endscan(tgscan);
1616
1617 /*
1618 * The target name is free; update the existing pg_trigger tuple with it.
1619 */
1620 tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1621 tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1622
1623 /*
1624 * If the trigger has a name different from what we expected, let the user
1625 * know. (We can proceed anyway, since we must have reached here following
1626 * a tgparentid link.)
1627 */
1628 if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1630 errmsg("renamed trigger \"%s\" on relation \"%s\"",
1631 NameStr(tgform->tgname),
1632 RelationGetRelationName(targetrel)));
1633
1634 namestrcpy(&tgform->tgname, newname);
1635
1636 CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1637
1638 InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1639
1640 /*
1641 * Invalidate relation's relcache entry so that other backends (and this
1642 * one too!) are sent SI message to make them rebuild relcache entries.
1643 * (Ideally this should happen automatically...)
1644 */
1645 CacheInvalidateRelcache(targetrel);
1646}
1647
1648/*
1649 * Subroutine for renametrig -- Helper for recursing to partitions when
1650 * renaming triggers on a partitioned table.
1651 */
1652static void
1653renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1654 const char *newname, const char *expected_name)
1655{
1656 SysScanDesc tgscan;
1658 HeapTuple tuple;
1659
1660 /*
1661 * Given a relation and the OID of a trigger on parent relation, find the
1662 * corresponding trigger in the child and rename that trigger to the given
1663 * name.
1664 */
1666 Anum_pg_trigger_tgrelid,
1667 BTEqualStrategyNumber, F_OIDEQ,
1668 ObjectIdGetDatum(partitionId));
1669 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1670 NULL, 1, &key);
1671 while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1672 {
1673 Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1674 Relation partitionRel;
1675
1676 if (tgform->tgparentid != parentTriggerOid)
1677 continue; /* not our trigger */
1678
1679 partitionRel = table_open(partitionId, NoLock);
1680
1681 /* Rename the trigger on this partition */
1682 renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1683
1684 /* And if this relation is partitioned, recurse to its partitions */
1685 if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1686 {
1687 PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1688 true);
1689
1690 for (int i = 0; i < partdesc->nparts; i++)
1691 {
1692 Oid partoid = partdesc->oids[i];
1693
1694 renametrig_partition(tgrel, partoid, tgform->oid, newname,
1695 NameStr(tgform->tgname));
1696 }
1697 }
1698 table_close(partitionRel, NoLock);
1699
1700 /* There should be at most one matching tuple */
1701 break;
1702 }
1703 systable_endscan(tgscan);
1704}
1705
1706/*
1707 * EnableDisableTrigger()
1708 *
1709 * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1710 * to change 'tgenabled' field for the specified trigger(s)
1711 *
1712 * rel: relation to process (caller must hold suitable lock on it)
1713 * tgname: name of trigger to process, or NULL to scan all triggers
1714 * tgparent: if not zero, process only triggers with this tgparentid
1715 * fires_when: new value for tgenabled field. In addition to generic
1716 * enablement/disablement, this also defines when the trigger
1717 * should be fired in session replication roles.
1718 * skip_system: if true, skip "system" triggers (constraint triggers)
1719 * recurse: if true, recurse to partitions
1720 *
1721 * Caller should have checked permissions for the table; here we also
1722 * enforce that superuser privilege is required to alter the state of
1723 * system triggers
1724 */
1725void
1726EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
1727 char fires_when, bool skip_system, bool recurse,
1728 LOCKMODE lockmode)
1729{
1730 Relation tgrel;
1731 int nkeys;
1732 ScanKeyData keys[2];
1733 SysScanDesc tgscan;
1734 HeapTuple tuple;
1735 bool found;
1736 bool changed;
1737
1738 /* Scan the relevant entries in pg_triggers */
1739 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1740
1741 ScanKeyInit(&keys[0],
1742 Anum_pg_trigger_tgrelid,
1743 BTEqualStrategyNumber, F_OIDEQ,
1745 if (tgname)
1746 {
1747 ScanKeyInit(&keys[1],
1748 Anum_pg_trigger_tgname,
1749 BTEqualStrategyNumber, F_NAMEEQ,
1750 CStringGetDatum(tgname));
1751 nkeys = 2;
1752 }
1753 else
1754 nkeys = 1;
1755
1756 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1757 NULL, nkeys, keys);
1758
1759 found = changed = false;
1760
1761 while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1762 {
1763 Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1764
1765 if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
1766 continue;
1767
1768 if (oldtrig->tgisinternal)
1769 {
1770 /* system trigger ... ok to process? */
1771 if (skip_system)
1772 continue;
1773 if (!superuser())
1774 ereport(ERROR,
1775 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1776 errmsg("permission denied: \"%s\" is a system trigger",
1777 NameStr(oldtrig->tgname))));
1778 }
1779
1780 found = true;
1781
1782 if (oldtrig->tgenabled != fires_when)
1783 {
1784 /* need to change this one ... make a copy to scribble on */
1785 HeapTuple newtup = heap_copytuple(tuple);
1786 Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1787
1788 newtrig->tgenabled = fires_when;
1789
1790 CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1791
1792 heap_freetuple(newtup);
1793
1794 changed = true;
1795 }
1796
1797 /*
1798 * When altering FOR EACH ROW triggers on a partitioned table, do the
1799 * same on the partitions as well, unless ONLY is specified.
1800 *
1801 * Note that we recurse even if we didn't change the trigger above,
1802 * because the partitions' copy of the trigger may have a different
1803 * value of tgenabled than the parent's trigger and thus might need to
1804 * be changed.
1805 */
1806 if (recurse &&
1807 rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1808 (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1809 {
1810 PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1811 int i;
1812
1813 for (i = 0; i < partdesc->nparts; i++)
1814 {
1815 Relation part;
1816
1817 part = relation_open(partdesc->oids[i], lockmode);
1818 /* Match on child triggers' tgparentid, not their name */
1819 EnableDisableTrigger(part, NULL, oldtrig->oid,
1820 fires_when, skip_system, recurse,
1821 lockmode);
1822 table_close(part, NoLock); /* keep lock till commit */
1823 }
1824 }
1825
1826 InvokeObjectPostAlterHook(TriggerRelationId,
1827 oldtrig->oid, 0);
1828 }
1829
1830 systable_endscan(tgscan);
1831
1833
1834 if (tgname && !found)
1835 ereport(ERROR,
1836 (errcode(ERRCODE_UNDEFINED_OBJECT),
1837 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1838 tgname, RelationGetRelationName(rel))));
1839
1840 /*
1841 * If we changed anything, broadcast a SI inval message to force each
1842 * backend (including our own!) to rebuild relation's relcache entry.
1843 * Otherwise they will fail to apply the change promptly.
1844 */
1845 if (changed)
1847}
1848
1849
1850/*
1851 * Build trigger data to attach to the given relcache entry.
1852 *
1853 * Note that trigger data attached to a relcache entry must be stored in
1854 * CacheMemoryContext to ensure it survives as long as the relcache entry.
1855 * But we should be running in a less long-lived working context. To avoid
1856 * leaking cache memory if this routine fails partway through, we build a
1857 * temporary TriggerDesc in working memory and then copy the completed
1858 * structure into cache memory.
1859 */
1860void
1862{
1863 TriggerDesc *trigdesc;
1864 int numtrigs;
1865 int maxtrigs;
1866 Trigger *triggers;
1867 Relation tgrel;
1868 ScanKeyData skey;
1869 SysScanDesc tgscan;
1870 HeapTuple htup;
1871 MemoryContext oldContext;
1872 int i;
1873
1874 /*
1875 * Allocate a working array to hold the triggers (the array is extended if
1876 * necessary)
1877 */
1878 maxtrigs = 16;
1879 triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1880 numtrigs = 0;
1881
1882 /*
1883 * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1884 * be reading the triggers in name order, except possibly during
1885 * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1886 * ensures that triggers will be fired in name order.
1887 */
1888 ScanKeyInit(&skey,
1889 Anum_pg_trigger_tgrelid,
1890 BTEqualStrategyNumber, F_OIDEQ,
1892
1893 tgrel = table_open(TriggerRelationId, AccessShareLock);
1894 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1895 NULL, 1, &skey);
1896
1897 while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1898 {
1899 Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1900 Trigger *build;
1901 Datum datum;
1902 bool isnull;
1903
1904 if (numtrigs >= maxtrigs)
1905 {
1906 maxtrigs *= 2;
1907 triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1908 }
1909 build = &(triggers[numtrigs]);
1910
1911 build->tgoid = pg_trigger->oid;
1913 NameGetDatum(&pg_trigger->tgname)));
1914 build->tgfoid = pg_trigger->tgfoid;
1915 build->tgtype = pg_trigger->tgtype;
1916 build->tgenabled = pg_trigger->tgenabled;
1917 build->tgisinternal = pg_trigger->tgisinternal;
1918 build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1919 build->tgconstrrelid = pg_trigger->tgconstrrelid;
1920 build->tgconstrindid = pg_trigger->tgconstrindid;
1921 build->tgconstraint = pg_trigger->tgconstraint;
1922 build->tgdeferrable = pg_trigger->tgdeferrable;
1923 build->tginitdeferred = pg_trigger->tginitdeferred;
1924 build->tgnargs = pg_trigger->tgnargs;
1925 /* tgattr is first var-width field, so OK to access directly */
1926 build->tgnattr = pg_trigger->tgattr.dim1;
1927 if (build->tgnattr > 0)
1928 {
1929 build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1930 memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1931 build->tgnattr * sizeof(int16));
1932 }
1933 else
1934 build->tgattr = NULL;
1935 if (build->tgnargs > 0)
1936 {
1937 bytea *val;
1938 char *p;
1939
1941 Anum_pg_trigger_tgargs,
1942 tgrel->rd_att, &isnull));
1943 if (isnull)
1944 elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1945 RelationGetRelationName(relation));
1946 p = (char *) VARDATA_ANY(val);
1947 build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1948 for (i = 0; i < build->tgnargs; i++)
1949 {
1950 build->tgargs[i] = pstrdup(p);
1951 p += strlen(p) + 1;
1952 }
1953 }
1954 else
1955 build->tgargs = NULL;
1956
1957 datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1958 tgrel->rd_att, &isnull);
1959 if (!isnull)
1960 build->tgoldtable =
1962 else
1963 build->tgoldtable = NULL;
1964
1965 datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1966 tgrel->rd_att, &isnull);
1967 if (!isnull)
1968 build->tgnewtable =
1970 else
1971 build->tgnewtable = NULL;
1972
1973 datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1974 tgrel->rd_att, &isnull);
1975 if (!isnull)
1976 build->tgqual = TextDatumGetCString(datum);
1977 else
1978 build->tgqual = NULL;
1979
1980 numtrigs++;
1981 }
1982
1983 systable_endscan(tgscan);
1985
1986 /* There might not be any triggers */
1987 if (numtrigs == 0)
1988 {
1989 pfree(triggers);
1990 return;
1991 }
1992
1993 /* Build trigdesc */
1994 trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1995 trigdesc->triggers = triggers;
1996 trigdesc->numtriggers = numtrigs;
1997 for (i = 0; i < numtrigs; i++)
1998 SetTriggerFlags(trigdesc, &(triggers[i]));
1999
2000 /* Copy completed trigdesc into cache storage */
2002 relation->trigdesc = CopyTriggerDesc(trigdesc);
2003 MemoryContextSwitchTo(oldContext);
2004
2005 /* Release working memory */
2006 FreeTriggerDesc(trigdesc);
2007}
2008
2009/*
2010 * Update the TriggerDesc's hint flags to include the specified trigger
2011 */
2012static void
2014{
2015 int16 tgtype = trigger->tgtype;
2016
2017 trigdesc->trig_insert_before_row |=
2018 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2019 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2020 trigdesc->trig_insert_after_row |=
2021 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2022 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2023 trigdesc->trig_insert_instead_row |=
2024 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2025 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2026 trigdesc->trig_insert_before_statement |=
2027 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2028 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2029 trigdesc->trig_insert_after_statement |=
2030 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2031 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2032 trigdesc->trig_update_before_row |=
2033 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2034 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2035 trigdesc->trig_update_after_row |=
2036 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2037 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2038 trigdesc->trig_update_instead_row |=
2039 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2040 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2041 trigdesc->trig_update_before_statement |=
2042 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2043 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2044 trigdesc->trig_update_after_statement |=
2045 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2046 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2047 trigdesc->trig_delete_before_row |=
2048 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2049 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2050 trigdesc->trig_delete_after_row |=
2051 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2052 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2053 trigdesc->trig_delete_instead_row |=
2054 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2055 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2056 trigdesc->trig_delete_before_statement |=
2057 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2058 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2059 trigdesc->trig_delete_after_statement |=
2060 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2061 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2062 /* there are no row-level truncate triggers */
2064 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2065 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2067 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2068 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2069
2070 trigdesc->trig_insert_new_table |=
2071 (TRIGGER_FOR_INSERT(tgtype) &&
2072 TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2073 trigdesc->trig_update_old_table |=
2074 (TRIGGER_FOR_UPDATE(tgtype) &&
2075 TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2076 trigdesc->trig_update_new_table |=
2077 (TRIGGER_FOR_UPDATE(tgtype) &&
2078 TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2079 trigdesc->trig_delete_old_table |=
2080 (TRIGGER_FOR_DELETE(tgtype) &&
2081 TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2082}
2083
2084/*
2085 * Copy a TriggerDesc data structure.
2086 *
2087 * The copy is allocated in the current memory context.
2088 */
2091{
2092 TriggerDesc *newdesc;
2093 Trigger *trigger;
2094 int i;
2095
2096 if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2097 return NULL;
2098
2099 newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2100 memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2101
2102 trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2103 memcpy(trigger, trigdesc->triggers,
2104 trigdesc->numtriggers * sizeof(Trigger));
2105 newdesc->triggers = trigger;
2106
2107 for (i = 0; i < trigdesc->numtriggers; i++)
2108 {
2109 trigger->tgname = pstrdup(trigger->tgname);
2110 if (trigger->tgnattr > 0)
2111 {
2112 int16 *newattr;
2113
2114 newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2115 memcpy(newattr, trigger->tgattr,
2116 trigger->tgnattr * sizeof(int16));
2117 trigger->tgattr = newattr;
2118 }
2119 if (trigger->tgnargs > 0)
2120 {
2121 char **newargs;
2122 int16 j;
2123
2124 newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2125 for (j = 0; j < trigger->tgnargs; j++)
2126 newargs[j] = pstrdup(trigger->tgargs[j]);
2127 trigger->tgargs = newargs;
2128 }
2129 if (trigger->tgqual)
2130 trigger->tgqual = pstrdup(trigger->tgqual);
2131 if (trigger->tgoldtable)
2132 trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2133 if (trigger->tgnewtable)
2134 trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2135 trigger++;
2136 }
2137
2138 return newdesc;
2139}
2140
2141/*
2142 * Free a TriggerDesc data structure.
2143 */
2144void
2146{
2147 Trigger *trigger;
2148 int i;
2149
2150 if (trigdesc == NULL)
2151 return;
2152
2153 trigger = trigdesc->triggers;
2154 for (i = 0; i < trigdesc->numtriggers; i++)
2155 {
2156 pfree(trigger->tgname);
2157 if (trigger->tgnattr > 0)
2158 pfree(trigger->tgattr);
2159 if (trigger->tgnargs > 0)
2160 {
2161 while (--(trigger->tgnargs) >= 0)
2162 pfree(trigger->tgargs[trigger->tgnargs]);
2163 pfree(trigger->tgargs);
2164 }
2165 if (trigger->tgqual)
2166 pfree(trigger->tgqual);
2167 if (trigger->tgoldtable)
2168 pfree(trigger->tgoldtable);
2169 if (trigger->tgnewtable)
2170 pfree(trigger->tgnewtable);
2171 trigger++;
2172 }
2173 pfree(trigdesc->triggers);
2174 pfree(trigdesc);
2175}
2176
/*
 * Compare two TriggerDesc structures for logical equality.
 */
#ifdef NOT_USED
bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			i,
				j;

	/*
	 * We need not examine the hint flags, just the trigger array itself; if
	 * we have the same triggers with the same types, the flags should match.
	 *
	 * As of 7.3 we assume trigger set ordering is significant in the
	 * comparison; so we just compare corresponding slots of the two sets.
	 *
	 * Note: comparing the stringToNode forms of the WHEN clauses means that
	 * parse column locations will affect the result.  This is okay as long as
	 * this function is only used for detecting exact equality, as for example
	 * in checking for staleness of a cache entry.
	 */
	if (trigdesc1 != NULL)
	{
		if (trigdesc2 == NULL)
			return false;
		if (trigdesc1->numtriggers != trigdesc2->numtriggers)
			return false;
		for (i = 0; i < trigdesc1->numtriggers; i++)
		{
			Trigger    *trig1 = trigdesc1->triggers + i;
			Trigger    *trig2 = trigdesc2->triggers + i;

			if (trig1->tgoid != trig2->tgoid)
				return false;
			if (strcmp(trig1->tgname, trig2->tgname) != 0)
				return false;
			if (trig1->tgfoid != trig2->tgfoid)
				return false;
			if (trig1->tgtype != trig2->tgtype)
				return false;
			if (trig1->tgenabled != trig2->tgenabled)
				return false;
			if (trig1->tgisinternal != trig2->tgisinternal)
				return false;
			if (trig1->tgisclone != trig2->tgisclone)
				return false;
			if (trig1->tgconstrrelid != trig2->tgconstrrelid)
				return false;
			if (trig1->tgconstrindid != trig2->tgconstrindid)
				return false;
			if (trig1->tgconstraint != trig2->tgconstraint)
				return false;
			if (trig1->tgdeferrable != trig2->tgdeferrable)
				return false;
			if (trig1->tginitdeferred != trig2->tginitdeferred)
				return false;
			if (trig1->tgnargs != trig2->tgnargs)
				return false;
			if (trig1->tgnattr != trig2->tgnattr)
				return false;
			if (trig1->tgnattr > 0 &&
				memcmp(trig1->tgattr, trig2->tgattr,
					   trig1->tgnattr * sizeof(int16)) != 0)
				return false;
			for (j = 0; j < trig1->tgnargs; j++)
				if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
					return false;
			if (trig1->tgqual == NULL && trig2->tgqual == NULL)
				 /* ok */ ;
			else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
				return false;
			else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
				return false;
			if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
				 /* ok */ ;
			else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
				return false;
			else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
				return false;
			if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
				 /* ok */ ;
			else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
				return false;
			else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
				return false;
		}
	}
	else if (trigdesc2 != NULL)
		return false;
	return true;
}
#endif							/* NOT_USED */
2270
2271/*
2272 * Check if there is a row-level trigger with transition tables that prevents
2273 * a table from becoming an inheritance child or partition. Return the name
2274 * of the first such incompatible trigger, or NULL if there is none.
2275 */
2276const char *
2278{
2279 if (trigdesc != NULL)
2280 {
2281 int i;
2282
2283 for (i = 0; i < trigdesc->numtriggers; ++i)
2284 {
2285 Trigger *trigger = &trigdesc->triggers[i];
2286
2287 if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2288 return trigger->tgname;
2289 }
2290 }
2291
2292 return NULL;
2293}
2294
2295/*
2296 * Call a trigger function.
2297 *
2298 * trigdata: trigger descriptor.
2299 * tgindx: trigger's index in finfo and instr arrays.
2300 * finfo: array of cached trigger function call information.
2301 * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2302 * per_tuple_context: memory context to execute the function in.
2303 *
2304 * Returns the tuple (or NULL) as returned by the function.
2305 */
2306static HeapTuple
2308 int tgindx,
2309 FmgrInfo *finfo,
2310 Instrumentation *instr,
2311 MemoryContext per_tuple_context)
2312{
2313 LOCAL_FCINFO(fcinfo, 0);
2315 Datum result;
2316 MemoryContext oldContext;
2317
2318 /*
2319 * Protect against code paths that may fail to initialize transition table
2320 * info.
2321 */
2323 TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2324 TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2325 TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2326 !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2327 !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2328 (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2329
2330 finfo += tgindx;
2331
2332 /*
2333 * We cache fmgr lookup info, to avoid making the lookup again on each
2334 * call.
2335 */
2336 if (finfo->fn_oid == InvalidOid)
2337 fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2338
2339 Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2340
2341 /*
2342 * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2343 */
2344 if (instr)
2345 InstrStartNode(instr + tgindx);
2346
2347 /*
2348 * Do the function evaluation in the per-tuple memory context, so that
2349 * leaked memory will be reclaimed once per tuple. Note in particular that
2350 * any new tuple created by the trigger function will live till the end of
2351 * the tuple cycle.
2352 */
2353 oldContext = MemoryContextSwitchTo(per_tuple_context);
2354
2355 /*
2356 * Call the function, passing no arguments but setting a context.
2357 */
2358 InitFunctionCallInfoData(*fcinfo, finfo, 0,
2359 InvalidOid, (Node *) trigdata, NULL);
2360
2361 pgstat_init_function_usage(fcinfo, &fcusage);
2362
2364 PG_TRY();
2365 {
2366 result = FunctionCallInvoke(fcinfo);
2367 }
2368 PG_FINALLY();
2369 {
2371 }
2372 PG_END_TRY();
2373
2374 pgstat_end_function_usage(&fcusage, true);
2375
2376 MemoryContextSwitchTo(oldContext);
2377
2378 /*
2379 * Trigger protocol allows function to return a null pointer, but NOT to
2380 * set the isnull result flag.
2381 */
2382 if (fcinfo->isnull)
2383 ereport(ERROR,
2384 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2385 errmsg("trigger function %u returned null value",
2386 fcinfo->flinfo->fn_oid)));
2387
2388 /*
2389 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2390 * one "tuple returned" (really the number of firings).
2391 */
2392 if (instr)
2393 InstrStopNode(instr + tgindx, 1);
2394
2395 return (HeapTuple) DatumGetPointer(result);
2396}
2397
2398void
2400{
2401 TriggerDesc *trigdesc;
2402 int i;
2403 TriggerData LocTriggerData = {0};
2404
2405 trigdesc = relinfo->ri_TrigDesc;
2406
2407 if (trigdesc == NULL)
2408 return;
2409 if (!trigdesc->trig_insert_before_statement)
2410 return;
2411
2412 /* no-op if we already fired BS triggers in this context */
2414 CMD_INSERT))
2415 return;
2416
2417 LocTriggerData.type = T_TriggerData;
2418 LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2420 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2421 for (i = 0; i < trigdesc->numtriggers; i++)
2422 {
2423 Trigger *trigger = &trigdesc->triggers[i];
2424 HeapTuple newtuple;
2425
2426 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2427 TRIGGER_TYPE_STATEMENT,
2428 TRIGGER_TYPE_BEFORE,
2429 TRIGGER_TYPE_INSERT))
2430 continue;
2431 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2432 NULL, NULL, NULL))
2433 continue;
2434
2435 LocTriggerData.tg_trigger = trigger;
2436 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2437 i,
2438 relinfo->ri_TrigFunctions,
2439 relinfo->ri_TrigInstrument,
2440 GetPerTupleMemoryContext(estate));
2441
2442 if (newtuple)
2443 ereport(ERROR,
2444 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2445 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2446 }
2447}
2448
2449void
2451 TransitionCaptureState *transition_capture)
2452{
2453 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2454
2455 if (trigdesc && trigdesc->trig_insert_after_statement)
2456 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2458 false, NULL, NULL, NIL, NULL, transition_capture,
2459 false);
2460}
2461
2462bool
2464 TupleTableSlot *slot)
2465{
2466 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2467 HeapTuple newtuple = NULL;
2468 bool should_free;
2469 TriggerData LocTriggerData = {0};
2470 int i;
2471
2472 LocTriggerData.type = T_TriggerData;
2473 LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2476 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2477 for (i = 0; i < trigdesc->numtriggers; i++)
2478 {
2479 Trigger *trigger = &trigdesc->triggers[i];
2480 HeapTuple oldtuple;
2481
2482 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2483 TRIGGER_TYPE_ROW,
2484 TRIGGER_TYPE_BEFORE,
2485 TRIGGER_TYPE_INSERT))
2486 continue;
2487 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2488 NULL, NULL, slot))
2489 continue;
2490
2491 if (!newtuple)
2492 newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2493
2494 LocTriggerData.tg_trigslot = slot;
2495 LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2496 LocTriggerData.tg_trigger = trigger;
2497 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2498 i,
2499 relinfo->ri_TrigFunctions,
2500 relinfo->ri_TrigInstrument,
2501 GetPerTupleMemoryContext(estate));
2502 if (newtuple == NULL)
2503 {
2504 if (should_free)
2505 heap_freetuple(oldtuple);
2506 return false; /* "do nothing" */
2507 }
2508 else if (newtuple != oldtuple)
2509 {
2511
2512 ExecForceStoreHeapTuple(newtuple, slot, false);
2513
2514 /*
2515 * After a tuple in a partition goes through a trigger, the user
2516 * could have changed the partition key enough that the tuple no
2517 * longer fits the partition. Verify that.
2518 */
2519 if (trigger->tgisclone &&
2520 !ExecPartitionCheck(relinfo, slot, estate, false))
2521 ereport(ERROR,
2522 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2523 errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2524 errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2525 trigger->tgname,
2528
2529 if (should_free)
2530 heap_freetuple(oldtuple);
2531
2532 /* signal tuple should be re-fetched if used */
2533 newtuple = NULL;
2534 }
2535 }
2536
2537 return true;
2538}
2539
2540void
2542 TupleTableSlot *slot, List *recheckIndexes,
2543 TransitionCaptureState *transition_capture)
2544{
2545 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2546
2547 if ((trigdesc && trigdesc->trig_insert_after_row) ||
2548 (transition_capture && transition_capture->tcs_insert_new_table))
2549 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2551 true, NULL, slot,
2552 recheckIndexes, NULL,
2553 transition_capture,
2554 false);
2555}
2556
2557bool
2559 TupleTableSlot *slot)
2560{
2561 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2562 HeapTuple newtuple = NULL;
2563 bool should_free;
2564 TriggerData LocTriggerData = {0};
2565 int i;
2566
2567 LocTriggerData.type = T_TriggerData;
2568 LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2571 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2572 for (i = 0; i < trigdesc->numtriggers; i++)
2573 {
2574 Trigger *trigger = &trigdesc->triggers[i];
2575 HeapTuple oldtuple;
2576
2577 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2578 TRIGGER_TYPE_ROW,
2579 TRIGGER_TYPE_INSTEAD,
2580 TRIGGER_TYPE_INSERT))
2581 continue;
2582 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2583 NULL, NULL, slot))
2584 continue;
2585
2586 if (!newtuple)
2587 newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2588
2589 LocTriggerData.tg_trigslot = slot;
2590 LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2591 LocTriggerData.tg_trigger = trigger;
2592 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2593 i,
2594 relinfo->ri_TrigFunctions,
2595 relinfo->ri_TrigInstrument,
2596 GetPerTupleMemoryContext(estate));
2597 if (newtuple == NULL)
2598 {
2599 if (should_free)
2600 heap_freetuple(oldtuple);
2601 return false; /* "do nothing" */
2602 }
2603 else if (newtuple != oldtuple)
2604 {
2605 ExecForceStoreHeapTuple(newtuple, slot, false);
2606
2607 if (should_free)
2608 heap_freetuple(oldtuple);
2609
2610 /* signal tuple should be re-fetched if used */
2611 newtuple = NULL;
2612 }
2613 }
2614
2615 return true;
2616}
2617
2618void
2620{
2621 TriggerDesc *trigdesc;
2622 int i;
2623 TriggerData LocTriggerData = {0};
2624
2625 trigdesc = relinfo->ri_TrigDesc;
2626
2627 if (trigdesc == NULL)
2628 return;
2629 if (!trigdesc->trig_delete_before_statement)
2630 return;
2631
2632 /* no-op if we already fired BS triggers in this context */
2634 CMD_DELETE))
2635 return;
2636
2637 LocTriggerData.type = T_TriggerData;
2638 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2640 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2641 for (i = 0; i < trigdesc->numtriggers; i++)
2642 {
2643 Trigger *trigger = &trigdesc->triggers[i];
2644 HeapTuple newtuple;
2645
2646 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2647 TRIGGER_TYPE_STATEMENT,
2648 TRIGGER_TYPE_BEFORE,
2649 TRIGGER_TYPE_DELETE))
2650 continue;
2651 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2652 NULL, NULL, NULL))
2653 continue;
2654
2655 LocTriggerData.tg_trigger = trigger;
2656 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2657 i,
2658 relinfo->ri_TrigFunctions,
2659 relinfo->ri_TrigInstrument,
2660 GetPerTupleMemoryContext(estate));
2661
2662 if (newtuple)
2663 ereport(ERROR,
2664 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2665 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2666 }
2667}
2668
2669void
2671 TransitionCaptureState *transition_capture)
2672{
2673 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2674
2675 if (trigdesc && trigdesc->trig_delete_after_statement)
2676 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2678 false, NULL, NULL, NIL, NULL, transition_capture,
2679 false);
2680}
2681
2682/*
2683 * Execute BEFORE ROW DELETE triggers.
2684 *
2685 * True indicates caller can proceed with the delete. False indicates caller
2686 * need to suppress the delete and additionally if requested, we need to pass
2687 * back the concurrently updated tuple if any.
2688 */
2689bool
2691 ResultRelInfo *relinfo,
2692 ItemPointer tupleid,
2693 HeapTuple fdw_trigtuple,
2694 TupleTableSlot **epqslot,
2695 TM_Result *tmresult,
2696 TM_FailureData *tmfd)
2697{
2698 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2699 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2700 bool result = true;
2701 TriggerData LocTriggerData = {0};
2702 HeapTuple trigtuple;
2703 bool should_free = false;
2704 int i;
2705
2706 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2707 if (fdw_trigtuple == NULL)
2708 {
2709 TupleTableSlot *epqslot_candidate = NULL;
2710
2711 if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2712 LockTupleExclusive, slot, &epqslot_candidate,
2713 tmresult, tmfd))
2714 return false;
2715
2716 /*
2717 * If the tuple was concurrently updated and the caller of this
2718 * function requested for the updated tuple, skip the trigger
2719 * execution.
2720 */
2721 if (epqslot_candidate != NULL && epqslot != NULL)
2722 {
2723 *epqslot = epqslot_candidate;
2724 return false;
2725 }
2726
2727 trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2728 }
2729 else
2730 {
2731 trigtuple = fdw_trigtuple;
2732 ExecForceStoreHeapTuple(trigtuple, slot, false);
2733 }
2734
2735 LocTriggerData.type = T_TriggerData;
2736 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2739 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2740 for (i = 0; i < trigdesc->numtriggers; i++)
2741 {
2742 HeapTuple newtuple;
2743 Trigger *trigger = &trigdesc->triggers[i];
2744
2745 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2746 TRIGGER_TYPE_ROW,
2747 TRIGGER_TYPE_BEFORE,
2748 TRIGGER_TYPE_DELETE))
2749 continue;
2750 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2751 NULL, slot, NULL))
2752 continue;
2753
2754 LocTriggerData.tg_trigslot = slot;
2755 LocTriggerData.tg_trigtuple = trigtuple;
2756 LocTriggerData.tg_trigger = trigger;
2757 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2758 i,
2759 relinfo->ri_TrigFunctions,
2760 relinfo->ri_TrigInstrument,
2761 GetPerTupleMemoryContext(estate));
2762 if (newtuple == NULL)
2763 {
2764 result = false; /* tell caller to suppress delete */
2765 break;
2766 }
2767 if (newtuple != trigtuple)
2768 heap_freetuple(newtuple);
2769 }
2770 if (should_free)
2771 heap_freetuple(trigtuple);
2772
2773 return result;
2774}
2775
2776/*
2777 * Note: is_crosspart_update must be true if the DELETE is being performed
2778 * as part of a cross-partition update.
2779 */
2780void
2782 ResultRelInfo *relinfo,
2783 ItemPointer tupleid,
2784 HeapTuple fdw_trigtuple,
2785 TransitionCaptureState *transition_capture,
2786 bool is_crosspart_update)
2787{
2788 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2789
2790 if ((trigdesc && trigdesc->trig_delete_after_row) ||
2791 (transition_capture && transition_capture->tcs_delete_old_table))
2792 {
2793 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2794
2795 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2796 if (fdw_trigtuple == NULL)
2797 GetTupleForTrigger(estate,
2798 NULL,
2799 relinfo,
2800 tupleid,
2802 slot,
2803 NULL,
2804 NULL,
2805 NULL);
2806 else
2807 ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2808
2809 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2811 true, slot, NULL, NIL, NULL,
2812 transition_capture,
2813 is_crosspart_update);
2814 }
2815}
2816
2817bool
2819 HeapTuple trigtuple)
2820{
2821 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2822 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2823 TriggerData LocTriggerData = {0};
2824 int i;
2825
2826 LocTriggerData.type = T_TriggerData;
2827 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2830 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2831
2832 ExecForceStoreHeapTuple(trigtuple, slot, false);
2833
2834 for (i = 0; i < trigdesc->numtriggers; i++)
2835 {
2836 HeapTuple rettuple;
2837 Trigger *trigger = &trigdesc->triggers[i];
2838
2839 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2840 TRIGGER_TYPE_ROW,
2841 TRIGGER_TYPE_INSTEAD,
2842 TRIGGER_TYPE_DELETE))
2843 continue;
2844 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2845 NULL, slot, NULL))
2846 continue;
2847
2848 LocTriggerData.tg_trigslot = slot;
2849 LocTriggerData.tg_trigtuple = trigtuple;
2850 LocTriggerData.tg_trigger = trigger;
2851 rettuple = ExecCallTriggerFunc(&LocTriggerData,
2852 i,
2853 relinfo->ri_TrigFunctions,
2854 relinfo->ri_TrigInstrument,
2855 GetPerTupleMemoryContext(estate));
2856 if (rettuple == NULL)
2857 return false; /* Delete was suppressed */
2858 if (rettuple != trigtuple)
2859 heap_freetuple(rettuple);
2860 }
2861 return true;
2862}
2863
2864void
2866{
2867 TriggerDesc *trigdesc;
2868 int i;
2869 TriggerData LocTriggerData = {0};
2870 Bitmapset *updatedCols;
2871
2872 trigdesc = relinfo->ri_TrigDesc;
2873
2874 if (trigdesc == NULL)
2875 return;
2876 if (!trigdesc->trig_update_before_statement)
2877 return;
2878
2879 /* no-op if we already fired BS triggers in this context */
2881 CMD_UPDATE))
2882 return;
2883
2884 /* statement-level triggers operate on the parent table */
2885 Assert(relinfo->ri_RootResultRelInfo == NULL);
2886
2887 updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2888
2889 LocTriggerData.type = T_TriggerData;
2890 LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2892 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2893 LocTriggerData.tg_updatedcols = updatedCols;
2894 for (i = 0; i < trigdesc->numtriggers; i++)
2895 {
2896 Trigger *trigger = &trigdesc->triggers[i];
2897 HeapTuple newtuple;
2898
2899 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2900 TRIGGER_TYPE_STATEMENT,
2901 TRIGGER_TYPE_BEFORE,
2902 TRIGGER_TYPE_UPDATE))
2903 continue;
2904 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2905 updatedCols, NULL, NULL))
2906 continue;
2907
2908 LocTriggerData.tg_trigger = trigger;
2909 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2910 i,
2911 relinfo->ri_TrigFunctions,
2912 relinfo->ri_TrigInstrument,
2913 GetPerTupleMemoryContext(estate));
2914
2915 if (newtuple)
2916 ereport(ERROR,
2917 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2918 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2919 }
2920}
2921
2922void
2924 TransitionCaptureState *transition_capture)
2925{
2926 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2927
2928 /* statement-level triggers operate on the parent table */
2929 Assert(relinfo->ri_RootResultRelInfo == NULL);
2930
2931 if (trigdesc && trigdesc->trig_update_after_statement)
2932 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2934 false, NULL, NULL, NIL,
2935 ExecGetAllUpdatedCols(relinfo, estate),
2936 transition_capture,
2937 false);
2938}
2939
2940bool
2942 ResultRelInfo *relinfo,
2943 ItemPointer tupleid,
2944 HeapTuple fdw_trigtuple,
2945 TupleTableSlot *newslot,
2946 TM_Result *tmresult,
2947 TM_FailureData *tmfd)
2948{
2949 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2950 TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2951 HeapTuple newtuple = NULL;
2952 HeapTuple trigtuple;
2953 bool should_free_trig = false;
2954 bool should_free_new = false;
2955 TriggerData LocTriggerData = {0};
2956 int i;
2957 Bitmapset *updatedCols;
2958 LockTupleMode lockmode;
2959
2960 /* Determine lock mode to use */
2961 lockmode = ExecUpdateLockMode(estate, relinfo);
2962
2963 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2964 if (fdw_trigtuple == NULL)
2965 {
2966 TupleTableSlot *epqslot_candidate = NULL;
2967
2968 /* get a copy of the on-disk tuple we are planning to update */
2969 if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2970 lockmode, oldslot, &epqslot_candidate,
2971 tmresult, tmfd))
2972 return false; /* cancel the update action */
2973
2974 /*
2975 * In READ COMMITTED isolation level it's possible that target tuple
2976 * was changed due to concurrent update. In that case we have a raw
2977 * subplan output tuple in epqslot_candidate, and need to form a new
2978 * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2979 * received in newslot. Neither we nor our callers have any further
2980 * interest in the passed-in tuple, so it's okay to overwrite newslot
2981 * with the newer data.
2982 */
2983 if (epqslot_candidate != NULL)
2984 {
2985 TupleTableSlot *epqslot_clean;
2986
2987 epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2988 oldslot);
2989
2990 /*
2991 * Typically, the caller's newslot was also generated by
2992 * ExecGetUpdateNewTuple, so that epqslot_clean will be the same
2993 * slot and copying is not needed. But do the right thing if it
2994 * isn't.
2995 */
2996 if (unlikely(newslot != epqslot_clean))
2997 ExecCopySlot(newslot, epqslot_clean);
2998
2999 /*
3000 * At this point newslot contains a virtual tuple that may
3001 * reference some fields of oldslot's tuple in some disk buffer.
3002 * If that tuple is in a different page than the original target
3003 * tuple, then our only pin on that buffer is oldslot's, and we're
3004 * about to release it. Hence we'd better materialize newslot to
3005 * ensure it doesn't contain references into an unpinned buffer.
3006 * (We'd materialize it below anyway, but too late for safety.)
3007 */
3008 ExecMaterializeSlot(newslot);
3009 }
3010
3011 /*
3012 * Here we convert oldslot to a materialized slot holding trigtuple.
3013 * Neither slot passed to the triggers will hold any buffer pin.
3014 */
3015 trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
3016 }
3017 else
3018 {
3019 /* Put the FDW-supplied tuple into oldslot to unify the cases */
3020 ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3021 trigtuple = fdw_trigtuple;
3022 }
3023
3024 LocTriggerData.type = T_TriggerData;
3025 LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3028 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3029 updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3030 LocTriggerData.tg_updatedcols = updatedCols;
3031 for (i = 0; i < trigdesc->numtriggers; i++)
3032 {
3033 Trigger *trigger = &trigdesc->triggers[i];
3034 HeapTuple oldtuple;
3035
3036 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3037 TRIGGER_TYPE_ROW,
3038 TRIGGER_TYPE_BEFORE,
3039 TRIGGER_TYPE_UPDATE))
3040 continue;
3041 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3042 updatedCols, oldslot, newslot))
3043 continue;
3044
3045 if (!newtuple)
3046 newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3047
3048 LocTriggerData.tg_trigslot = oldslot;
3049 LocTriggerData.tg_trigtuple = trigtuple;
3050 LocTriggerData.tg_newtuple = oldtuple = newtuple;
3051 LocTriggerData.tg_newslot = newslot;
3052 LocTriggerData.tg_trigger = trigger;
3053 newtuple = ExecCallTriggerFunc(&LocTriggerData,
3054 i,
3055 relinfo->ri_TrigFunctions,
3056 relinfo->ri_TrigInstrument,
3057 GetPerTupleMemoryContext(estate));
3058
3059 if (newtuple == NULL)
3060 {
3061 if (should_free_trig)
3062 heap_freetuple(trigtuple);
3063 if (should_free_new)
3064 heap_freetuple(oldtuple);
3065 return false; /* "do nothing" */
3066 }
3067 else if (newtuple != oldtuple)
3068 {
3070
3071 ExecForceStoreHeapTuple(newtuple, newslot, false);
3072
3073 /*
3074 * If the tuple returned by the trigger / being stored, is the old
3075 * row version, and the heap tuple passed to the trigger was
3076 * allocated locally, materialize the slot. Otherwise we might
3077 * free it while still referenced by the slot.
3078 */
3079 if (should_free_trig && newtuple == trigtuple)
3080 ExecMaterializeSlot(newslot);
3081
3082 if (should_free_new)
3083 heap_freetuple(oldtuple);
3084
3085 /* signal tuple should be re-fetched if used */
3086 newtuple = NULL;
3087 }
3088 }
3089 if (should_free_trig)
3090 heap_freetuple(trigtuple);
3091
3092 return true;
3093}
3094
3095/*
3096 * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3097 * and destination partitions, respectively, of a cross-partition update of
3098 * the root partitioned table mentioned in the query, given by 'relinfo'.
3099 * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3100 * partition, and 'newslot' contains the "new" tuple in the destination
3101 * partition. This interface allows to support the requirements of
3102 * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3103 * that case.
3104 */
3105void
3107 ResultRelInfo *src_partinfo,
3108 ResultRelInfo *dst_partinfo,
3109 ItemPointer tupleid,
3110 HeapTuple fdw_trigtuple,
3111 TupleTableSlot *newslot,
3112 List *recheckIndexes,
3113 TransitionCaptureState *transition_capture,
3114 bool is_crosspart_update)
3115{
3116 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3117
3118 if ((trigdesc && trigdesc->trig_update_after_row) ||
3119 (transition_capture &&
3120 (transition_capture->tcs_update_old_table ||
3121 transition_capture->tcs_update_new_table)))
3122 {
3123 /*
3124 * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3125 * update-partition-key operation, then this function is also called
3126 * separately for DELETE and INSERT to capture transition table rows.
3127 * In such case, either old tuple or new tuple can be NULL.
3128 */
3129 TupleTableSlot *oldslot;
3130 ResultRelInfo *tupsrc;
3131
3132 Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3133 !is_crosspart_update);
3134
3135 tupsrc = src_partinfo ? src_partinfo : relinfo;
3136 oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3137
3138 if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3139 GetTupleForTrigger(estate,
3140 NULL,
3141 tupsrc,
3142 tupleid,
3144 oldslot,
3145 NULL,
3146 NULL,
3147 NULL);
3148 else if (fdw_trigtuple != NULL)
3149 ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3150 else
3151 ExecClearTuple(oldslot);
3152
3153 AfterTriggerSaveEvent(estate, relinfo,
3154 src_partinfo, dst_partinfo,
3156 true,
3157 oldslot, newslot, recheckIndexes,
3158 ExecGetAllUpdatedCols(relinfo, estate),
3159 transition_capture,
3160 is_crosspart_update);
3161 }
3162}
3163
/*
 * INSTEAD OF ROW UPDATE trigger dispatch (name line 3165 lost in
 * extraction; apparently ExecIRUpdateTriggers -- confirm against trigger.c).
 *
 * Fires each enabled INSTEAD OF ROW UPDATE trigger in turn, letting each
 * one replace the new tuple.  Returns false as soon as any trigger returns
 * NULL ("do nothing"); returns true otherwise.
 * NOTE(review): the event-flag continuation lines (3177-3178) were dropped
 * by the extraction; code is kept verbatim.
 */
3164bool
3166 HeapTuple trigtuple, TupleTableSlot *newslot)
3167{
3168 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3169 TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3170 HeapTuple newtuple = NULL;
3171 bool should_free;
3172 TriggerData LocTriggerData = {0};
3173 int i;
3174
3175 LocTriggerData.type = T_TriggerData;
3176 LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3179 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3180
3181 ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3182
3183 for (i = 0; i < trigdesc->numtriggers; i++)
3184 {
3185 Trigger *trigger = &trigdesc->triggers[i];
3186 HeapTuple oldtuple;
3187
 /* Only ROW-level INSTEAD OF UPDATE triggers are candidates. */
3188 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3189 TRIGGER_TYPE_ROW,
3190 TRIGGER_TYPE_INSTEAD,
3191 TRIGGER_TYPE_UPDATE))
3192 continue;
3193 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3194 NULL, oldslot, newslot))
3195 continue;
3196
 /* Materialize the new tuple lazily, only once per (re-)stored slot;
  * should_free records whether it must be freed after replacement. */
3197 if (!newtuple)
3198 newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3199
3200 LocTriggerData.tg_trigslot = oldslot;
3201 LocTriggerData.tg_trigtuple = trigtuple;
3202 LocTriggerData.tg_newslot = newslot;
3203 LocTriggerData.tg_newtuple = oldtuple = newtuple;
3204
3205 LocTriggerData.tg_trigger = trigger;
3206 newtuple = ExecCallTriggerFunc(&LocTriggerData,
3207 i,
3208 relinfo->ri_TrigFunctions,
3209 relinfo->ri_TrigInstrument,
3210 GetPerTupleMemoryContext(estate));
3211 if (newtuple == NULL)
3212 {
3213 return false; /* "do nothing" */
3214 }
3215 else if (newtuple != oldtuple)
3216 {
 /* Trigger substituted a different tuple: store it back into the
  * slot and discard the previous copy if we owned it. */
3217 ExecForceStoreHeapTuple(newtuple, newslot, false);
3218
3219 if (should_free)
3220 heap_freetuple(oldtuple);
3221
3222 /* signal tuple should be re-fetched if used */
3223 newtuple = NULL;
3224 }
3225 }
3226
3227 return true;
3228}
3229
/*
 * BEFORE STATEMENT TRUNCATE trigger dispatch (name line 3231 lost in
 * extraction; apparently ExecBSTruncateTriggers -- confirm against
 * trigger.c).  Fires each enabled statement-level BEFORE TRUNCATE trigger;
 * such triggers may not return a tuple.
 * NOTE(review): event-flag continuation line 3246 was dropped by the
 * extraction; code is kept verbatim.
 */
3230void
3232{
3233 TriggerDesc *trigdesc;
3234 int i;
3235 TriggerData LocTriggerData = {0};
3236
3237 trigdesc = relinfo->ri_TrigDesc;
3238
 /* Nothing to do if the relation has no such triggers. */
3239 if (trigdesc == NULL)
3240 return;
3241 if (!trigdesc->trig_truncate_before_statement)
3242 return;
3243
3244 LocTriggerData.type = T_TriggerData;
3245 LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3247 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3248
3249 for (i = 0; i < trigdesc->numtriggers; i++)
3250 {
3251 Trigger *trigger = &trigdesc->triggers[i];
3252 HeapTuple newtuple;
3253
3254 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3255 TRIGGER_TYPE_STATEMENT,
3256 TRIGGER_TYPE_BEFORE,
3257 TRIGGER_TYPE_TRUNCATE))
3258 continue;
3259 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3260 NULL, NULL, NULL))
3261 continue;
3262
3263 LocTriggerData.tg_trigger = trigger;
3264 newtuple = ExecCallTriggerFunc(&LocTriggerData,
3265 i,
3266 relinfo->ri_TrigFunctions,
3267 relinfo->ri_TrigInstrument,
3268 GetPerTupleMemoryContext(estate));
3269
 /* Statement-level triggers have no row to replace; returning one is a
  * protocol violation. */
3270 if (newtuple)
3271 ereport(ERROR,
3272 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3273 errmsg("BEFORE STATEMENT trigger cannot return a value")));
3274 }
3275}
3276
/*
 * AFTER STATEMENT TRUNCATE trigger entry point (name line 3278 lost in
 * extraction; apparently ExecASTruncateTriggers -- confirm against
 * trigger.c).  Queues an after-trigger event if the relation has any
 * statement-level AFTER TRUNCATE triggers; there are no old/new tuples or
 * transition tables for TRUNCATE.
 * NOTE(review): the event-type argument line (3285) was dropped by the
 * extraction; code is kept verbatim.
 */
3277void
3279{
3280 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3281
3282 if (trigdesc && trigdesc->trig_truncate_after_statement)
3283 AfterTriggerSaveEvent(estate, relinfo,
3284 NULL, NULL,
3286 false, NULL, NULL, NIL, NULL, NULL,
3287 false);
3288}
3289
3290
3291/*
3292 * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3293 */
/*
 * GetTupleForTrigger (per the comment above; the signature line 3295 was
 * lost in extraction).
 *
 * Fetch the tuple identified by 'tid' into 'oldslot', locking it and
 * handling EvalPlanQual when 'epqslot' is supplied; otherwise do a simple
 * SnapshotAny fetch.  Returns true if the tuple should be processed,
 * false if it was concurrently deleted / superseded.
 * NOTE(review): this extraction dropped several hyperlinked lines (e.g.
 * 3309 with the TM_Result declaration, 3321-3322 before table_tuple_lock,
 * and the errcode lines 3389-3391 / 3397-3399 inside the serialization
 * ereports); code is kept verbatim.
 */
3294static bool
3296 EPQState *epqstate,
3297 ResultRelInfo *relinfo,
3298 ItemPointer tid,
3299 LockTupleMode lockmode,
3300 TupleTableSlot *oldslot,
3301 TupleTableSlot **epqslot,
3302 TM_Result *tmresultp,
3303 TM_FailureData *tmfdp)
3304{
3305 Relation relation = relinfo->ri_RelationDesc;
3306
 /* Locking path: used when the caller may need an EPQ recheck. */
3307 if (epqslot != NULL)
3308 {
3310 TM_FailureData tmfd;
3311 int lockflags = 0;
3312
3313 *epqslot = NULL;
3314
3315 /* caller must pass an epqstate if EvalPlanQual is possible */
3316 Assert(epqstate != NULL);
3317
3318 /*
3319 * lock tuple for update
3320 */
3323 test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3324 estate->es_output_cid,
3325 lockmode, LockWaitBlock,
3326 lockflags,
3327 &tmfd);
3328
3329 /* Let the caller know about the status of this operation */
3330 if (tmresultp)
3331 *tmresultp = test;
3332 if (tmfdp)
3333 *tmfdp = tmfd;
3334
3335 switch (test)
3336 {
3337 case TM_SelfModified:
3338
3339 /*
3340 * The target tuple was already updated or deleted by the
3341 * current command, or by a later command in the current
3342 * transaction. We ignore the tuple in the former case, and
3343 * throw error in the latter case, for the same reasons
3344 * enumerated in ExecUpdate and ExecDelete in
3345 * nodeModifyTable.c.
3346 */
3347 if (tmfd.cmax != estate->es_output_cid)
3348 ereport(ERROR,
3349 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3350 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3351 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3352
3353 /* treat it as deleted; do not process */
3354 return false;
3355
3356 case TM_Ok:
 /* tmfd.traversed means we followed an update chain to a newer
  * version, so the row must be re-verified against the query. */
3357 if (tmfd.traversed)
3358 {
3359 /*
3360 * Recheck the tuple using EPQ. For MERGE, we leave this
3361 * to the caller (it must do additional rechecking, and
3362 * might end up executing a different action entirely).
3363 */
3364 if (estate->es_plannedstmt->commandType == CMD_MERGE)
3365 {
3366 if (tmresultp)
3367 *tmresultp = TM_Updated;
3368 return false;
3369 }
3370
3371 *epqslot = EvalPlanQual(epqstate,
3372 relation,
3373 relinfo->ri_RangeTableIndex,
3374 oldslot);
3375
3376 /*
3377 * If PlanQual failed for updated tuple - we must not
3378 * process this tuple!
3379 */
3380 if (TupIsNull(*epqslot))
3381 {
3382 *epqslot = NULL;
3383 return false;
3384 }
3385 }
3386 break;
3387
3388 case TM_Updated:
 /* NOTE(review): the dropped lines 3389-3391 apparently carried
  * the isolation-level test and errcode for this ereport. */
3390 ereport(ERROR,
3392 errmsg("could not serialize access due to concurrent update")));
3393 elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3394 break;
3395
3396 case TM_Deleted:
3398 ereport(ERROR,
3400 errmsg("could not serialize access due to concurrent delete")));
3401 /* tuple was deleted */
3402 return false;
3403
3404 case TM_Invisible:
3405 elog(ERROR, "attempted to lock invisible tuple");
3406 break;
3407
3408 default:
3409 elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3410 return false; /* keep compiler quiet */
3411 }
3412 }
3413 else
3414 {
3415 /*
3416 * We expect the tuple to be present, thus very simple error handling
3417 * suffices.
3418 */
3419 if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3420 oldslot))
3421 elog(ERROR, "failed to fetch tuple for trigger");
3422 }
3423
3424 return true;
3425}
3426
3427/*
3428 * Is trigger enabled to fire?
3429 */
/*
 * TriggerEnabled (per the comment above; signature line 3431 lost in
 * extraction).
 *
 * Decide whether a trigger should fire for this event: checks the
 * replication-role-dependent enable state, column-specific UPDATE triggers,
 * and the trigger's WHEN clause (compiled lazily, cached per query).
 * NOTE(review): the extraction dropped the replication-role condition line
 * (3437), the column-membership test line (3462) and part of the WHEN-
 * clause compilation (3501-3505); code is kept verbatim.
 */
3430static bool
3432 Trigger *trigger, TriggerEvent event,
3433 Bitmapset *modifiedCols,
3434 TupleTableSlot *oldslot, TupleTableSlot *newslot)
3435{
3436 /* Check replication-role-dependent enable state */
3438 {
3439 if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3440 trigger->tgenabled == TRIGGER_DISABLED)
3441 return false;
3442 }
3443 else /* ORIGIN or LOCAL role */
3444 {
3445 if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3446 trigger->tgenabled == TRIGGER_DISABLED)
3447 return false;
3448 }
3449
3450 /*
3451 * Check for column-specific trigger (only possible for UPDATE, and in
3452 * fact we *must* ignore tgattr for other event types)
3453 */
3454 if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3455 {
3456 int i;
3457 bool modified;
3458
 /* Fire only if at least one of the trigger's columns was modified;
  * the membership test line (3462) was dropped by the extraction. */
3459 modified = false;
3460 for (i = 0; i < trigger->tgnattr; i++)
3461 {
3463 modifiedCols))
3464 {
3465 modified = true;
3466 break;
3467 }
3468 }
3469 if (!modified)
3470 return false;
3471 }
3472
3473 /* Check for WHEN clause */
3474 if (trigger->tgqual)
3475 {
3476 ExprState **predicate;
3477 ExprContext *econtext;
3478 MemoryContext oldContext;
3479 int i;
3480
3481 Assert(estate != NULL);
3482
3483 /*
3484 * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3485 * matching element of relinfo->ri_TrigWhenExprs[]
3486 */
3487 i = trigger - relinfo->ri_TrigDesc->triggers;
3488 predicate = &relinfo->ri_TrigWhenExprs[i];
3489
3490 /*
3491 * If first time through for this WHEN expression, build expression
3492 * nodetrees for it. Keep them in the per-query memory context so
3493 * they'll survive throughout the query.
3494 */
3495 if (*predicate == NULL)
3496 {
3497 Node *tgqual;
3498
3499 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3500 tgqual = stringToNode(trigger->tgqual);
3503 /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3506 /* ExecPrepareQual wants implicit-AND form */
3507 tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3508 *predicate = ExecPrepareQual((List *) tgqual, estate);
3509 MemoryContextSwitchTo(oldContext);
3510 }
3511
3512 /*
3513 * We will use the EState's per-tuple context for evaluating WHEN
3514 * expressions (creating it if it's not already there).
3515 */
3516 econtext = GetPerTupleExprContext(estate);
3517
3518 /*
3519 * Finally evaluate the expression, making the old and/or new tuples
3520 * available as INNER_VAR/OUTER_VAR respectively.
3521 */
3522 econtext->ecxt_innertuple = oldslot;
3523 econtext->ecxt_outertuple = newslot;
3524 if (!ExecQual(*predicate, econtext))
3525 return false;
3526 }
3527
3528 return true;
3529}
3530
3531
3532/* ----------
3533 * After-trigger stuff
3534 *
3535 * The AfterTriggersData struct holds data about pending AFTER trigger events
3536 * during the current transaction tree. (BEFORE triggers are fired
3537 * immediately so we don't need any persistent state about them.) The struct
3538 * and most of its subsidiary data are kept in TopTransactionContext; however
3539 * some data that can be discarded sooner appears in the CurTransactionContext
3540 * of the relevant subtransaction. Also, the individual event records are
3541 * kept in a separate sub-context of TopTransactionContext. This is done
3542 * mainly so that it's easy to tell from a memory context dump how much space
3543 * is being eaten by trigger events.
3544 *
3545 * Because the list of pending events can grow large, we go to some
3546 * considerable effort to minimize per-event memory consumption. The event
3547 * records are grouped into chunks and common data for similar events in the
3548 * same chunk is only stored once.
3549 *
3550 * XXX We need to be able to save the per-event data in a file if it grows too
3551 * large.
3552 * ----------
3553 */
3554
/*
 * NOTE(review): the typedef-name and several field lines of the two structs
 * below were dropped by the HTML extraction.  Judging by the later
 * prototype "static SetConstraintState SetConstraintStateCreate(int
 * numalloc);" and by afterTriggerCheckState()'s use of state->numstates /
 * state->trigstates[i].sct_tgoid / sct_tgisdeferred, these are presumably
 * the per-trigger status record and the SetConstraintState container --
 * confirm against trigger.c.
 */
3555/* Per-trigger SET CONSTRAINT status */
3557{
3561
3563
3564/*
3565 * SET CONSTRAINT intra-transaction status.
3566 *
3567 * We make this a single palloc'd object so it can be copied and freed easily.
3568 *
3569 * all_isset and all_isdeferred are used to keep track
3570 * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3571 *
3572 * trigstates[] stores per-trigger tgisdeferred settings.
3573 */
3575{
3578 int numstates; /* number of trigstates[] entries in use */
3579 int numalloc; /* allocated size of trigstates[] */
3582
3584
3585
3586/*
3587 * Per-trigger-event data
3588 *
3589 * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3590 * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3591 * Each event record also has an associated AfterTriggerSharedData that is
3592 * shared across all instances of similar events within a "chunk".
3593 *
3594 * For row-level triggers, we arrange not to waste storage on unneeded ctid
3595 * fields. Updates of regular tables use two; inserts and deletes of regular
3596 * tables use one; foreign tables always use zero and save the tuple(s) to a
3597 * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3598 * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3599 * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3600 * tuple(s). This permits storing tuples once regardless of the number of
3601 * row-level triggers on a foreign table.
3602 *
3603 * When updates on partitioned tables cause rows to move between partitions,
3604 * the OIDs of both partitions are stored too, so that the tuples can be
3605 * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3606 * partition update").
3607 *
3608 * Note that we need triggers on foreign tables to be fired in exactly the
3609 * order they were queued, so that the tuples come out of the tuplestore in
3610 * the right order. To ensure that, we forbid deferrable (constraint)
3611 * triggers on foreign tables. This also ensures that such triggers do not
3612 * get deferred into outer trigger query levels, meaning that it's okay to
3613 * destroy the tuplestore at the end of the query level.
3614 *
3615 * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3616 * require no ctid field. We lack the flag bit space to neatly represent that
3617 * distinct case, and it seems unlikely to be worth much trouble.
3618 *
3619 * Note: ats_firing_id is initially zero and is set to something else when
3620 * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3621 * cycle the trigger will be fired in (or was fired in, if DONE is set).
3622 * Although this is mutable state, we can keep it in AfterTriggerSharedData
3623 * because all instances of the same type of event in a given event list will
3624 * be fired at the same time, if they were queued between the same firing
3625 * cycles. So we need only ensure that ats_firing_id is zero when attaching
3626 * a new event to an existing AfterTriggerSharedData record.
3627 */
3629
/*
 * Flag bits and record layouts for queued after-trigger events (see the
 * large explanatory comment above).  The low-order AFTER_TRIGGER_OFFSET
 * bits of ate_flags hold the byte offset to the event's shared record
 * (see GetTriggerSharedData below); the high bits carry status and
 * tuple-source/size information (consumed by SizeofTriggerEvent).
 * NOTE(review): the typedef-name lines and some field lines of the structs
 * below were dropped by the HTML extraction; the SizeofTriggerEvent macro
 * names the variants (AfterTriggerEventData, ...NoOids, ...OneCtid,
 * ...ZeroCtids).  Code is kept verbatim.
 */
3630#define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3631#define AFTER_TRIGGER_DONE 0x80000000
3632#define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3633/* bits describing the size and tuple sources of this event */
3634#define AFTER_TRIGGER_FDW_REUSE 0x00000000
3635#define AFTER_TRIGGER_FDW_FETCH 0x20000000
3636#define AFTER_TRIGGER_1CTID 0x10000000
3637#define AFTER_TRIGGER_2CTID 0x30000000
3638#define AFTER_TRIGGER_CP_UPDATE 0x08000000
3639#define AFTER_TRIGGER_TUP_BITS 0x38000000
3641
3643{
3644 TriggerEvent ats_event; /* event type indicator, see trigger.h */
3645 Oid ats_tgoid; /* the trigger's ID */
3646 Oid ats_relid; /* the relation it's on */
3647 Oid ats_rolid; /* role to execute the trigger */
3648 CommandId ats_firing_id; /* ID for firing cycle */
3649 struct AfterTriggersTableData *ats_table; /* transition table access */
3650 Bitmapset *ats_modifiedcols; /* modified columns */
3652
3654
3656{
3657 TriggerFlags ate_flags; /* status bits and offset to shared data */
3658 ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3659 ItemPointerData ate_ctid2; /* new updated tuple */
3660
3661 /*
3662 * During a cross-partition update of a partitioned table, we also store
3663 * the OIDs of source and destination partitions that are needed to fetch
3664 * the old (ctid1) and the new tuple (ctid2) from, respectively.
3665 */
3669
3670/* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3672{
3677
3678/* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3680{
3681 TriggerFlags ate_flags; /* status bits and offset to shared data */
3682 ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3684
3685/* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3687{
3688 TriggerFlags ate_flags; /* status bits and offset to shared data */
3690
/* Pick the physical record size from the tuple-source bits of ate_flags. */
3691#define SizeofTriggerEvent(evt) \
3692 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3693 sizeof(AfterTriggerEventData) : \
3694 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3695 sizeof(AfterTriggerEventDataNoOids) : \
3696 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3697 sizeof(AfterTriggerEventDataOneCtid) : \
3698 sizeof(AfterTriggerEventDataZeroCtids))))
3699
/* Locate an event's shared record via the offset stored in ate_flags. */
3700#define GetTriggerSharedData(evt) \
3701 ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3702
3703/*
3704 * To avoid palloc overhead, we keep trigger events in arrays in successively-
3705 * larger chunks (a slightly more sophisticated version of an expansible
3706 * array). The space between CHUNK_DATA_START and freeptr is occupied by
3707 * AfterTriggerEventData records; the space between endfree and endptr is
3708 * occupied by AfterTriggerSharedData records.
3709 */
/*
 * Event-chunk storage (see the comment above): events grow forward from
 * CHUNK_DATA_START toward freeptr, while shared records grow backward from
 * endptr toward endfree.
 * NOTE(review): the struct/typedef-name lines and some field lines were
 * dropped by the HTML extraction (the list struct visibly lost its
 * head/tail pointer lines, which are referenced as events->head /
 * events->tail elsewhere in this file); code is kept verbatim.
 */
3711{
3712 struct AfterTriggerEventChunk *next; /* list link */
3713 char *freeptr; /* start of free space in chunk */
3714 char *endfree; /* end of free space in chunk */
3715 char *endptr; /* end of chunk */
3716 /* event data follows here */
3718
3719#define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3720
3721/* A list of events */
3723{
3726 char *tailfree; /* freeptr of tail chunk */
3728
3729/* Macros to help in iterating over a list of events */
3730#define for_each_chunk(cptr, evtlist) \
3731 for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3732#define for_each_event(eptr, cptr) \
3733 for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3734 (char *) eptr < (cptr)->freeptr; \
3735 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3736/* Use this if no special per-chunk processing is needed */
3737#define for_each_event_chunk(eptr, cptr, evtlist) \
3738 for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3739
3740/* Macros for iterating from a start point that might not be list start */
3741#define for_each_chunk_from(cptr) \
3742 for (; cptr != NULL; cptr = cptr->next)
3743#define for_each_event_from(eptr, cptr) \
3744 for (; \
3745 (char *) eptr < (cptr)->freeptr; \
3746 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3747
3748
3749/*
3750 * All per-transaction data for the AFTER TRIGGERS module.
3751 *
3752 * AfterTriggersData has the following fields:
3753 *
3754 * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3755 * We mark firable events with the current firing cycle's ID so that we can
3756 * tell which ones to work on. This ensures sane behavior if a trigger
3757 * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3758 * only fire those events that weren't already scheduled for firing.
3759 *
3760 * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3761 * This is saved and restored across failed subtransactions.
3762 *
3763 * events is the current list of deferred events. This is global across
3764 * all subtransactions of the current transaction. In a subtransaction
3765 * abort, we know that the events added by the subtransaction are at the
3766 * end of the list, so it is relatively easy to discard them. The event
3767 * list chunks themselves are stored in event_cxt.
3768 *
3769 * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3770 * (-1 when the stack is empty).
3771 *
3772 * query_stack[query_depth] is the per-query-level data, including these fields:
3773 *
3774 * events is a list of AFTER trigger events queued by the current query.
3775 * None of these are valid until the matching AfterTriggerEndQuery call
3776 * occurs. At that point we fire immediate-mode triggers, and append any
3777 * deferred events to the main events list.
3778 *
3779 * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3780 * needed by events queued by the current query. (Note: we use just one
3781 * tuplestore even though more than one foreign table might be involved.
3782 * This is okay because tuplestores don't really care what's in the tuples
3783 * they store; but it's possible that someday it'd break.)
3784 *
3785 * tables is a List of AfterTriggersTableData structs for target tables
3786 * of the current query (see below).
3787 *
3788 * maxquerydepth is just the allocated length of query_stack.
3789 *
3790 * trans_stack holds per-subtransaction data, including these fields:
3791 *
3792 * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3793 * state data. Each subtransaction level that modifies that state first
3794 * saves a copy, which we use to restore the state if we abort.
3795 *
3796 * events is a copy of the events head/tail pointers,
3797 * which we use to restore those values during subtransaction abort.
3798 *
3799 * query_depth is the subtransaction-start-time value of query_depth,
3800 * which we similarly use to clean up at subtransaction abort.
3801 *
3802 * firing_counter is the subtransaction-start-time value of firing_counter.
3803 * We use this to recognize which deferred triggers were fired (or marked
3804 * for firing) within an aborted subtransaction.
3805 *
3806 * We use GetCurrentTransactionNestLevel() to determine the correct array
3807 * index in trans_stack. maxtransdepth is the number of allocated entries in
3808 * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3809 * in cases where errors during subxact abort cause multiple invocations
3810 * of AfterTriggerEndSubXact() at the same nesting depth.)
3811 *
3812 * We create an AfterTriggersTableData struct for each target table of the
3813 * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3814 * either transition tables or statement-level triggers. This is used to
3815 * hold the relevant transition tables, as well as info tracking whether
3816 * we already queued the statement triggers. (We use that info to prevent
3817 * firing the same statement triggers more than once per statement, or really
3818 * once per transition table set.) These structs, along with the transition
3819 * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3820 * That's sufficient lifespan because we don't allow transition tables to be
3821 * used by deferrable triggers, so they only need to survive until
3822 * AfterTriggerEndQuery.
3823 */
3827
3828typedef struct AfterTriggersData
3829{
3830 CommandId firing_counter; /* next firing ID to assign */
3831 SetConstraintState state; /* the active S C state */
3832 AfterTriggerEventList events; /* deferred-event list */
3833 MemoryContext event_cxt; /* memory context for events, if any */
3834
3835 /* per-query-level data: */
3836 AfterTriggersQueryData *query_stack; /* array of structs shown below */
3837 int query_depth; /* current index in above array */
3838 int maxquerydepth; /* allocated len of above array */
3839
3840 /* per-subtransaction-level data: */
3841 AfterTriggersTransData *trans_stack; /* array of structs shown below */
3842 int maxtransdepth; /* allocated len of above array */
3844
3846{
3847 AfterTriggerEventList events; /* events pending from this query */
3848 Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3849 List *tables; /* list of AfterTriggersTableData, see below */
3850};
3851
3853{
3854 /* these fields are just for resetting at subtrans abort: */
3855 SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3856 AfterTriggerEventList events; /* saved list pointer */
3857 int query_depth; /* saved query_depth */
3858 CommandId firing_counter; /* saved firing_counter */
3859};
3860
3862{
3863 /* relid + cmdType form the lookup key for these structs: */
3864 Oid relid; /* target table's OID */
3865 CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3866 bool closed; /* true when no longer OK to add tuples */
3867 bool before_trig_done; /* did we already queue BS triggers? */
3868 bool after_trig_done; /* did we already queue AS triggers? */
3869 AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3870
3871 /*
3872 * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3873 * MERGE can run all three actions in a single statement. Note that UPDATE
3874 * needs both old and new transition tables whereas INSERT needs only new,
3875 * and DELETE needs only old.
3876 */
3877
3878 /* "old" transition table for UPDATE, if any */
3880 /* "new" transition table for UPDATE, if any */
3882 /* "old" transition table for DELETE, if any */
3884 /* "new" transition table for INSERT, if any */
3886
3887 TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3888};
3889
3891
3892static void AfterTriggerExecute(EState *estate,
3893 AfterTriggerEvent event,
3894 ResultRelInfo *relInfo,
3895 ResultRelInfo *src_relInfo,
3896 ResultRelInfo *dst_relInfo,
3897 TriggerDesc *trigdesc,
3898 FmgrInfo *finfo,
3899 Instrumentation *instr,
3900 MemoryContext per_tuple_context,
3901 TupleTableSlot *trig_tuple_slot1,
3902 TupleTableSlot *trig_tuple_slot2);
3904 CmdType cmdType);
3906 TupleDesc tupdesc);
3908 TupleTableSlot *oldslot,
3909 TupleTableSlot *newslot,
3910 TransitionCaptureState *transition_capture);
3911static void TransitionTableAddTuple(EState *estate,
3912 TransitionCaptureState *transition_capture,
3913 ResultRelInfo *relinfo,
3914 TupleTableSlot *slot,
3915 TupleTableSlot *original_insert_tuple,
3916 Tuplestorestate *tuplestore);
3918static SetConstraintState SetConstraintStateCreate(int numalloc);
3921 Oid tgoid, bool tgisdeferred);
3922static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3923
3924
3925/*
3926 * Get the FDW tuplestore for the current trigger query level, creating it
3927 * if necessary.
3928 */
/*
 * Per the comment above: return the FDW tuplestore for the current trigger
 * query level, creating it if necessary (signature line 3930 lost in
 * extraction; apparently GetCurrentFDWTuplestore -- confirm against
 * trigger.c).
 * NOTE(review): the extraction also dropped the lines that read/write the
 * query-stack field (3934, 3953) and the context/resource-owner switch
 * lines (3944, 3946); code is kept verbatim.
 */
3929static Tuplestorestate *
3931{
3932 Tuplestorestate *ret;
3933
3935 if (ret == NULL)
3936 {
3937 MemoryContext oldcxt;
3938 ResourceOwner saveResourceOwner;
3939
3940 /*
3941 * Make the tuplestore valid until end of subtransaction. We really
3942 * only need it until AfterTriggerEndQuery().
3943 */
3945 saveResourceOwner = CurrentResourceOwner;
3947
3948 ret = tuplestore_begin_heap(false, false, work_mem);
3949
 /* restore caller's resource owner and memory context */
3950 CurrentResourceOwner = saveResourceOwner;
3951 MemoryContextSwitchTo(oldcxt);
3952
3954 }
3955
3956 return ret;
3957}
3958
3959/* ----------
3960 * afterTriggerCheckState()
3961 *
3962 * Returns true if the trigger event is actually in state DEFERRED.
3963 * ----------
3964 */
/*
 * afterTriggerCheckState (per the comment above; signature line 3966 lost
 * in extraction).
 *
 * Returns true if the trigger event is actually in state DEFERRED, checking
 * per-trigger SET CONSTRAINTS entries first, then SET CONSTRAINTS ALL, and
 * finally the trigger's INITIALLY DEFERRED default.
 * NOTE(review): line 3969, presumably the local 'state' variable's
 * declaration/initialization from the module's SET CONSTRAINTS state, was
 * dropped by the extraction; code is kept verbatim.
 */
3965static bool
3967{
3968 Oid tgoid = evtshared->ats_tgoid;
3970 int i;
3971
3972 /*
3973 * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3974 * constraints declared NOT DEFERRABLE), the state is always false.
3975 */
3976 if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3977 return false;
3978
3979 /*
3980 * If constraint state exists, SET CONSTRAINTS might have been executed
3981 * either for this trigger or for all triggers.
3982 */
3983 if (state != NULL)
3984 {
3985 /* Check for SET CONSTRAINTS for this specific trigger. */
3986 for (i = 0; i < state->numstates; i++)
3987 {
3988 if (state->trigstates[i].sct_tgoid == tgoid)
3989 return state->trigstates[i].sct_tgisdeferred;
3990 }
3991
3992 /* Check for SET CONSTRAINTS ALL. */
3993 if (state->all_isset)
3994 return state->all_isdeferred;
3995 }
3996
3997 /*
3998 * Otherwise return the default state for the trigger.
3999 */
4000 return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
4001}
4002
4003/* ----------
4004 * afterTriggerCopyBitmap()
4005 *
4006 * Copy bitmap into AfterTriggerEvents memory context, which is where the after
4007 * trigger events are kept.
4008 * ----------
4009 */
/*
 * afterTriggerCopyBitmap (per the comment above; signature line 4011 lost
 * in extraction).
 *
 * Copy a modified-columns bitmap into the after-trigger events memory
 * context so it lives as long as the queued events; returns NULL for a
 * NULL input.
 * NOTE(review): line 4019, presumably the MemoryContextSwitchTo() into the
 * events context that initializes 'oldcxt', was dropped by the extraction;
 * code is kept verbatim.
 */
4010static Bitmapset *
4012{
4013 Bitmapset *dst;
4014 MemoryContext oldcxt;
4015
4016 if (src == NULL)
4017 return NULL;
4018
4020
4021 dst = bms_copy(src);
4022
4023 MemoryContextSwitchTo(oldcxt);
4024
4025 return dst;
4026}
4027
4028/* ----------
4029 * afterTriggerAddEvent()
4030 *
4031 * Add a new trigger event to the specified queue.
4032 * The passed-in event data is copied.
4033 * ----------
4034 */
/*
 * afterTriggerAddEvent (per the comment above; signature line 4036 lost in
 * extraction).
 *
 * Append a copy of 'event' to 'events', allocating a new chunk if the tail
 * chunk is full, and share (or create) a matching AfterTriggerSharedData
 * record at the chunk's far end.  Events grow from the chunk's front,
 * shared records from its back.
 * NOTE(review): the extraction dropped the 'chunk' declaration (4041), the
 * AllocSetContextCreate continuation lines (4057-4060) and the
 * ats_modifiedcols copy line (4140); code is kept verbatim.
 */
4035static void
4037 AfterTriggerEvent event, AfterTriggerShared evtshared)
4038{
4039 Size eventsize = SizeofTriggerEvent(event);
4040 Size needed = eventsize + sizeof(AfterTriggerSharedData);
4042 AfterTriggerShared newshared;
4043 AfterTriggerEvent newevent;
4044
4045 /*
4046 * If empty list or not enough room in the tail chunk, make a new chunk.
4047 * We assume here that a new shared record will always be needed.
4048 */
4049 chunk = events->tail;
4050 if (chunk == NULL ||
4051 chunk->endfree - chunk->freeptr < needed)
4052 {
4053 Size chunksize;
4054
4055 /* Create event context if we didn't already */
4056 if (afterTriggers.event_cxt == NULL)
4059 "AfterTriggerEvents",
4061
4062 /*
4063 * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4064 * These numbers are fairly arbitrary, though there is a hard limit at
4065 * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4066 * shared records using the available space in ate_flags. Another
4067 * constraint is that if the chunk size gets too huge, the search loop
4068 * below would get slow given a (not too common) usage pattern with
4069 * many distinct event types in a chunk. Therefore, we double the
4070 * preceding chunk size only if there weren't too many shared records
4071 * in the preceding chunk; otherwise we halve it. This gives us some
4072 * ability to adapt to the actual usage pattern of the current query
4073 * while still having large chunk sizes in typical usage. All chunk
4074 * sizes used should be MAXALIGN multiples, to ensure that the shared
4075 * records will be aligned safely.
4076 */
4077#define MIN_CHUNK_SIZE 1024
4078#define MAX_CHUNK_SIZE (1024*1024)
4079
4080#if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4081#error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4082#endif
4083
4084 if (chunk == NULL)
4085 chunksize = MIN_CHUNK_SIZE;
4086 else
4087 {
4088 /* preceding chunk size... */
4089 chunksize = chunk->endptr - (char *) chunk;
4090 /* check number of shared records in preceding chunk */
4091 if ((chunk->endptr - chunk->endfree) <=
4092 (100 * sizeof(AfterTriggerSharedData)))
4093 chunksize *= 2; /* okay, double it */
4094 else
4095 chunksize /= 2; /* too many shared records */
4096 chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4097 }
4098 chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4099 chunk->next = NULL;
4100 chunk->freeptr = CHUNK_DATA_START(chunk);
4101 chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4102 Assert(chunk->endfree - chunk->freeptr >= needed);
4103
 /* link the new chunk at the list tail */
4104 if (events->tail == NULL)
4105 {
4106 Assert(events->head == NULL);
4107 events->head = chunk;
4108 }
4109 else
4110 events->tail->next = chunk;
4111 events->tail = chunk;
4112 /* events->tailfree is now out of sync, but we'll fix it below */
4113 }
4114
4115 /*
4116 * Try to locate a matching shared-data record already in the chunk. If
4117 * none, make a new one. The search begins with the most recently added
4118 * record, since newer ones are most likely to match.
4119 */
4120 for (newshared = (AfterTriggerShared) chunk->endfree;
4121 (char *) newshared < chunk->endptr;
4122 newshared++)
4123 {
4124 /* compare fields roughly by probability of them being different */
4125 if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4126 newshared->ats_event == evtshared->ats_event &&
4127 newshared->ats_firing_id == 0 &&
4128 newshared->ats_table == evtshared->ats_table &&
4129 newshared->ats_relid == evtshared->ats_relid &&
4130 newshared->ats_rolid == evtshared->ats_rolid &&
4131 bms_equal(newshared->ats_modifiedcols,
4132 evtshared->ats_modifiedcols))
4133 break;
4134 }
4135 if ((char *) newshared >= chunk->endptr)
4136 {
 /* no match: carve a new shared record off the chunk's back end */
4137 newshared = ((AfterTriggerShared) chunk->endfree) - 1;
4138 *newshared = *evtshared;
4139 /* now we must make a suitably-long-lived copy of the bitmap */
4141 newshared->ats_firing_id = 0; /* just to be sure */
4142 chunk->endfree = (char *) newshared;
4143 }
4144
4145 /* Insert the data */
4146 newevent = (AfterTriggerEvent) chunk->freeptr;
4147 memcpy(newevent, event, eventsize);
4148 /* ... and link the new event to its shared record */
4149 newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4150 newevent->ate_flags |= (char *) newshared - (char *) newevent;
4151
4152 chunk->freeptr += eventsize;
4153 events->tailfree = chunk->freeptr;
4154}
4155
4156/* ----------
4157 * afterTriggerFreeEventList()
4158 *
4159 * Free all the event storage in the given list.
4160 * ----------
4161 */
4162static void
4164{
4166
4167 while ((chunk = events->head) != NULL)
4168 {
4169 events->head = chunk->next;
4170 pfree(chunk);
4171 }
4172 events->tail = NULL;
4173 events->tailfree = NULL;
4174}
4175
4176/* ----------
4177 * afterTriggerRestoreEventList()
4178 *
4179 * Restore an event list to its prior length, removing all the events
4180 * added since it had the value old_events.
4181 * ----------
4182 */
4183static void
4185 const AfterTriggerEventList *old_events)
4186{
4188 AfterTriggerEventChunk *next_chunk;
4189
4190 if (old_events->tail == NULL)
4191 {
4192 /* restoring to a completely empty state, so free everything */
4194 }
4195 else
4196 {
4197 *events = *old_events;
4198 /* free any chunks after the last one we want to keep */
4199 for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4200 {
4201 next_chunk = chunk->next;
4202 pfree(chunk);
4203 }
4204 /* and clean up the tail chunk to be the right length */
4205 events->tail->next = NULL;
4206 events->tail->freeptr = events->tailfree;
4207
4208 /*
4209 * We don't make any effort to remove now-unused shared data records.
4210 * They might still be useful, anyway.
4211 */
4212 }
4213}
4214
4215/* ----------
4216 * afterTriggerDeleteHeadEventChunk()
4217 *
4218 * Remove the first chunk of events from the query level's event list.
4219 * Keep any event list pointers elsewhere in the query level's data
4220 * structures in sync.
4221 * ----------
4222 */
4223static void
4225{
4226 AfterTriggerEventChunk *target = qs->events.head;
4227 ListCell *lc;
4228
4229 Assert(target && target->next);
4230
4231 /*
4232 * First, update any pointers in the per-table data, so that they won't be
4233 * dangling. Resetting obsoleted pointers to NULL will make
4234 * cancel_prior_stmt_triggers start from the list head, which is fine.
4235 */
4236 foreach(lc, qs->tables)
4237 {
4239
4240 if (table->after_trig_done &&
4241 table->after_trig_events.tail == target)
4242 {
4243 table->after_trig_events.head = NULL;
4244 table->after_trig_events.tail = NULL;
4245 table->after_trig_events.tailfree = NULL;
4246 }
4247 }
4248
4249 /* Now we can flush the head chunk */
4250 qs->events.head = target->next;
4251 pfree(target);
4252}
4253
4254
4255/* ----------
4256 * AfterTriggerExecute()
4257 *
4258 * Fetch the required tuples back from the heap and fire one
4259 * single trigger function.
4260 *
4261 * Frequently, this will be fired many times in a row for triggers of
4262 * a single relation. Therefore, we cache the open relation and provide
4263 * fmgr lookup cache space at the caller level. (For triggers fired at
4264 * the end of a query, we can even piggyback on the executor's state.)
4265 *
4266 * When fired for a cross-partition update of a partitioned table, the old
4267 * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4268 * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4269 * both are converted into the root partitioned table's format before passing
4270 * to the trigger function.
4271 *
4272 * event: event currently being fired.
4273 * relInfo: result relation for event.
4274 * src_relInfo: source partition of a cross-partition update
4275 * dst_relInfo: its destination partition
4276 * trigdesc: working copy of rel's trigger info.
4277 * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4278 * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4279 * or NULL if no instrumentation is wanted.
4280 * per_tuple_context: memory context to call trigger function in.
4281 * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4282 * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4283 * ----------
4284 */
4285static void
4287 AfterTriggerEvent event,
4288 ResultRelInfo *relInfo,
4289 ResultRelInfo *src_relInfo,
4290 ResultRelInfo *dst_relInfo,
4291 TriggerDesc *trigdesc,
4292 FmgrInfo *finfo, Instrumentation *instr,
4293 MemoryContext per_tuple_context,
4294 TupleTableSlot *trig_tuple_slot1,
4295 TupleTableSlot *trig_tuple_slot2)
4296{
4297 Relation rel = relInfo->ri_RelationDesc;
4298 Relation src_rel = src_relInfo->ri_RelationDesc;
4299 Relation dst_rel = dst_relInfo->ri_RelationDesc;
4300 AfterTriggerShared evtshared = GetTriggerSharedData(event);
4301 Oid tgoid = evtshared->ats_tgoid;
4302 TriggerData LocTriggerData = {0};
4303 Oid save_rolid;
4304 int save_sec_context;
4305 HeapTuple rettuple;
4306 int tgindx;
4307 bool should_free_trig = false;
4308 bool should_free_new = false;
4309
4310 /*
4311 * Locate trigger in trigdesc. It might not be present, and in fact the
4312 * trigdesc could be NULL, if the trigger was dropped since the event was
4313 * queued. In that case, silently do nothing.
4314 */
4315 if (trigdesc == NULL)
4316 return;
4317 for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4318 {
4319 if (trigdesc->triggers[tgindx].tgoid == tgoid)
4320 {
4321 LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4322 break;
4323 }
4324 }
4325 if (LocTriggerData.tg_trigger == NULL)
4326 return;
4327
4328 /*
4329 * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4330 * to include time spent re-fetching tuples in the trigger cost.
4331 */
4332 if (instr)
4333 InstrStartNode(instr + tgindx);
4334
4335 /*
4336 * Fetch the required tuple(s).
4337 */
4338 switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4339 {
4341 {
4342 Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4343
4344 if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4345 trig_tuple_slot1))
4346 elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4347
4348 if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4350 !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4351 trig_tuple_slot2))
4352 elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4353 }
4354 /* fall through */
4356
4357 /*
4358 * Store tuple in the slot so that tg_trigtuple does not reference
4359 * tuplestore memory. (It is formally possible for the trigger
4360 * function to queue trigger events that add to the same
4361 * tuplestore, which can push other tuples out of memory.) The
4362 * distinction is academic, because we start with a minimal tuple
4363 * that is stored as a heap tuple, constructed in different memory
4364 * context, in the slot anyway.
4365 */
4366 LocTriggerData.tg_trigslot = trig_tuple_slot1;
4367 LocTriggerData.tg_trigtuple =
4368 ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4369
4370 if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4372 {
4373 LocTriggerData.tg_newslot = trig_tuple_slot2;
4374 LocTriggerData.tg_newtuple =
4375 ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4376 }
4377 else
4378 {
4379 LocTriggerData.tg_newtuple = NULL;
4380 }
4381 break;
4382
4383 default:
4384 if (ItemPointerIsValid(&(event->ate_ctid1)))
4385 {
4386 TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4387 src_relInfo);
4388
4389 if (!table_tuple_fetch_row_version(src_rel,
4390 &(event->ate_ctid1),
4392 src_slot))
4393 elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4394
4395 /*
4396 * Store the tuple fetched from the source partition into the
4397 * target (root partitioned) table slot, converting if needed.
4398 */
4399 if (src_relInfo != relInfo)
4400 {
4401 TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4402
4403 LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4404 if (map)
4405 {
4407 src_slot,
4408 LocTriggerData.tg_trigslot);
4409 }
4410 else
4411 ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4412 }
4413 else
4414 LocTriggerData.tg_trigslot = src_slot;
4415 LocTriggerData.tg_trigtuple =
4416 ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4417 }
4418 else
4419 {
4420 LocTriggerData.tg_trigtuple = NULL;
4421 }
4422
4423 /* don't touch ctid2 if not there */
4425 (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4426 ItemPointerIsValid(&(event->ate_ctid2)))
4427 {
4428 TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4429 dst_relInfo);
4430
4431 if (!table_tuple_fetch_row_version(dst_rel,
4432 &(event->ate_ctid2),
4434 dst_slot))
4435 elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4436
4437 /*
4438 * Store the tuple fetched from the destination partition into
4439 * the target (root partitioned) table slot, converting if
4440 * needed.
4441 */
4442 if (dst_relInfo != relInfo)
4443 {
4444 TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4445
4446 LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4447 if (map)
4448 {
4450 dst_slot,
4451 LocTriggerData.tg_newslot);
4452 }
4453 else
4454 ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4455 }
4456 else
4457 LocTriggerData.tg_newslot = dst_slot;
4458 LocTriggerData.tg_newtuple =
4459 ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4460 }
4461 else
4462 {
4463 LocTriggerData.tg_newtuple = NULL;
4464 }
4465 }
4466
4467 /*
4468 * Set up the tuplestore information to let the trigger have access to
4469 * transition tables. When we first make a transition table available to
4470 * a trigger, mark it "closed" so that it cannot change anymore. If any
4471 * additional events of the same type get queued in the current trigger
4472 * query level, they'll go into new transition tables.
4473 */
4474 LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4475 if (evtshared->ats_table)
4476 {
4477 if (LocTriggerData.tg_trigger->tgoldtable)
4478 {
4479 if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4480 LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4481 else
4482 LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4483 evtshared->ats_table->closed = true;
4484 }
4485
4486 if (LocTriggerData.tg_trigger->tgnewtable)
4487 {
4488 if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4489 LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4490 else
4491 LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4492 evtshared->ats_table->closed = true;
4493 }
4494 }
4495
4496 /*
4497 * Setup the remaining trigger information
4498 */
4499 LocTriggerData.type = T_TriggerData;
4500 LocTriggerData.tg_event =
4502 LocTriggerData.tg_relation = rel;
4503 if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4504 LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4505
4506 MemoryContextReset(per_tuple_context);
4507
4508 /*
4509 * If necessary, become the role that was active when the trigger got
4510 * queued. Note that the role might have been dropped since the trigger
4511 * was queued, but if that is a problem, we will get an error later.
4512 * Checking here would still leave a race condition.
4513 */
4514 GetUserIdAndSecContext(&save_rolid, &save_sec_context);
4515 if (save_rolid != evtshared->ats_rolid)
4517 save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
4518
4519 /*
4520 * Call the trigger and throw away any possibly returned updated tuple.
4521 * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4522 */
4523 rettuple = ExecCallTriggerFunc(&LocTriggerData,
4524 tgindx,
4525 finfo,
4526 NULL,
4527 per_tuple_context);
4528 if (rettuple != NULL &&
4529 rettuple != LocTriggerData.tg_trigtuple &&
4530 rettuple != LocTriggerData.tg_newtuple)
4531 heap_freetuple(rettuple);
4532
4533 /* Restore the current role if necessary */
4534 if (save_rolid != evtshared->ats_rolid)
4535 SetUserIdAndSecContext(save_rolid, save_sec_context);
4536
4537 /*
4538 * Release resources
4539 */
4540 if (should_free_trig)
4541 heap_freetuple(LocTriggerData.tg_trigtuple);
4542 if (should_free_new)
4543 heap_freetuple(LocTriggerData.tg_newtuple);
4544
4545 /* don't clear slots' contents if foreign table */
4546 if (trig_tuple_slot1 == NULL)
4547 {
4548 if (LocTriggerData.tg_trigslot)
4549 ExecClearTuple(LocTriggerData.tg_trigslot);
4550 if (LocTriggerData.tg_newslot)
4551 ExecClearTuple(LocTriggerData.tg_newslot);
4552 }
4553
4554 /*
4555 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4556 * one "tuple returned" (really the number of firings).
4557 */
4558 if (instr)
4559 InstrStopNode(instr + tgindx, 1);
4560}
4561
4562
4563/*
4564 * afterTriggerMarkEvents()
4565 *
4566 * Scan the given event list for not yet invoked events. Mark the ones
4567 * that can be invoked now with the current firing ID.
4568 *
4569 * If move_list isn't NULL, events that are not to be invoked now are
4570 * transferred to move_list.
4571 *
4572 * When immediate_only is true, do not invoke currently-deferred triggers.
4573 * (This will be false only at main transaction exit.)
4574 *
4575 * Returns true if any invokable events were found.
4576 */
4577static bool
4579 AfterTriggerEventList *move_list,
4580 bool immediate_only)
4581{
4582 bool found = false;
4583 bool deferred_found = false;
4584 AfterTriggerEvent event;
4586
4587 for_each_event_chunk(event, chunk, *events)
4588 {
4589 AfterTriggerShared evtshared = GetTriggerSharedData(event);
4590 bool defer_it = false;
4591
4592 if (!(event->ate_flags &
4594 {
4595 /*
4596 * This trigger hasn't been called or scheduled yet. Check if we
4597 * should call it now.
4598 */
4599 if (immediate_only && afterTriggerCheckState(evtshared))
4600 {
4601 defer_it = true;
4602 }
4603 else
4604 {
4605 /*
4606 * Mark it as to be fired in this firing cycle.
4607 */
4609 event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4610 found = true;
4611 }
4612 }
4613
4614 /*
4615 * If it's deferred, move it to move_list, if requested.
4616 */
4617 if (defer_it && move_list != NULL)
4618 {
4619 deferred_found = true;
4620 /* add it to move_list */
4621 afterTriggerAddEvent(move_list, event, evtshared);
4622 /* mark original copy "done" so we don't do it again */
4623 event->ate_flags |= AFTER_TRIGGER_DONE;
4624 }
4625 }
4626
4627 /*
4628 * We could allow deferred triggers if, before the end of the
4629 * security-restricted operation, we were to verify that a SET CONSTRAINTS
4630 * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4631 */
4632 if (deferred_found && InSecurityRestrictedOperation())
4633 ereport(ERROR,
4634 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4635 errmsg("cannot fire deferred trigger within security-restricted operation")));
4636
4637 return found;
4638}
4639
4640/*
4641 * afterTriggerInvokeEvents()
4642 *
4643 * Scan the given event list for events that are marked as to be fired
4644 * in the current firing cycle, and fire them.
4645 *
4646 * If estate isn't NULL, we use its result relation info to avoid repeated
4647 * openings and closing of trigger target relations. If it is NULL, we
4648 * make one locally to cache the info in case there are multiple trigger
4649 * events per rel.
4650 *
4651 * When delete_ok is true, it's safe to delete fully-processed events.
4652 * (We are not very tense about that: we simply reset a chunk to be empty
4653 * if all its events got fired. The objective here is just to avoid useless
4654 * rescanning of events when a trigger queues new events during transaction
4655 * end, so it's not necessary to worry much about the case where only
4656 * some events are fired.)
4657 *
4658 * Returns true if no unfired events remain in the list (this allows us
4659 * to avoid repeating afterTriggerMarkEvents).
4660 */
4661static bool
4663 CommandId firing_id,
4664 EState *estate,
4665 bool delete_ok)
4666{
4667 bool all_fired = true;
4669 MemoryContext per_tuple_context;
4670 bool local_estate = false;
4671 ResultRelInfo *rInfo = NULL;
4672 Relation rel = NULL;
4673 TriggerDesc *trigdesc = NULL;
4674 FmgrInfo *finfo = NULL;
4675 Instrumentation *instr = NULL;
4676 TupleTableSlot *slot1 = NULL,
4677 *slot2 = NULL;
4678
4679 /* Make a local EState if need be */
4680 if (estate == NULL)
4681 {
4682 estate = CreateExecutorState();
4683 local_estate = true;
4684 }
4685
4686 /* Make a per-tuple memory context for trigger function calls */
4687 per_tuple_context =
4689 "AfterTriggerTupleContext",
4691
4692 for_each_chunk(chunk, *events)
4693 {
4694 AfterTriggerEvent event;
4695 bool all_fired_in_chunk = true;
4696
4697 for_each_event(event, chunk)
4698 {
4699 AfterTriggerShared evtshared = GetTriggerSharedData(event);
4700
4701 /*
4702 * Is it one for me to fire?
4703 */
4704 if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4705 evtshared->ats_firing_id == firing_id)
4706 {
4707 ResultRelInfo *src_rInfo,
4708 *dst_rInfo;
4709
4710 /*
4711 * So let's fire it... but first, find the correct relation if
4712 * this is not the same relation as before.
4713 */
4714 if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4715 {
4716 rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4717 NULL);
4718 rel = rInfo->ri_RelationDesc;
4719 /* Catch calls with insufficient relcache refcounting */
4721 trigdesc = rInfo->ri_TrigDesc;
4722 /* caution: trigdesc could be NULL here */
4723 finfo = rInfo->ri_TrigFunctions;
4724 instr = rInfo->ri_TrigInstrument;
4725 if (slot1 != NULL)
4726 {
4729 slot1 = slot2 = NULL;
4730 }
4731 if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4732 {
4733 slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4735 slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4737 }
4738 }
4739
4740 /*
4741 * Look up source and destination partition result rels of a
4742 * cross-partition update event.
4743 */
4744 if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4746 {
4747 Assert(OidIsValid(event->ate_src_part) &&
4748 OidIsValid(event->ate_dst_part));
4749 src_rInfo = ExecGetTriggerResultRel(estate,
4750 event->ate_src_part,
4751 rInfo);
4752 dst_rInfo = ExecGetTriggerResultRel(estate,
4753 event->ate_dst_part,
4754 rInfo);
4755 }
4756 else
4757 src_rInfo = dst_rInfo = rInfo;
4758
4759 /*
4760 * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4761 * still set, so recursive examinations of the event list
4762 * won't try to re-fire it.
4763 */
4764 AfterTriggerExecute(estate, event, rInfo,
4765 src_rInfo, dst_rInfo,
4766 trigdesc, finfo, instr,
4767 per_tuple_context, slot1, slot2);
4768
4769 /*
4770 * Mark the event as done.
4771 */
4772 event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4773 event->ate_flags |= AFTER_TRIGGER_DONE;
4774 }
4775 else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4776 {
4777 /* something remains to be done */
4778 all_fired = all_fired_in_chunk = false;
4779 }
4780 }
4781
4782 /* Clear the chunk if delete_ok and nothing left of interest */
4783 if (delete_ok && all_fired_in_chunk)
4784 {
4785 chunk->freeptr = CHUNK_DATA_START(chunk);
4786 chunk->endfree = chunk->endptr;
4787
4788 /*
4789 * If it's last chunk, must sync event list's tailfree too. Note
4790 * that delete_ok must NOT be passed as true if there could be
4791 * additional AfterTriggerEventList values pointing at this event
4792 * list, since we'd fail to fix their copies of tailfree.
4793 */
4794 if (chunk == events->tail)
4795 events->tailfree = chunk->freeptr;
4796 }
4797 }
4798 if (slot1 != NULL)
4799 {
4802 }
4803
4804 /* Release working resources */
4805 MemoryContextDelete(per_tuple_context);
4806
4807 if (local_estate)
4808 {
4810 ExecResetTupleTable(estate->es_tupleTable, false);
4811 FreeExecutorState(estate);
4812 }
4813
4814 return all_fired;
4815}
4816
4817
4818/*
4819 * GetAfterTriggersTableData
4820 *
4821 * Find or create an AfterTriggersTableData struct for the specified
4822 * trigger event (relation + operation type). Ignore existing structs
4823 * marked "closed"; we don't want to put any additional tuples into them,
4824 * nor change their stmt-triggers-fired state.
4825 *
4826 * Note: the AfterTriggersTableData list is allocated in the current
4827 * (sub)transaction's CurTransactionContext. This is OK because
4828 * we don't need it to live past AfterTriggerEndQuery.
4829 */
4832{
4835 MemoryContext oldcxt;
4836 ListCell *lc;
4837
4838 /* Caller should have ensured query_depth is OK. */
4842
4843 foreach(lc, qs->tables)
4844 {
4845 table = (AfterTriggersTableData *) lfirst(lc);
4846 if (table->relid == relid && table->cmdType == cmdType &&
4847 !table->closed)
4848 return table;
4849 }
4850
4852
4854 table->relid = relid;
4855 table->cmdType = cmdType;
4856 qs->tables = lappend(qs->tables, table);
4857
4858 MemoryContextSwitchTo(oldcxt);
4859
4860 return table;
4861}
4862
4863/*
4864 * Returns a TupleTableSlot suitable for holding the tuples to be put
4865 * into AfterTriggersTableData's transition table tuplestores.
4866 */
4867static TupleTableSlot *
4869 TupleDesc tupdesc)
4870{
4871 /* Create it if not already done. */
4872 if (!table->storeslot)
4873 {
4874 MemoryContext oldcxt;
4875
4876 /*
4877 * We need this slot only until AfterTriggerEndQuery, but making it
4878 * last till end-of-subxact is good enough. It'll be freed by
4879 * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4880 * a different lifespan, so we'd better make a copy of that.
4881 */
4883 tupdesc = CreateTupleDescCopy(tupdesc);
4885 MemoryContextSwitchTo(oldcxt);
4886 }
4887
4888 return table->storeslot;
4889}
4890
4891/*
4892 * MakeTransitionCaptureState
4893 *
4894 * Make a TransitionCaptureState object for the given TriggerDesc, target
4895 * relation, and operation type. The TCS object holds all the state needed
4896 * to decide whether to capture tuples in transition tables.
4897 *
4898 * If there are no triggers in 'trigdesc' that request relevant transition
4899 * tables, then return NULL.
4900 *
4901 * The resulting object can be passed to the ExecAR* functions. When
4902 * dealing with child tables, the caller can set tcs_original_insert_tuple
4903 * to avoid having to reconstruct the original tuple in the root table's
4904 * format.
4905 *
4906 * Note that we copy the flags from a parent table into this struct (rather
4907 * than subsequently using the relation's TriggerDesc directly) so that we can
4908 * use it to control collection of transition tuples from child tables.
4909 *
4910 * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4911 * on the same table during one query should share one transition table.
4912 * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4913 * looked up using the table OID + CmdType, and are merely referenced by
4914 * the TransitionCaptureState objects we hand out to callers.
4915 */
4918{
4920 bool need_old_upd,
4921 need_new_upd,
4922 need_old_del,
4923 need_new_ins;
4925 MemoryContext oldcxt;
4926 ResourceOwner saveResourceOwner;
4927
4928 if (trigdesc == NULL)
4929 return NULL;
4930
4931 /* Detect which table(s) we need. */
4932 switch (cmdType)
4933 {
4934 case CMD_INSERT:
4935 need_old_upd = need_old_del = need_new_upd = false;
4936 need_new_ins = trigdesc->trig_insert_new_table;
4937 break;
4938 case CMD_UPDATE:
4939 need_old_upd = trigdesc->trig_update_old_table;
4940 need_new_upd = trigdesc->trig_update_new_table;
4941 need_old_del = need_new_ins = false;
4942 break;
4943 case CMD_DELETE:
4944 need_old_del = trigdesc->trig_delete_old_table;
4945 need_old_upd = need_new_upd = need_new_ins = false;
4946 break;
4947 case CMD_MERGE:
4948 need_old_upd = trigdesc->trig_update_old_table;
4949 need_new_upd = trigdesc->trig_update_new_table;
4950 need_old_del = trigdesc->trig_delete_old_table;
4951 need_new_ins = trigdesc->trig_insert_new_table;
4952 break;
4953 default:
4954 elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4955 /* keep compiler quiet */
4956 need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4957 break;
4958 }
4959 if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4960 return NULL;
4961
4962 /* Check state, like AfterTriggerSaveEvent. */
4963 if (afterTriggers.query_depth < 0)
4964 elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4965
4966 /* Be sure we have enough space to record events at this query depth. */
4969
4970 /*
4971 * Find or create an AfterTriggersTableData struct to hold the
4972 * tuplestore(s). If there's a matching struct but it's marked closed,
4973 * ignore it; we need a newer one.
4974 *
4975 * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4976 * allocated in the current (sub)transaction's CurTransactionContext, and
4977 * the tuplestores are managed by the (sub)transaction's resource owner.
4978 * This is sufficient lifespan because we do not allow triggers using
4979 * transition tables to be deferrable; they will be fired during
4980 * AfterTriggerEndQuery, after which it's okay to delete the data.
4981 */
4982 table = GetAfterTriggersTableData(relid, cmdType);
4983
4984 /* Now create required tuplestore(s), if we don't have them already. */
4986 saveResourceOwner = CurrentResourceOwner;
4988
4989 if (need_old_upd && table->old_upd_tuplestore == NULL)
4990 table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4991 if (need_new_upd && table->new_upd_tuplestore == NULL)
4992 table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4993 if (need_old_del && table->old_del_tuplestore == NULL)
4994 table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4995 if (need_new_ins && table->new_ins_tuplestore == NULL)
4996 table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4997
4998 CurrentResourceOwner = saveResourceOwner;
4999 MemoryContextSwitchTo(oldcxt);
5000
5001 /* Now build the TransitionCaptureState struct, in caller's context */
5003 state->tcs_delete_old_table = need_old_del;
5004 state->tcs_update_old_table = need_old_upd;
5005 state->tcs_update_new_table = need_new_upd;
5006 state->tcs_insert_new_table = need_new_ins;
5007 state->tcs_private = table;
5008
5009 return state;
5010}
5011
5012
5013/* ----------
5014 * AfterTriggerBeginXact()
5015 *
5016 * Called at transaction start (either BEGIN or implicit for single
5017 * statement outside of transaction block).
5018 * ----------
5019 */
5020void
5022{
5023 /*
5024 * Initialize after-trigger state structure to empty
5025 */
5026 afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
5028
5029 /*
5030 * Verify that there is no leftover state remaining. If these assertions
5031 * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
5032 * up properly.
5033 */
5034 Assert(afterTriggers.state == NULL);
5041}
5042
5043
5044/* ----------
5045 * AfterTriggerBeginQuery()
5046 *
5047 * Called just before we start processing a single query within a
5048 * transaction (or subtransaction). Most of the real work gets deferred
5049 * until somebody actually tries to queue a trigger event.
5050 * ----------
5051 */
5052void
5054{
5055 /* Increase the query stack depth */
5057}
5058
5059
5060/* ----------
5061 * AfterTriggerEndQuery()
5062 *
5063 * Called after one query has been completely processed. At this time
5064 * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
5065 * transfer deferred trigger events to the global deferred-trigger list.
5066 *
5067 * Note that this must be called BEFORE closing down the executor
5068 * with ExecutorEnd, because we make use of the EState's info about
5069 * target relations. Normally it is called from ExecutorFinish.
5070 * ----------
5071 */
5072void
5074{
5076
5077 /* Must be inside a query, too */
5079
5080 /*
5081 * If we never even got as far as initializing the event stack, there
5082 * certainly won't be any events, so exit quickly.
5083 */
5085 {
5087 return;
5088 }
5089
5090 /*
5091 * Process all immediate-mode triggers queued by the query, and move the
5092 * deferred ones to the main list of deferred events.
5093 *
5094 * Notice that we decide which ones will be fired, and put the deferred
5095 * ones on the main list, before anything is actually fired. This ensures
5096 * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5097 * IMMEDIATE: all events we have decided to defer will be available for it
5098 * to fire.
5099 *
5100 * We loop in case a trigger queues more events at the same query level.
5101 * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5102 * will instead fire any triggers in a dedicated query level. Foreign key
5103 * enforcement triggers do add to the current query level, thanks to their
5104 * passing fire_triggers = false to SPI_execute_snapshot(). Other
5105 * C-language triggers might do likewise.
5106 *
5107 * If we find no firable events, we don't have to increment
5108 * firing_counter.
5109 */
5111
5112 for (;;)
5113 {
5115 {
5117 AfterTriggerEventChunk *oldtail = qs->events.tail;
5118
5119 if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5120 break; /* all fired */
5121
5122 /*
5123 * Firing a trigger could result in query_stack being repalloc'd,
5124 * so we must recalculate qs after each afterTriggerInvokeEvents
5125 * call. Furthermore, it's unsafe to pass delete_ok = true here,
5126 * because that could cause afterTriggerInvokeEvents to try to
5127 * access qs->events after the stack has been repalloc'd.
5128 */
5130
5131 /*
5132 * We'll need to scan the events list again. To reduce the cost
5133 * of doing so, get rid of completely-fired chunks. We know that
5134 * all events were marked IN_PROGRESS or DONE at the conclusion of
5135 * afterTriggerMarkEvents, so any still-interesting events must
5136 * have been added after that, and so must be in the chunk that
5137 * was then the tail chunk, or in later chunks. So, zap all
5138 * chunks before oldtail. This is approximately the same set of
5139 * events we would have gotten rid of by passing delete_ok = true.
5140 */
5141 Assert(oldtail != NULL);
5142 while (qs->events.head != oldtail)
5144 }
5145 else
5146 break;
5147 }
5148
5149 /* Release query-level-local storage, including tuplestores if any */
5151
5153}
5154
5155
5156/*
5157 * AfterTriggerFreeQuery
5158 * Release subsidiary storage for a trigger query level.
5159 * This includes closing down tuplestores.
5160 * Note: it's important for this to be safe if interrupted by an error
5161 * and then called again for the same query level.
5162 */
5163static void
5165{
5166 Tuplestorestate *ts;
5167 List *tables;
5168 ListCell *lc;
5169
5170 /* Drop the trigger events */
5172
5173 /* Drop FDW tuplestore if any */
5174 ts = qs->fdw_tuplestore;
5175 qs->fdw_tuplestore = NULL;
5176 if (ts)
5177 tuplestore_end(ts);
5178
5179 /* Release per-table subsidiary storage */
5180 tables = qs->tables;
5181 foreach(lc, tables)
5182 {
5184
5185 ts = table->old_upd_tuplestore;
5186 table->old_upd_tuplestore = NULL;
5187 if (ts)
5188 tuplestore_end(ts);
5189 ts = table->new_upd_tuplestore;
5190 table->new_upd_tuplestore = NULL;
5191 if (ts)
5192 tuplestore_end(ts);
5193 ts = table->old_del_tuplestore;
5194 table->old_del_tuplestore = NULL;
5195 if (ts)
5196 tuplestore_end(ts);
5197 ts = table->new_ins_tuplestore;
5198 table->new_ins_tuplestore = NULL;
5199 if (ts)
5200 tuplestore_end(ts);
5201 if (table->storeslot)
5202 {
5203 TupleTableSlot *slot = table->storeslot;
5204
5205 table->storeslot = NULL;
5207 }
5208 }
5209
5210 /*
5211 * Now free the AfterTriggersTableData structs and list cells. Reset list
5212 * pointer first; if list_free_deep somehow gets an error, better to leak
5213 * that storage than have an infinite loop.
5214 */
5215 qs->tables = NIL;
5216 list_free_deep(tables);
5217}
5218
5219
5220/* ----------
5221 * AfterTriggerFireDeferred()
5222 *
5223 * Called just before the current transaction is committed. At this
5224 * time we invoke all pending DEFERRED triggers.
5225 *
5226 * It is possible for other modules to queue additional deferred triggers
5227 * during pre-commit processing; therefore xact.c may have to call this
5228 * multiple times.
5229 * ----------
5230 */
5231void
5233{
5234 AfterTriggerEventList *events;
5235 bool snap_pushed = false;
5236
5237 /* Must not be inside a query */
5239
5240 /*
5241 * If there are any triggers to fire, make sure we have set a snapshot for
5242 * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5243 * can't assume ActiveSnapshot is valid on entry.)
5244 */
5245 events = &