/* (Removed: non-source doxygen page chrome from the HTML scrape of this file.) */
/*-------------------------------------------------------------------------
 *
 * nodeModifyTable.c
 *	  routines to handle ModifyTable nodes.
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeModifyTable.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *		ExecInitModifyTable - initialize the ModifyTable node
 *		ExecModifyTable		- retrieve the next tuple from the node
 *		ExecEndModifyTable	- shut down the ModifyTable node
 *		ExecReScanModifyTable - rescan the ModifyTable node
 *
 *	 NOTES
 *		The ModifyTable node receives input from its outerPlan, which is
 *		the data to insert for INSERT cases, the changed columns' new
 *		values plus row-locating info for UPDATE and MERGE cases, or just the
 *		row-locating info for DELETE cases.
 *
 *		The relation to modify can be an ordinary table, a foreign table, or a
 *		view.  If it's a view, either it has sufficient INSTEAD OF triggers or
 *		this node executes only MERGE ... DO NOTHING.  If the original MERGE
 *		targeted a view not in one of those two categories, earlier processing
 *		already pointed the ModifyTable result relation to an underlying
 *		relation of that other view.  This node does process
 *		ri_WithCheckOptions, which may have expressions from those other,
 *		automatically updatable views.
 *
 *		MERGE runs a join between the source relation and the target table.
 *		If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
 *		is an outer join that might output tuples without a matching target
 *		tuple.  In this case, any unmatched target tuples will have NULL
 *		row-locating info, and only INSERT can be run.  But for matched target
 *		tuples, the row-locating info is used to determine the tuple to UPDATE
 *		or DELETE.  When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
 *		SOURCE, all tuples produced by the join will include a matching target
 *		tuple, so all tuples contain row-locating info.
 *
 *		If the query specifies RETURNING, then the ModifyTable returns a
 *		RETURNING tuple after completing each row insert, update, or delete.
 *		It must be called again to continue the operation.  Without RETURNING,
 *		we just loop within the node until all the work is done, then
 *		return NULL.  This avoids useless call/return overhead.
 */
52
53#include "postgres.h"
54
55#include "access/htup_details.h"
56#include "access/tableam.h"
57#include "access/tupconvert.h"
58#include "access/xact.h"
59#include "commands/trigger.h"
61#include "executor/executor.h"
62#include "executor/instrument.h"
64#include "foreign/fdwapi.h"
65#include "miscadmin.h"
66#include "nodes/nodeFuncs.h"
67#include "optimizer/optimizer.h"
70#include "storage/lmgr.h"
71#include "utils/builtins.h"
72#include "utils/datum.h"
74#include "utils/rangetypes.h"
75#include "utils/rel.h"
76#include "utils/snapmgr.h"
77
78
79typedef struct MTTargetRelLookup
80{
81 Oid relationOid; /* hash key, must be first */
82 int relationIndex; /* rel's index in resultRelInfo[] array */
84
85/*
86 * Context struct for a ModifyTable operation, containing basic execution
87 * state and some output variables populated by ExecUpdateAct() and
88 * ExecDeleteAct() to report the result of their actions to callers.
89 */
90typedef struct ModifyTableContext
91{
92 /* Operation state */
96
97 /*
98 * Slot containing tuple obtained from ModifyTable's subplan. Used to
99 * access "junk" columns that are not going to be stored.
100 */
102
103 /*
104 * Information about the changes that were made concurrently to a tuple
105 * being updated or deleted
106 */
108
109 /*
110 * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
111 * clause that refers to OLD columns (converted to the root's tuple
112 * descriptor).
113 */
115
116 /*
117 * The tuple projected by the INSERT's RETURNING clause, when doing a
118 * cross-partition UPDATE
119 */
122
123/*
124 * Context struct containing output data specific to UPDATE operations.
125 */
126typedef struct UpdateContext
127{
128 bool crossPartUpdate; /* was it a cross-partition update? */
129 TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
130
131 /*
132 * Lock mode to acquire on the latest tuple version before performing
133 * EvalPlanQual on it
134 */
137
138
139static void ExecBatchInsert(ModifyTableState *mtstate,
140 ResultRelInfo *resultRelInfo,
141 TupleTableSlot **slots,
143 int numSlots,
144 EState *estate,
145 bool canSetTag);
146static void ExecPendingInserts(EState *estate);
153static bool ExecOnConflictLockRow(ModifyTableContext *context,
156 Relation relation,
157 LockTupleMode lockmode,
158 bool isUpdate);
159static bool ExecOnConflictUpdate(ModifyTableContext *context,
160 ResultRelInfo *resultRelInfo,
163 bool canSetTag,
164 TupleTableSlot **returning);
165static bool ExecOnConflictSelect(ModifyTableContext *context,
166 ResultRelInfo *resultRelInfo,
169 bool canSetTag,
170 TupleTableSlot **returning);
172 EState *estate,
173 ResultRelInfo *resultRelInfo,
176 EState *estate,
177 PartitionTupleRouting *proute,
178 ResultRelInfo *targetRelInfo,
179 TupleTableSlot *slot,
181
183 ResultRelInfo *resultRelInfo,
185 HeapTuple oldtuple,
186 bool canSetTag);
187static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
189 ResultRelInfo *resultRelInfo,
191 HeapTuple oldtuple,
192 bool canSetTag,
193 bool *matched);
195 ResultRelInfo *resultRelInfo,
196 bool canSetTag);
197static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate);
198static void fireBSTriggers(ModifyTableState *node);
199static void fireASTriggers(ModifyTableState *node);
200
201
202/*
203 * Verify that the tuples to be produced by INSERT match the
204 * target relation's rowtype
205 *
206 * We do this to guard against stale plans. If plan invalidation is
207 * functioning properly then we should never get a failure here, but better
208 * safe than sorry. Note that this is called after we have obtained lock
209 * on the target rel, so the rowtype can't change underneath us.
210 *
211 * The plan output is represented by its targetlist, because that makes
212 * handling the dropped-column case easier.
213 *
214 * We used to use this for UPDATE as well, but now the equivalent checks
215 * are done in ExecBuildUpdateProjection.
216 */
217static void
218ExecCheckPlanOutput(Relation resultRel, List *targetList)
219{
220 TupleDesc resultDesc = RelationGetDescr(resultRel);
221 int attno = 0;
222 ListCell *lc;
223
224 foreach(lc, targetList)
225 {
228
229 Assert(!tle->resjunk); /* caller removed junk items already */
230
231 if (attno >= resultDesc->natts)
234 errmsg("table row type and query-specified row type do not match"),
235 errdetail("Query has too many columns.")));
236 attr = TupleDescAttr(resultDesc, attno);
237 attno++;
238
239 /*
240 * Special cases here should match planner's expand_insert_targetlist.
241 */
242 if (attr->attisdropped)
243 {
244 /*
245 * For a dropped column, we can't check atttypid (it's likely 0).
246 * In any case the planner has most likely inserted an INT4 null.
247 * What we insist on is just *some* NULL constant.
248 */
249 if (!IsA(tle->expr, Const) ||
250 !((Const *) tle->expr)->constisnull)
253 errmsg("table row type and query-specified row type do not match"),
254 errdetail("Query provides a value for a dropped column at ordinal position %d.",
255 attno)));
256 }
257 else if (attr->attgenerated)
258 {
259 /*
260 * For a generated column, the planner will have inserted a null
261 * of the column's base type (to avoid possibly failing on domain
262 * not-null constraints). It doesn't seem worth insisting on that
263 * exact type though, since a null value is type-independent. As
264 * above, just insist on *some* NULL constant.
265 */
266 if (!IsA(tle->expr, Const) ||
267 !((Const *) tle->expr)->constisnull)
270 errmsg("table row type and query-specified row type do not match"),
271 errdetail("Query provides a value for a generated column at ordinal position %d.",
272 attno)));
273 }
274 else
275 {
276 /* Normal case: demand type match */
277 if (exprType((Node *) tle->expr) != attr->atttypid)
280 errmsg("table row type and query-specified row type do not match"),
281 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
282 format_type_be(attr->atttypid),
283 attno,
284 format_type_be(exprType((Node *) tle->expr)))));
285 }
286 }
287 if (attno != resultDesc->natts)
290 errmsg("table row type and query-specified row type do not match"),
291 errdetail("Query has too few columns.")));
292}
293
294/*
295 * ExecProcessReturning --- evaluate a RETURNING list
296 *
297 * context: context for the ModifyTable operation
298 * resultRelInfo: current result rel
299 * isDelete: true if the operation/merge action is a DELETE
300 * oldSlot: slot holding old tuple deleted or updated
301 * newSlot: slot holding new tuple inserted or updated
302 * planSlot: slot holding tuple returned by top subplan node
303 *
304 * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
305 * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
306 * modify is disabled if the RETURNING list refers to any OLD/NEW values).
307 *
308 * Note: For the SELECT path of INSERT ... ON CONFLICT DO SELECT, oldSlot and
309 * newSlot are both the existing tuple, since it's not changed.
310 *
311 * Returns a slot holding the result tuple
312 */
313static TupleTableSlot *
315 ResultRelInfo *resultRelInfo,
316 bool isDelete,
319 TupleTableSlot *planSlot)
320{
321 EState *estate = context->estate;
323 ExprContext *econtext = projectReturning->pi_exprContext;
324
325 /* Make tuple and any needed join variables available to ExecProject */
326 if (isDelete)
327 {
328 /* return old tuple by default */
329 if (oldSlot)
330 econtext->ecxt_scantuple = oldSlot;
331 }
332 else
333 {
334 /* return new tuple by default */
335 if (newSlot)
336 econtext->ecxt_scantuple = newSlot;
337 }
338 econtext->ecxt_outertuple = planSlot;
339
340 /* Make old/new tuples available to ExecProject, if required */
341 if (oldSlot)
342 econtext->ecxt_oldtuple = oldSlot;
343 else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
344 econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
345 else
346 econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
347
348 if (newSlot)
349 econtext->ecxt_newtuple = newSlot;
350 else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
351 econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
352 else
353 econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
354
355 /*
356 * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
357 * information is required to evaluate ReturningExpr nodes and also in
358 * ExecEvalSysVar() and ExecEvalWholeRowVar().
359 */
360 if (oldSlot == NULL)
361 projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
362 else
363 projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
364
365 if (newSlot == NULL)
366 projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
367 else
368 projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
369
370 /* Compute the RETURNING expressions */
372}
373
374/*
375 * ExecCheckTupleVisible -- verify tuple is visible
376 *
377 * It would not be consistent with guarantees of the higher isolation levels to
378 * proceed with avoiding insertion (taking speculative insertion's alternative
379 * path) on the basis of another tuple that is not visible to MVCC snapshot.
380 * Check for the need to raise a serialization failure, and do so as necessary.
381 */
382static void
384 Relation rel,
385 TupleTableSlot *slot)
386{
388 return;
389
390 if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
391 {
393 TransactionId xmin;
394 bool isnull;
395
397 Assert(!isnull);
399
400 /*
401 * We should not raise a serialization failure if the conflict is
402 * against a tuple inserted by our own transaction, even if it's not
403 * visible to our snapshot. (This would happen, for example, if
404 * conflicting keys are proposed for insertion in a single command.)
405 */
409 errmsg("could not serialize access due to concurrent update")));
410 }
411}
412
413/*
414 * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
415 */
416static void
419 ItemPointer tid,
421{
422 Relation rel = relinfo->ri_RelationDesc;
423
424 /* Redundantly check isolation level */
426 return;
427
429 elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
430 ExecCheckTupleVisible(estate, rel, tempSlot);
432}
433
434/*
435 * Initialize generated columns handling for a tuple
436 *
437 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
438 * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
439 * This is used only for stored generated columns.
440 *
441 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
442 * This is used by both stored and virtual generated columns.
443 *
444 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
445 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
446 * cross-partition UPDATEs, since a partition might be the target of both
447 * UPDATE and INSERT actions.
448 */
449void
451 EState *estate,
452 CmdType cmdtype)
453{
454 Relation rel = resultRelInfo->ri_RelationDesc;
455 TupleDesc tupdesc = RelationGetDescr(rel);
456 int natts = tupdesc->natts;
459 Bitmapset *updatedCols;
461
462 /* Nothing to do if no generated columns */
463 if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
464 return;
465
466 /*
467 * In an UPDATE, we can skip computing any generated columns that do not
468 * depend on any UPDATE target column. But if there is a BEFORE ROW
469 * UPDATE trigger, we cannot skip because the trigger might change more
470 * columns.
471 */
472 if (cmdtype == CMD_UPDATE &&
474 updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
475 else
476 updatedCols = NULL;
477
478 /*
479 * Make sure these data structures are built in the per-query memory
480 * context so they'll survive throughout the query.
481 */
483
484 ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
486
487 for (int i = 0; i < natts; i++)
488 {
489 char attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;
490
491 if (attgenerated)
492 {
493 Expr *expr;
494
495 /* Fetch the GENERATED AS expression tree */
496 expr = (Expr *) build_column_default(rel, i + 1);
497 if (expr == NULL)
498 elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
499 i + 1, RelationGetRelationName(rel));
500
501 /*
502 * If it's an update with a known set of update target columns,
503 * see if we can skip the computation.
504 */
505 if (updatedCols)
506 {
507 Bitmapset *attrs_used = NULL;
508
509 pull_varattnos((Node *) expr, 1, &attrs_used);
510
511 if (!bms_overlap(updatedCols, attrs_used))
512 continue; /* need not update this column */
513 }
514
515 /* No luck, so prepare the expression for execution */
516 if (attgenerated == ATTRIBUTE_GENERATED_STORED)
517 {
518 ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
520 }
521
522 /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
523 if (cmdtype == CMD_UPDATE)
524 resultRelInfo->ri_extraUpdatedCols =
525 bms_add_member(resultRelInfo->ri_extraUpdatedCols,
527 }
528 }
529
530 if (ri_NumGeneratedNeeded == 0)
531 {
532 /* didn't need it after all */
535 }
536
537 /* Save in appropriate set of fields */
538 if (cmdtype == CMD_UPDATE)
539 {
540 /* Don't call twice */
541 Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
542
543 resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
545
546 resultRelInfo->ri_extraUpdatedCols_valid = true;
547 }
548 else
549 {
550 /* Don't call twice */
551 Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
552
553 resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
555 }
556
558}
559
560/*
561 * Compute stored generated columns for a tuple
562 */
563void
565 EState *estate, TupleTableSlot *slot,
566 CmdType cmdtype)
567{
568 Relation rel = resultRelInfo->ri_RelationDesc;
569 TupleDesc tupdesc = RelationGetDescr(rel);
570 int natts = tupdesc->natts;
571 ExprContext *econtext = GetPerTupleExprContext(estate);
574 Datum *values;
575 bool *nulls;
576
577 /* We should not be called unless this is true */
578 Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
579
580 /*
581 * Initialize the expressions if we didn't already, and check whether we
582 * can exit early because nothing needs to be computed.
583 */
584 if (cmdtype == CMD_UPDATE)
585 {
586 if (resultRelInfo->ri_GeneratedExprsU == NULL)
587 ExecInitGenerated(resultRelInfo, estate, cmdtype);
588 if (resultRelInfo->ri_NumGeneratedNeededU == 0)
589 return;
590 ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
591 }
592 else
593 {
594 if (resultRelInfo->ri_GeneratedExprsI == NULL)
595 ExecInitGenerated(resultRelInfo, estate, cmdtype);
596 /* Early exit is impossible given the prior Assert */
597 Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
598 ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
599 }
600
602
603 values = palloc_array(Datum, natts);
604 nulls = palloc_array(bool, natts);
605
606 slot_getallattrs(slot);
607 memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
608
609 for (int i = 0; i < natts; i++)
610 {
611 CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
612
613 if (ri_GeneratedExprs[i])
614 {
615 Datum val;
616 bool isnull;
617
618 Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
619
620 econtext->ecxt_scantuple = slot;
621
622 val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
623
624 /*
625 * We must make a copy of val as we have no guarantees about where
626 * memory for a pass-by-reference Datum is located.
627 */
628 if (!isnull)
629 val = datumCopy(val, attr->attbyval, attr->attlen);
630
631 values[i] = val;
632 nulls[i] = isnull;
633 }
634 else
635 {
636 if (!nulls[i])
637 values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
638 }
639 }
640
641 ExecClearTuple(slot);
642 memcpy(slot->tts_values, values, sizeof(*values) * natts);
643 memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
646
648}
649
650/*
651 * ExecInitInsertProjection
652 * Do one-time initialization of projection data for INSERT tuples.
653 *
654 * INSERT queries may need a projection to filter out junk attrs in the tlist.
655 *
656 * This is also a convenient place to verify that the
657 * output of an INSERT matches the target table.
658 */
659static void
661 ResultRelInfo *resultRelInfo)
662{
663 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
664 Plan *subplan = outerPlan(node);
665 EState *estate = mtstate->ps.state;
667 bool need_projection = false;
668 ListCell *l;
669
670 /* Extract non-junk columns of the subplan's result tlist. */
671 foreach(l, subplan->targetlist)
672 {
674
675 if (!tle->resjunk)
677 else
678 need_projection = true;
679 }
680
681 /*
682 * The junk-free list must produce a tuple suitable for the result
683 * relation.
684 */
686
687 /* We'll need a slot matching the table's format. */
688 resultRelInfo->ri_newTupleSlot =
689 table_slot_create(resultRelInfo->ri_RelationDesc,
690 &estate->es_tupleTable);
691
692 /* Build ProjectionInfo if needed (it probably isn't). */
693 if (need_projection)
694 {
696
697 /* need an expression context to do the projection */
698 if (mtstate->ps.ps_ExprContext == NULL)
699 ExecAssignExprContext(estate, &mtstate->ps);
700
701 resultRelInfo->ri_projectNew =
703 mtstate->ps.ps_ExprContext,
704 resultRelInfo->ri_newTupleSlot,
705 &mtstate->ps,
706 relDesc);
707 }
708
709 resultRelInfo->ri_projectNewInfoValid = true;
710}
711
712/*
713 * ExecInitUpdateProjection
714 * Do one-time initialization of projection data for UPDATE tuples.
715 *
716 * UPDATE always needs a projection, because (1) there's always some junk
717 * attrs, and (2) we may need to merge values of not-updated columns from
718 * the old tuple into the final tuple. In UPDATE, the tuple arriving from
719 * the subplan contains only new values for the changed columns, plus row
720 * identity info in the junk attrs.
721 *
722 * This is "one-time" for any given result rel, but we might touch more than
723 * one result rel in the course of an inherited UPDATE, and each one needs
724 * its own projection due to possible column order variation.
725 *
726 * This is also a convenient place to verify that the output of an UPDATE
727 * matches the target table (ExecBuildUpdateProjection does that).
728 */
729static void
731 ResultRelInfo *resultRelInfo)
732{
733 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
734 Plan *subplan = outerPlan(node);
735 EState *estate = mtstate->ps.state;
737 int whichrel;
739
740 /*
741 * Usually, mt_lastResultIndex matches the target rel. If it happens not
742 * to, we can get the index the hard way with an integer division.
743 */
744 whichrel = mtstate->mt_lastResultIndex;
745 if (resultRelInfo != mtstate->resultRelInfo + whichrel)
746 {
747 whichrel = resultRelInfo - mtstate->resultRelInfo;
748 Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
749 }
750
752
753 /*
754 * For UPDATE, we use the old tuple to fill up missing values in the tuple
755 * produced by the subplan to get the new tuple. We need two slots, both
756 * matching the table's desired format.
757 */
758 resultRelInfo->ri_oldTupleSlot =
759 table_slot_create(resultRelInfo->ri_RelationDesc,
760 &estate->es_tupleTable);
761 resultRelInfo->ri_newTupleSlot =
762 table_slot_create(resultRelInfo->ri_RelationDesc,
763 &estate->es_tupleTable);
764
765 /* need an expression context to do the projection */
766 if (mtstate->ps.ps_ExprContext == NULL)
767 ExecAssignExprContext(estate, &mtstate->ps);
768
769 resultRelInfo->ri_projectNew =
771 false, /* subplan did the evaluation */
773 relDesc,
774 mtstate->ps.ps_ExprContext,
775 resultRelInfo->ri_newTupleSlot,
776 &mtstate->ps);
777
778 resultRelInfo->ri_projectNewInfoValid = true;
779}
780
781/*
782 * ExecGetInsertNewTuple
783 * This prepares a "new" tuple ready to be inserted into given result
784 * relation, by removing any junk columns of the plan's output tuple
785 * and (if necessary) coercing the tuple to the right tuple format.
786 */
787static TupleTableSlot *
789 TupleTableSlot *planSlot)
790{
791 ProjectionInfo *newProj = relinfo->ri_projectNew;
792 ExprContext *econtext;
793
794 /*
795 * If there's no projection to be done, just make sure the slot is of the
796 * right type for the target rel. If the planSlot is the right type we
797 * can use it as-is, else copy the data into ri_newTupleSlot.
798 */
799 if (newProj == NULL)
800 {
801 if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
802 {
803 ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
804 return relinfo->ri_newTupleSlot;
805 }
806 else
807 return planSlot;
808 }
809
810 /*
811 * Else project; since the projection output slot is ri_newTupleSlot, this
812 * will also fix any slot-type problem.
813 *
814 * Note: currently, this is dead code, because INSERT cases don't receive
815 * any junk columns so there's never a projection to be done.
816 */
817 econtext = newProj->pi_exprContext;
818 econtext->ecxt_outertuple = planSlot;
819 return ExecProject(newProj);
820}
821
822/*
823 * ExecGetUpdateNewTuple
824 * This prepares a "new" tuple by combining an UPDATE subplan's output
825 * tuple (which contains values of changed columns) with unchanged
826 * columns taken from the old tuple.
827 *
828 * The subplan tuple might also contain junk columns, which are ignored.
829 * Note that the projection also ensures we have a slot of the right type.
830 */
833 TupleTableSlot *planSlot,
835{
836 ProjectionInfo *newProj = relinfo->ri_projectNew;
837 ExprContext *econtext;
838
839 /* Use a few extra Asserts to protect against outside callers */
840 Assert(relinfo->ri_projectNewInfoValid);
841 Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
843
844 econtext = newProj->pi_exprContext;
845 econtext->ecxt_outertuple = planSlot;
846 econtext->ecxt_scantuple = oldSlot;
847 return ExecProject(newProj);
848}
849
850/* ----------------------------------------------------------------
851 * ExecInsert
852 *
853 * For INSERT, we have to insert the tuple into the target relation
854 * (or partition thereof) and insert appropriate tuples into the index
855 * relations.
856 *
857 * slot contains the new tuple value to be stored.
858 *
859 * Returns RETURNING result if any, otherwise NULL.
860 * *inserted_tuple is the tuple that's effectively inserted;
861 * *insert_destrel is the relation where it was inserted.
862 * These are only set on success.
863 *
864 * This may change the currently active tuple conversion map in
865 * mtstate->mt_transition_capture, so the callers must take care to
866 * save the previous value to avoid losing track of it.
867 * ----------------------------------------------------------------
868 */
869static TupleTableSlot *
871 ResultRelInfo *resultRelInfo,
872 TupleTableSlot *slot,
873 bool canSetTag,
876{
877 ModifyTableState *mtstate = context->mtstate;
878 EState *estate = context->estate;
881 TupleTableSlot *planSlot = context->planSlot;
884 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
885 OnConflictAction onconflict = node->onConflictAction;
888
889 /*
890 * If the input result relation is a partitioned table, find the leaf
891 * partition to insert the tuple into.
892 */
893 if (proute)
894 {
896
897 slot = ExecPrepareTupleRouting(mtstate, estate, proute,
898 resultRelInfo, slot,
899 &partRelInfo);
900 resultRelInfo = partRelInfo;
901 }
902
904
905 resultRelationDesc = resultRelInfo->ri_RelationDesc;
906
907 /*
908 * Open the table's indexes, if we have not done so already, so that we
909 * can add new index entries for the inserted tuple.
910 */
911 if (resultRelationDesc->rd_rel->relhasindex &&
912 resultRelInfo->ri_IndexRelationDescs == NULL)
913 ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
914
915 /*
916 * BEFORE ROW INSERT Triggers.
917 *
918 * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
919 * INSERT ... ON CONFLICT statement. We cannot check for constraint
920 * violations before firing these triggers, because they can change the
921 * values to insert. Also, they can run arbitrary user-defined code with
922 * side-effects that we can't cancel by just not inserting the tuple.
923 */
924 if (resultRelInfo->ri_TrigDesc &&
925 resultRelInfo->ri_TrigDesc->trig_insert_before_row)
926 {
927 /* Flush any pending inserts, so rows are visible to the triggers */
929 ExecPendingInserts(estate);
930
931 if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
932 return NULL; /* "do nothing" */
933 }
934
935 /* INSTEAD OF ROW INSERT Triggers */
936 if (resultRelInfo->ri_TrigDesc &&
937 resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
938 {
939 if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
940 return NULL; /* "do nothing" */
941 }
942 else if (resultRelInfo->ri_FdwRoutine)
943 {
944 /*
945 * GENERATED expressions might reference the tableoid column, so
946 * (re-)initialize tts_tableOid before evaluating them.
947 */
948 slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
949
950 /*
951 * Compute stored generated columns
952 */
953 if (resultRelationDesc->rd_att->constr &&
954 resultRelationDesc->rd_att->constr->has_generated_stored)
955 ExecComputeStoredGenerated(resultRelInfo, estate, slot,
956 CMD_INSERT);
957
958 /*
959 * If the FDW supports batching, and batching is requested, accumulate
960 * rows and insert them in batches. Otherwise use the per-row inserts.
961 */
962 if (resultRelInfo->ri_BatchSize > 1)
963 {
964 bool flushed = false;
965
966 /*
967 * When we've reached the desired batch size, perform the
968 * insertion.
969 */
970 if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
971 {
972 ExecBatchInsert(mtstate, resultRelInfo,
973 resultRelInfo->ri_Slots,
974 resultRelInfo->ri_PlanSlots,
975 resultRelInfo->ri_NumSlots,
976 estate, canSetTag);
977 flushed = true;
978 }
979
981
982 if (resultRelInfo->ri_Slots == NULL)
983 {
984 resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
985 resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
986 }
987
988 /*
989 * Initialize the batch slots. We don't know how many slots will
990 * be needed, so we initialize them as the batch grows, and we
991 * keep them across batches. To mitigate an inefficiency in how
992 * resource owner handles objects with many references (as with
993 * many slots all referencing the same tuple descriptor) we copy
994 * the appropriate tuple descriptor for each slot.
995 */
996 if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
997 {
1001
1002 resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
1004
1005 resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
1007
1008 /* remember how many batch slots we initialized */
1009 resultRelInfo->ri_NumSlotsInitialized++;
1010 }
1011
1012 ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
1013 slot);
1014
1015 ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
1016 planSlot);
1017
1018 /*
1019 * If these are the first tuples stored in the buffers, add the
1020 * target rel and the mtstate to the
1021 * es_insert_pending_result_relations and
1022 * es_insert_pending_modifytables lists respectively, except in
1023 * the case where flushing was done above, in which case they
1024 * would already have been added to the lists, so no need to do
1025 * this.
1026 */
1027 if (resultRelInfo->ri_NumSlots == 0 && !flushed)
1028 {
1030 resultRelInfo));
1033 resultRelInfo);
1035 lappend(estate->es_insert_pending_modifytables, mtstate);
1036 }
1038 resultRelInfo));
1039
1040 resultRelInfo->ri_NumSlots++;
1041
1043
1044 return NULL;
1045 }
1046
1047 /*
1048 * insert into foreign table: let the FDW do it
1049 */
1050 slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
1051 resultRelInfo,
1052 slot,
1053 planSlot);
1054
1055 if (slot == NULL) /* "do nothing" */
1056 return NULL;
1057
1058 /*
1059 * AFTER ROW Triggers or RETURNING expressions might reference the
1060 * tableoid column, so (re-)initialize tts_tableOid before evaluating
1061 * them. (This covers the case where the FDW replaced the slot.)
1062 */
1063 slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1064 }
1065 else
1066 {
1068
1069 /*
1070 * Constraints and GENERATED expressions might reference the tableoid
1071 * column, so (re-)initialize tts_tableOid before evaluating them.
1072 */
1074
1075 /*
1076 * Compute stored generated columns
1077 */
1078 if (resultRelationDesc->rd_att->constr &&
1079 resultRelationDesc->rd_att->constr->has_generated_stored)
1080 ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1081 CMD_INSERT);
1082
1083 /*
1084 * Check any RLS WITH CHECK policies.
1085 *
1086 * Normally we should check INSERT policies. But if the insert is the
1087 * result of a partition key update that moved the tuple to a new
1088 * partition, we should instead check UPDATE policies, because we are
1089 * executing policies defined on the target table, and not those
1090 * defined on the child partitions.
1091 *
1092 * If we're running MERGE, we refer to the action that we're executing
1093 * to know if we're doing an INSERT or UPDATE to a partition table.
1094 */
1095 if (mtstate->operation == CMD_UPDATE)
1097 else if (mtstate->operation == CMD_MERGE)
1100 else
1102
1103 /*
1104 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1105 * we are looking for at this point.
1106 */
1107 if (resultRelInfo->ri_WithCheckOptions != NIL)
1108 ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1109
1110 /*
1111 * Check the constraints of the tuple.
1112 */
1113 if (resultRelationDesc->rd_att->constr)
1114 ExecConstraints(resultRelInfo, slot, estate);
1115
1116 /*
1117 * Also check the tuple against the partition constraint, if there is
1118 * one; except that if we got here via tuple-routing, we don't need to
1119 * if there's no BR trigger defined on the partition.
1120 */
1121 if (resultRelationDesc->rd_rel->relispartition &&
1122 (resultRelInfo->ri_RootResultRelInfo == NULL ||
1123 (resultRelInfo->ri_TrigDesc &&
1124 resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1125 ExecPartitionCheck(resultRelInfo, slot, estate, true);
1126
1127 if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1128 {
1129 /* Perform a speculative insertion. */
1133 bool specConflict;
1134 List *arbiterIndexes;
1135
1137 arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1138
1139 /*
1140 * Do a non-conclusive check for conflicts first.
1141 *
1142 * We're not holding any locks yet, so this doesn't guarantee that
1143 * the later insert won't conflict. But it avoids leaving behind
1144 * a lot of canceled speculative insertions, if you run a lot of
1145 * INSERT ON CONFLICT statements that do conflict.
1146 *
1147 * We loop back here if we find a conflict below, either during
1148 * the pre-check, or when we re-check after inserting the tuple
1149 * speculatively. Better allow interrupts in case some bug makes
1150 * this an infinite loop.
1151 */
1152 vlock:
1154 specConflict = false;
1155 if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1157 arbiterIndexes))
1158 {
1159 /* committed conflict tuple found */
1160 if (onconflict == ONCONFLICT_UPDATE)
1161 {
1162 /*
1163 * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1164 * part. Be prepared to retry if the UPDATE fails because
1165 * of another concurrent UPDATE/DELETE to the conflict
1166 * tuple.
1167 */
1168 TupleTableSlot *returning = NULL;
1169
1170 if (ExecOnConflictUpdate(context, resultRelInfo,
1171 &conflictTid, slot, canSetTag,
1172 &returning))
1173 {
1174 InstrCountTuples2(&mtstate->ps, 1);
1175 return returning;
1176 }
1177 else
1178 goto vlock;
1179 }
1180 else if (onconflict == ONCONFLICT_SELECT)
1181 {
1182 /*
1183 * In case of ON CONFLICT DO SELECT, optionally lock the
1184 * conflicting tuple, fetch it and project RETURNING on
1185 * it. Be prepared to retry if locking fails because of a
1186 * concurrent UPDATE/DELETE to the conflict tuple.
1187 */
1188 TupleTableSlot *returning = NULL;
1189
1190 if (ExecOnConflictSelect(context, resultRelInfo,
1191 &conflictTid, slot, canSetTag,
1192 &returning))
1193 {
1194 InstrCountTuples2(&mtstate->ps, 1);
1195 return returning;
1196 }
1197 else
1198 goto vlock;
1199 }
1200 else
1201 {
1202 /*
1203 * In case of ON CONFLICT DO NOTHING, do nothing. However,
1204 * verify that the tuple is visible to the executor's MVCC
1205 * snapshot at higher isolation levels.
1206 *
1207 * Using ExecGetReturningSlot() to store the tuple for the
1208 * recheck isn't that pretty, but we can't trivially use
1209 * the input slot, because it might not be of a compatible
1210 * type. As there's no conflicting usage of
1211 * ExecGetReturningSlot() in the DO NOTHING case...
1212 */
1213 Assert(onconflict == ONCONFLICT_NOTHING);
1214 ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1215 ExecGetReturningSlot(estate, resultRelInfo));
1216 InstrCountTuples2(&mtstate->ps, 1);
1217 return NULL;
1218 }
1219 }
1220
1221 /*
1222 * Before we start insertion proper, acquire our "speculative
1223 * insertion lock". Others can use that to wait for us to decide
1224 * if we're going to go ahead with the insertion, instead of
1225 * waiting for the whole transaction to complete.
1226 */
1227 INJECTION_POINT("exec-insert-before-insert-speculative", NULL);
1229
1230 /* insert the tuple, with the speculative token */
1232 estate->es_output_cid,
1233 0,
1234 NULL,
1235 specToken);
1236
1237 /* insert index entries for tuple */
1238 recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1239 estate, EIIT_NO_DUPE_ERROR,
1240 slot, arbiterIndexes,
1241 &specConflict);
1242
1243 /* adjust the tuple's state accordingly */
1246
1247 /*
1248 * Wake up anyone waiting for our decision. They will re-check
1249 * the tuple, see that it's no longer speculative, and wait on our
1250 * XID as if this was a regularly inserted tuple all along. Or if
1251 * we killed the tuple, they will see it's dead, and proceed as if
1252 * the tuple never existed.
1253 */
1255
1256 /*
1257 * If there was a conflict, start from the beginning. We'll do
1258 * the pre-check again, which will now find the conflicting tuple
1259 * (unless it aborts before we get there).
1260 */
1261 if (specConflict)
1262 {
1264 goto vlock;
1265 }
1266
1267 /* Since there was no insertion conflict, we're done */
1268 }
1269 else
1270 {
1271 /* insert the tuple normally */
1273 estate->es_output_cid,
1274 0, NULL);
1275
1276 /* insert index entries for tuple */
1277 if (resultRelInfo->ri_NumIndices > 0)
1278 recheckIndexes = ExecInsertIndexTuples(resultRelInfo, estate,
1279 0, slot, NIL,
1280 NULL);
1281 }
1282 }
1283
1284 if (canSetTag)
1285 (estate->es_processed)++;
1286
1287 /*
1288 * If this insert is the result of a partition key update that moved the
1289 * tuple to a new partition, put this row into the transition NEW TABLE,
1290 * if there is one. We need to do this separately for DELETE and INSERT
1291 * because they happen on different tables.
1292 */
1294 if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1296 {
1297 ExecARUpdateTriggers(estate, resultRelInfo,
1298 NULL, NULL,
1299 NULL,
1300 NULL,
1301 slot,
1302 NULL,
1303 mtstate->mt_transition_capture,
1304 false);
1305
1306 /*
1307 * We've already captured the NEW TABLE row, so make sure any AR
1308 * INSERT trigger fired below doesn't capture it again.
1309 */
1311 }
1312
1313 /* AFTER ROW INSERT Triggers */
1314 ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1316
1318
1319 /*
1320 * Check any WITH CHECK OPTION constraints from parent views. We are
1321 * required to do this after testing all constraints and uniqueness
1322 * violations per the SQL spec, so we do it after actually inserting the
1323 * record into the heap and all indexes.
1324 *
1325 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1326 * tuple will never be seen, if it violates the WITH CHECK OPTION.
1327 *
1328 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1329 * are looking for at this point.
1330 */
1331 if (resultRelInfo->ri_WithCheckOptions != NIL)
1332 ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1333
1334 /* Process RETURNING if present */
1335 if (resultRelInfo->ri_projectReturning)
1336 {
1338
1339 /*
1340 * If this is part of a cross-partition UPDATE, and the RETURNING list
1341 * refers to any OLD columns, ExecDelete() will have saved the tuple
1342 * deleted from the original partition, which we must use here to
1343 * compute the OLD column values. Otherwise, all OLD column values
1344 * will be NULL.
1345 */
1346 if (context->cpDeletedSlot)
1347 {
1349
1350 /*
1351 * Convert the OLD tuple to the new partition's format/slot, if
1352 * needed. Note that ExecDelete() already converted it to the
1353 * root's partition's format/slot.
1354 */
1355 oldSlot = context->cpDeletedSlot;
1356 tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1357 if (tupconv_map != NULL)
1358 {
1360 oldSlot,
1361 ExecGetReturningSlot(estate,
1362 resultRelInfo));
1363
1364 oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1365 ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1366 }
1367 }
1368
1369 result = ExecProcessReturning(context, resultRelInfo, false,
1370 oldSlot, slot, planSlot);
1371
1372 /*
1373 * For a cross-partition UPDATE, release the old tuple, first making
1374 * sure that the result slot has a local copy of any pass-by-reference
1375 * values.
1376 */
1377 if (context->cpDeletedSlot)
1378 {
1381 if (context->cpDeletedSlot != oldSlot)
1382 ExecClearTuple(context->cpDeletedSlot);
1383 context->cpDeletedSlot = NULL;
1384 }
1385 }
1386
1387 if (inserted_tuple)
1388 *inserted_tuple = slot;
1389 if (insert_destrel)
1390 *insert_destrel = resultRelInfo;
1391
1392 return result;
1393}
1394
1395/* ----------------------------------------------------------------
1396 * ExecForPortionOfLeftovers
1397 *
1398 * Insert tuples for the untouched portion of a row in a FOR
1399 * PORTION OF UPDATE/DELETE
1400 * ----------------------------------------------------------------
1401 */
1402static void
/*
 * NOTE(review): recovered from a doxygen rendering; the leading "14xx"
 * tokens are residual line numbers, and lines whose doxygen numbers are
 * missing from the sequence (e.g. 1403, 1406) contained hyperlinked
 * identifiers that were dropped by extraction — here, the first parameter
 * (presumably ModifyTableContext *context) and the tupleid parameter.
 * Confirm against upstream src/backend/executor/nodeModifyTable.c.
 */
1404 EState *estate,
1405 ResultRelInfo *resultRelInfo,
1407{
1408 ModifyTableState *mtstate = context->mtstate;
1409 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
1410 ForPortionOfExpr *forPortionOf = (ForPortionOfExpr *) node->forPortionOf;
 /* NOTE(review): several local declarations (fpoState, leftoverSlot,
  * oldtupleSlot, rangeAttno, oldRange, leftover, oldOperation, oldTcs)
  * appear lost here — the code below reads and writes them. */
1413 TypeCacheEntry *typcache;
1417 TupleConversionMap *map = NULL;
1418 HeapTuple oldtuple = NULL;
1421 FmgrInfo flinfo;
1422 ReturnSetInfo rsi;
1423 bool didInit = false;
1424 bool shouldFree = false;
1425
1426 LOCAL_FCINFO(fcinfo, 2);
1427
1428 if (!resultRelInfo->ri_forPortionOf)
1429 {
1430 /*
1431 * If we don't have a ForPortionOfState yet, we must be a partition
1432 * child being hit for the first time. Make a copy from the root, with
1433 * our own tupleTableSlot. We do this lazily so that we don't pay the
1434 * price of unused partitions.
1435 */
1437
1438 if (!mtstate->rootResultRelInfo)
1439 elog(ERROR, "no root relation but ri_forPortionOf is uninitialized");
1440
 /* NOTE(review): allocation/copy of leafState from the root's
  * ForPortionOfState appears lost here — TODO confirm upstream. */
1443
1444 leafState->fp_rangeName = fpoState->fp_rangeName;
1445 leafState->fp_rangeType = fpoState->fp_rangeType;
1446 leafState->fp_rangeAttno = fpoState->fp_rangeAttno;
1447 leafState->fp_targetRange = fpoState->fp_targetRange;
1448 leafState->fp_Leftover = fpoState->fp_Leftover;
1449 /* Each partition needs a slot matching its tuple descriptor */
1450 leafState->fp_Existing =
1451 table_slot_create(resultRelInfo->ri_RelationDesc,
1452 &mtstate->ps.state->es_tupleTable);
1453
1454 resultRelInfo->ri_forPortionOf = leafState;
1455 }
1456 fpoState = resultRelInfo->ri_forPortionOf;
1458 leftoverSlot = fpoState->fp_Leftover;
1459
1460 /*
1461 * Get the old pre-UPDATE/DELETE tuple. We will use its range to compute
1462 * untouched parts of history, and if necessary we will insert copies with
1463 * truncated start/end times.
1464 *
1465 * We have already locked the tuple in ExecUpdate/ExecDelete, and it has
1466 * passed EvalPlanQual. This ensures that concurrent updates in READ
1467 * COMMITTED can't insert conflicting temporal leftovers.
1468 *
1469 * It does *not* protect against concurrent update/deletes overlooking
1470 * each others' leftovers though. See our isolation tests for details
1471 * about that and a viable workaround.
1472 */
 /* NOTE(review): the fetch call (presumably table_tuple_fetch_row_version
  * into fpoState->fp_Existing, guarded by this elog) appears lost here. */
1474 elog(ERROR, "failed to fetch tuple for FOR PORTION OF");
1475
1476 /*
1477 * Get the old range of the record being updated/deleted. Must read with
1478 * the attno of the leaf partition being updated.
1479 */
1480
1481 rangeAttno = forPortionOf->rangeVar->varattno;
1482 if (resultRelInfo->ri_RootResultRelInfo)
1483 map = ExecGetChildToRootMap(resultRelInfo);
1484 if (map != NULL)
1485 rangeAttno = map->attrMap->attnums[rangeAttno - 1];
 /* NOTE(review): a slot_getallattrs()-style deform of oldtupleSlot
  * appears lost here; tts_values/tts_isnull are read just below. */
1487
1488 if (oldtupleSlot->tts_isnull[rangeAttno - 1])
1489 elog(ERROR, "found a NULL range in a temporal table");
1490 oldRange = oldtupleSlot->tts_values[rangeAttno - 1];
1491
1492 /*
1493 * Get the range's type cache entry. This is worth caching for the whole
1494 * UPDATE/DELETE as range functions do.
1495 */
1496
1497 typcache = fpoState->fp_leftoverstypcache;
1498 if (typcache == NULL)
1499 {
1500 typcache = lookup_type_cache(forPortionOf->rangeType, 0);
1501 fpoState->fp_leftoverstypcache = typcache;
1502 }
1503
1504 /*
1505 * Get the ranges to the left/right of the targeted range. We call a SETOF
1506 * support function and insert as many temporal leftovers as it gives us.
1507 * Although rangetypes have 0/1/2 leftovers, multiranges have 0/1, and
1508 * other types may have more.
1509 */
1510
1511 fmgr_info(forPortionOf->withoutPortionProc, &flinfo);
1512 rsi.type = T_ReturnSetInfo;
1513 rsi.econtext = mtstate->ps.ps_ExprContext;
1514 rsi.expectedDesc = NULL;
1517 rsi.setResult = NULL;
1518 rsi.setDesc = NULL;
1519
1520 InitFunctionCallInfoData(*fcinfo, &flinfo, 2, InvalidOid, NULL, (Node *) &rsi);
1521 fcinfo->args[0].value = oldRange;
1522 fcinfo->args[0].isnull = false;
1523 fcinfo->args[1].value = fpoState->fp_targetRange;
1524 fcinfo->args[1].isnull = false;
1525
1526 /*
1527 * If there are partitions, we must insert into the root table, so we get
1528 * tuple routing. We already set up leftoverSlot with the root tuple
1529 * descriptor.
1530 */
1531 if (resultRelInfo->ri_RootResultRelInfo)
1532 resultRelInfo = resultRelInfo->ri_RootResultRelInfo;
1533
1534 /*
1535 * Insert a leftover for each value returned by the without_portion helper
1536 * function
1537 */
1538 while (true)
1539 {
 /* NOTE(review): the FunctionCallInvoke() producing `leftover` appears
  * lost here; rsi.isDone and fcinfo->isnull are checked just below. */
1541
1542 /* Are we done? */
1543 if (rsi.isDone == ExprEndResult)
1544 break;
1545
1546 if (fcinfo->isnull)
1547 elog(ERROR, "Got a null from without_portion function");
1548
1549 /*
1550 * Does the new Datum violate domain checks? Row-level CHECK
1551 * constraints are validated by ExecInsert, so we don't need to do
1552 * anything here for those.
1553 */
1554 if (forPortionOf->isDomain)
1555 domain_check(leftover, false, forPortionOf->rangeVar->vartype, NULL, NULL);
1556
1557 if (!didInit)
1558 {
1559 /*
1560 * Make a copy of the pre-UPDATE row. Then we'll overwrite the
1561 * range column below. Convert oldtuple to the base table's format
1562 * if necessary. We need to insert temporal leftovers through the
1563 * root partition so they get routed correctly.
1564 */
1565 if (map != NULL)
1566 {
 /* NOTE(review): execute_attr_map_slot() call head appears lost. */
1569 leftoverSlot);
1570 }
1571 else
1572 {
1573 oldtuple = ExecFetchSlotHeapTuple(oldtupleSlot, false, &shouldFree);
1574 ExecForceStoreHeapTuple(oldtuple, leftoverSlot, false);
1575 }
1576
1577 /*
1578 * Save some mtstate things so we can restore them below. XXX:
1579 * Should we create our own ModifyTableState instead?
1580 */
1581 oldOperation = mtstate->operation;
1582 mtstate->operation = CMD_INSERT;
1583 oldTcs = mtstate->mt_transition_capture;
1584
1585 didInit = true;
1586 }
1587
1588 leftoverSlot->tts_values[forPortionOf->rangeVar->varattno - 1] = leftover;
1589 leftoverSlot->tts_isnull[forPortionOf->rangeVar->varattno - 1] = false;
1591
1592 /*
1593 * The standard says that each temporal leftover should execute its
1594 * own INSERT statement, firing all statement and row triggers, but
1595 * skipping insert permission checks. Therefore we give each insert
1596 * its own transition table. If we just push & pop a new trigger level
1597 * for each insert, we get exactly what we need.
1598 *
1599 * We have to make sure that the inserts don't add to the ROW_COUNT
1600 * diagnostic or the command tag, so we pass false for canSetTag.
1601 */
 /* NOTE(review): an AfterTriggerBeginQuery()-style call (pairing the
  * AfterTriggerEndQuery below) appears lost here — confirm upstream. */
1603 ExecSetupTransitionCaptureState(mtstate, estate);
1604 fireBSTriggers(mtstate);
1605 ExecInsert(context, resultRelInfo, leftoverSlot, false, NULL, NULL);
1606 fireASTriggers(mtstate);
1607 AfterTriggerEndQuery(estate);
1608 }
1609
1610 if (didInit)
1611 {
 /* Restore the caller's operation and transition-capture state. */
1612 mtstate->operation = oldOperation;
1613 mtstate->mt_transition_capture = oldTcs;
1614
1615 if (shouldFree)
1616 heap_freetuple(oldtuple);
1617 }
1618}
1619
1620/* ----------------------------------------------------------------
1621 * ExecBatchInsert
1622 *
1623 * Insert multiple tuples in an efficient way.
1624 * Currently, this handles inserting into a foreign table without
1625 * RETURNING clause.
1626 * ----------------------------------------------------------------
1627 */
1628static void
/*
 * NOTE(review): the first signature line (presumably
 * "ExecBatchInsert(ModifyTableState *mtstate,") and the planSlots
 * parameter line were dropped by the doxygen extraction (numbers 1629,
 * 1632 missing). mtstate and planSlots are used below. Confirm upstream.
 */
1630 ResultRelInfo *resultRelInfo,
1631 TupleTableSlot **slots,
1633 int numSlots,
1634 EState *estate,
1635 bool canSetTag)
1636{
1637 int i;
 /* The FDW may insert fewer rows than requested (e.g. remote errors
  * suppressed by triggers); it reports the real count via &numInserted. */
1638 int numInserted = numSlots;
1639 TupleTableSlot *slot = NULL;
 /* NOTE(review): declaration of rslots (TupleTableSlot **) appears lost
  * here (number 1640 missing). */
1641
1642 /*
1643 * insert into foreign table: let the FDW do it
1644 */
1645 rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1646 resultRelInfo,
1647 slots,
1648 planSlots,
1649 &numInserted);
1650
1651 for (i = 0; i < numInserted; i++)
1652 {
1653 slot = rslots[i];
1654
1655 /*
1656 * AFTER ROW Triggers might reference the tableoid column, so
1657 * (re-)initialize tts_tableOid before evaluating them.
1658 */
1659 slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1660
1661 /* AFTER ROW INSERT Triggers */
1662 ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1663 mtstate->mt_transition_capture);
1664
1665 /*
1666 * Check any WITH CHECK OPTION constraints from parent views. See the
1667 * comment in ExecInsert.
1668 */
1669 if (resultRelInfo->ri_WithCheckOptions != NIL)
1670 ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1671 }
1672
1673 if (canSetTag && numInserted > 0)
1674 estate->es_processed += numInserted;
1675
1676 /* Clean up all the slots, ready for the next batch */
1677 for (i = 0; i < numSlots; i++)
1678 {
1679 ExecClearTuple(slots[i]);
 /* NOTE(review): a matching ExecClearTuple(planSlots[i]) appears lost
  * here (number 1680 missing). */
1681 }
1682 resultRelInfo->ri_NumSlots = 0;
1683}
1684
1685/*
1686 * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1687 */
1688static void
/*
 * NOTE(review): the signature line (presumably
 * "ExecPendingInserts(EState *estate)") was dropped by the doxygen
 * extraction (number 1689 missing); estate is used below.
 */
1690{
1691 ListCell *l1,
1692 *l2;
1693
 /* NOTE(review): a forboth() over estate->es_insert_pending_result_relations
  * and es_insert_pending_modifytables appears lost here (1694-1695 missing);
  * l1/l2 track the two lists in lockstep. */
1696 {
1697 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1698 ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1699
1700 Assert(mtstate);
 /* Flush this relation's buffered rows through the FDW batch path. */
1701 ExecBatchInsert(mtstate, resultRelInfo,
1702 resultRelInfo->ri_Slots,
1703 resultRelInfo->ri_PlanSlots,
1704 resultRelInfo->ri_NumSlots,
1705 estate, mtstate->canSetTag);
1706 }
1707
 /* NOTE(review): list_free() + reset of the two pending lists appears
  * lost here (1708-1711 missing) — confirm upstream. */
1712}
1713
1714/*
1715 * ExecDeletePrologue -- subroutine for ExecDelete
1716 *
1717 * Prepare executor state for DELETE. Actually, the only thing we have to do
1718 * here is execute BEFORE ROW triggers. We return false if one of them makes
1719 * the delete a no-op; otherwise, return true.
1720 */
1721static bool
/*
 * NOTE(review): the first signature line (presumably
 * "ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo
 * *resultRelInfo,") and the epqreturnslot/result parameter line were
 * dropped by the doxygen extraction (numbers 1722, 1724 missing);
 * both names are used below.
 */
1723 ItemPointer tupleid, HeapTuple oldtuple,
1725{
 /* Default to TM_Ok so callers that pass a result pointer see a clean
  * status even when no BEFORE trigger runs. */
1726 if (result)
1727 *result = TM_Ok;
1728
1729 /* BEFORE ROW DELETE triggers */
1730 if (resultRelInfo->ri_TrigDesc &&
1731 resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1732 {
1733 /* Flush any pending inserts, so rows are visible to the triggers */
 /* NOTE(review): the guard (presumably checking
  * es_insert_pending_result_relations != NIL) appears lost (1734). */
1735 ExecPendingInserts(context->estate);
1736
 /* Returns false if a trigger suppressed the delete (caller no-ops). */
1737 return ExecBRDeleteTriggers(context->estate, context->epqstate,
1738 resultRelInfo, tupleid, oldtuple,
1739 epqreturnslot, result, &context->tmfd,
1740 context->mtstate->operation == CMD_MERGE);
1741 }
1742
1743 return true;
1744}
1745
1746/*
1747 * ExecDeleteAct -- subroutine for ExecDelete
1748 *
1749 * Actually delete the tuple from a plain table.
1750 *
1751 * Caller is in charge of doing EvalPlanQual as necessary
1752 */
1753static TM_Result
/*
 * NOTE(review): the signature lines (presumably
 * "ExecDeleteAct(ModifyTableContext *context, ResultRelInfo
 * *resultRelInfo, ItemPointer tupleid, bool changingPart)") were dropped
 * by the doxygen extraction (numbers 1754-1755 missing).
 */
1756{
1757 EState *estate = context->estate;
1758 uint32 options = 0;
1759
 /* For a cross-partition move, flag the delete accordingly so the tuple
  * is marked as moved rather than plainly deleted. */
1760 if (changingPart)
 /* NOTE(review): the options |= ... line (presumably
  * TABLE_MODIFY_CHANGING_PART or similar) appears lost (1761). */
1762
1763 return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1764 estate->es_output_cid,
1765 options,
1766 estate->es_snapshot,
1767 estate->es_crosscheck_snapshot,
1768 true /* wait for commit */ ,
1769 &context->tmfd);
1770}
1771
1772/*
1773 * ExecDeleteEpilogue -- subroutine for ExecDelete
1774 *
1775 * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1776 * including the UPDATE triggers if the deletion is being done as part of a
1777 * cross-partition tuple move. It also inserts temporal leftovers from a
1778 * DELETE FOR PORTION OF.
1779 */
1780static void
/*
 * NOTE(review): the first signature line (presumably
 * "ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo
 * *resultRelInfo,") was dropped by the doxygen extraction (number 1781
 * missing).
 */
1782 ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1783{
1784 ModifyTableState *mtstate = context->mtstate;
1785 EState *estate = context->estate;
 /* NOTE(review): a local (presumably TransitionCaptureState *ar_delete_trig_tcs)
  * appears lost here (number 1786 missing); it would normally be passed to
  * ExecARDeleteTriggers below. */
1787
1788 /*
1789 * If this delete is the result of a partition key update that moved the
1790 * tuple to a new partition, put this row into the transition OLD TABLE,
1791 * if there is one. We need to do this separately for DELETE and INSERT
1792 * because they happen on different tables.
1793 */
1795 if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
 /* NOTE(review): the condition's continuation (presumably checking
  * tcs_update_old_table) appears lost (number 1796 missing). */
1797 {
1798 ExecARUpdateTriggers(estate, resultRelInfo,
1799 NULL, NULL,
1800 tupleid, oldtuple,
1801 NULL, NULL, mtstate->mt_transition_capture,
1802 false);
1803
1804 /*
1805 * We've already captured the OLD TABLE row, so make sure any AR
1806 * DELETE trigger fired below doesn't capture it again.
1807 */
 /* NOTE(review): the assignment clearing the capture state for the AR
  * DELETE trigger appears lost (number 1808 missing). */
1809 }
1810
1811 /* Compute temporal leftovers in FOR PORTION OF */
1812 if (((ModifyTable *) context->mtstate->ps.plan)->forPortionOf)
1813 ExecForPortionOfLeftovers(context, estate, resultRelInfo, tupleid);
1814
1815 /* AFTER ROW DELETE Triggers */
1816 ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
 /* NOTE(review): trailing arguments of this call appear lost (1817). */
1818}
1819
1820/* ----------------------------------------------------------------
1821 * ExecDelete
1822 *
1823 * DELETE is like UPDATE, except that we delete the tuple and no
1824 * index modifications are needed.
1825 *
1826 * When deleting from a table, tupleid identifies the tuple to delete and
1827 * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1828 * oldtuple is passed to the triggers and identifies what to delete, and
1829 * tupleid is invalid. When deleting from a foreign table, tupleid is
1830 * invalid; the FDW has to figure out which row to delete using data from
1831 * the planSlot. oldtuple is passed to foreign table triggers; it is
1832 * NULL when the foreign table has no relevant triggers. We use
1833 * tupleDeleted to indicate whether the tuple is actually deleted,
1834 * callers can use it to decide whether to continue the operation. When
1835 * this DELETE is a part of an UPDATE of partition-key, then the slot
1836 * returned by EvalPlanQual() is passed back using output parameter
1837 * epqreturnslot.
1838 *
1839 * Returns RETURNING result if any, otherwise NULL.
1840 * ----------------------------------------------------------------
1841 */
1842static TupleTableSlot *
/*
 * NOTE(review): several signature lines were dropped by the doxygen
 * extraction (numbers 1843, 1845, 1850, 1852 missing) — presumably the
 * function name line ("ExecDelete(ModifyTableContext *context,"), the
 * tupleid parameter, a TM_Result *tmresult out-parameter, and the
 * epqreturnslot out-parameter; all are used below. Confirm against
 * upstream src/backend/executor/nodeModifyTable.c.
 */
1844 ResultRelInfo *resultRelInfo,
1846 HeapTuple oldtuple,
1847 bool processReturning,
1848 bool changingPart,
1849 bool canSetTag,
1851 bool *tupleDeleted,
1853{
1854 EState *estate = context->estate;
 /* NOTE(review): declarations of resultRelationDesc, result (TM_Result)
  * and rslot appear lost here (numbers 1855, 1857 missing). */
1856 TupleTableSlot *slot = NULL;
1858 bool saveOld;
1859
1860 if (tupleDeleted)
1861 *tupleDeleted = false;
1862
1863 /*
1864 * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1865 * done if it says we are.
1866 */
1867 if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
 /* NOTE(review): this call's trailing arguments appear lost (1868). */
1869 return NULL;
1870
1871 /* INSTEAD OF ROW DELETE Triggers */
1872 if (resultRelInfo->ri_TrigDesc &&
1873 resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1874 {
1875 bool dodelete;
1876
1877 Assert(oldtuple != NULL);
1878 dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1879
1880 if (!dodelete) /* "do nothing" */
1881 return NULL;
1882 }
1883 else if (resultRelInfo->ri_FdwRoutine)
1884 {
1885 /*
1886 * delete from foreign table: let the FDW do it
1887 *
1888 * We offer the returning slot as a place to store RETURNING data,
1889 * although the FDW can return some other slot if it wants.
1890 */
1891 slot = ExecGetReturningSlot(estate, resultRelInfo);
1892 slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1893 resultRelInfo,
1894 slot,
1895 context->planSlot);
1896
1897 if (slot == NULL) /* "do nothing" */
1898 return NULL;
1899
1900 /*
1901 * RETURNING expressions might reference the tableoid column, so
1902 * (re)initialize tts_tableOid before evaluating them.
1903 */
1904 if (TTS_EMPTY(slot))
 /* NOTE(review): the guarded statement (presumably
  * ExecStoreAllNullTuple(slot)) and the tts_tableOid assignment appear
  * lost here (numbers 1905, 1907 missing). */
1906
1908 }
1909 else
1910 {
1911 /*
1912 * delete the tuple
1913 *
1914 * Note: if context->estate->es_crosscheck_snapshot isn't
1915 * InvalidSnapshot, we check that the row to be deleted is visible to
1916 * that snapshot, and throw a can't-serialize error if not. This is a
1917 * special-case behavior needed for referential integrity updates in
1918 * transaction-snapshot mode transactions.
1919 */
1920ldelete:
1921 result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1922
1923 if (tmresult)
1924 *tmresult = result;
1925
1926 switch (result)
1927 {
1928 case TM_SelfModified:
1929
1930 /*
1931 * The target tuple was already updated or deleted by the
1932 * current command, or by a later command in the current
1933 * transaction. The former case is possible in a join DELETE
1934 * where multiple tuples join to the same target tuple. This
1935 * is somewhat questionable, but Postgres has always allowed
1936 * it: we just ignore additional deletion attempts.
1937 *
1938 * The latter case arises if the tuple is modified by a
1939 * command in a BEFORE trigger, or perhaps by a command in a
1940 * volatile function used in the query. In such situations we
1941 * should not ignore the deletion, but it is equally unsafe to
1942 * proceed. We don't want to discard the original DELETE
1943 * while keeping the triggered actions based on its deletion;
1944 * and it would be no better to allow the original DELETE
1945 * while discarding updates that it triggered. The row update
1946 * carries some information that might be important according
1947 * to business rules; so throwing an error is the only safe
1948 * course.
1949 *
1950 * If a trigger actually intends this type of interaction, it
1951 * can re-execute the DELETE and then return NULL to cancel
1952 * the outer delete.
1953 */
1954 if (context->tmfd.cmax != estate->es_output_cid)
1955 ereport(ERROR,
 /* NOTE(review): the errcode() line appears lost (1956). */
1957 errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1958 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1959
1960 /* Else, already deleted by self; nothing to do */
1961 return NULL;
1962
1963 case TM_Ok:
1964 break;
1965
1966 case TM_Updated:
1967 {
1968 TupleTableSlot *inputslot;
 /* NOTE(review): declaration of epqslot appears lost (1969). */
1970
 /* NOTE(review): the serializable-isolation guard (presumably
  * IsolationUsesXactSnapshot()) appears lost (1971). */
1972 ereport(ERROR,
 /* NOTE(review): the errcode() line appears lost (1973). */
1974 errmsg("could not serialize access due to concurrent update")));
1975
1976 /*
1977 * Already know that we're going to need to do EPQ, so
1978 * fetch tuple directly into the right slot.
1979 */
1980 EvalPlanQualBegin(context->epqstate);
1981 inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1982 resultRelInfo->ri_RangeTableIndex);
1983
 /* NOTE(review): the table_tuple_lock() call head and its
  * lock-mode/flags arguments appear lost here (numbers 1984,
  * 1987-1988 missing). */
1985 estate->es_snapshot,
1986 inputslot, estate->es_output_cid,
1989 &context->tmfd);
1990
1991 switch (result)
1992 {
1993 case TM_Ok:
1994 Assert(context->tmfd.traversed);
1995 epqslot = EvalPlanQual(context->epqstate,
 /* NOTE(review): one argument line appears lost (1996). */
1997 resultRelInfo->ri_RangeTableIndex,
1998 inputslot);
1999 if (TupIsNull(epqslot))
2000 /* Tuple not passing quals anymore, exiting... */
2001 return NULL;
2002
2003 /*
2004 * If requested, skip delete and pass back the
2005 * updated row.
2006 */
2007 if (epqreturnslot)
2008 {
 /* NOTE(review): the assignment (*epqreturnslot =
  * epqslot) appears lost (2009). */
2010 return NULL;
2011 }
2012 else
2013 goto ldelete;
2014
2015 case TM_SelfModified:
2016
2017 /*
2018 * This can be reached when following an update
2019 * chain from a tuple updated by another session,
2020 * reaching a tuple that was already updated in
2021 * this transaction. If previously updated by this
2022 * command, ignore the delete, otherwise error
2023 * out.
2024 *
2025 * See also TM_SelfModified response to
2026 * table_tuple_delete() above.
2027 */
2028 if (context->tmfd.cmax != estate->es_output_cid)
2029 ereport(ERROR,
 /* NOTE(review): the errcode() line appears lost (2030). */
2031 errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
2032 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2033 return NULL;
2034
2035 case TM_Deleted:
2036 /* tuple already deleted; nothing to do */
2037 return NULL;
2038
2039 default:
2040
2041 /*
2042 * TM_Invisible should be impossible because we're
2043 * waiting for updated row versions, and would
2044 * already have errored out if the first version
2045 * is invisible.
2046 *
2047 * TM_Updated should be impossible, because we're
2048 * locking the latest version via
2049 * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
2050 */
2051 elog(ERROR, "unexpected table_tuple_lock status: %u",
2052 result);
2053 return NULL;
2054 }
2055
2056 Assert(false);
2057 break;
2058 }
2059
2060 case TM_Deleted:
 /* NOTE(review): the serializable-isolation guard appears
  * lost (2061), as does the errcode() line (2063). */
2062 ereport(ERROR,
2064 errmsg("could not serialize access due to concurrent delete")));
2065 /* tuple already deleted; nothing to do */
2066 return NULL;
2067
2068 default:
2069 elog(ERROR, "unrecognized table_tuple_delete status: %u",
2070 result);
2071 return NULL;
2072 }
2073
2074 /*
2075 * Note: Normally one would think that we have to delete index tuples
2076 * associated with the heap tuple now...
2077 *
2078 * ... but in POSTGRES, we have no need to do this because VACUUM will
2079 * take care of it later. We can't delete index tuples immediately
2080 * anyway, since the tuple is still visible to other transactions.
2081 */
2082 }
2083
2084 if (canSetTag)
2085 (estate->es_processed)++;
2086
2087 /* Tell caller that the delete actually happened. */
2088 if (tupleDeleted)
2089 *tupleDeleted = true;
2090
2091 ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
2092
2093 /*
2094 * Process RETURNING if present and if requested.
2095 *
2096 * If this is part of a cross-partition UPDATE, and the RETURNING list
2097 * refers to any OLD column values, save the old tuple here for later
2098 * processing of the RETURNING list by ExecInsert().
2099 */
2100 saveOld = changingPart && resultRelInfo->ri_projectReturning &&
 /* NOTE(review): the condition's continuation (presumably checking
  * ri_projectReturning->pi_state.flags for OLD references) appears
  * lost (number 2101 missing). */
2102
2103 if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
2104 {
2105 /*
2106 * We have to put the target tuple into a slot, which means first we
2107 * gotta fetch it. We can use the trigger tuple slot.
2108 */
 /* NOTE(review): a declaration (presumably TupleTableSlot *rslot or
  * similar) appears lost here (number 2109 missing). */
2110
2111 if (resultRelInfo->ri_FdwRoutine)
2112 {
2113 /* FDW must have provided a slot containing the deleted row */
2114 Assert(!TupIsNull(slot));
2115 }
2116 else
2117 {
2118 slot = ExecGetReturningSlot(estate, resultRelInfo);
2119 if (oldtuple != NULL)
2120 {
2121 ExecForceStoreHeapTuple(oldtuple, slot, false);
2122 }
2123 else
2124 {
 /* NOTE(review): the table_tuple_fetch_row_version() call head
  * appears lost here (number 2125 missing). */
2126 SnapshotAny, slot))
2127 elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
2128 }
2129 }
2130
2131 /*
2132 * If required, save the old tuple for later processing of the
2133 * RETURNING list by ExecInsert().
2134 */
2135 if (saveOld)
2136 {
 /* NOTE(review): declarations (tupconv_map, rootRelInfo) appear
  * lost here (number 2137 missing). */
2138
2139 /*
2140 * Convert the tuple into the root partition's format/slot, if
2141 * needed. ExecInsert() will then convert it to the new
2142 * partition's format/slot, if necessary.
2143 */
2144 tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2145 if (tupconv_map != NULL)
2146 {
 /* NOTE(review): the rootRelInfo assignment appears lost (2147). */
2148 TupleTableSlot *oldSlot = slot;
2149
2150 slot = execute_attr_map_slot(tupconv_map->attrMap,
2151 slot,
2152 ExecGetReturningSlot(estate,
2153 rootRelInfo));
2154
 /* Preserve physical identity (tableoid + TID) across the
  * format conversion so OLD references resolve correctly. */
2155 slot->tts_tableOid = oldSlot->tts_tableOid;
2156 ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
2157 }
2158
2159 context->cpDeletedSlot = slot;
2160
2161 return NULL;
2162 }
2163
2164 rslot = ExecProcessReturning(context, resultRelInfo, true,
2165 slot, NULL, context->planSlot);
2166
2167 /*
2168 * Before releasing the target tuple again, make sure rslot has a
2169 * local copy of any pass-by-reference values.
2170 */
 /* NOTE(review): the ExecMaterializeSlot(rslot) call appears lost
  * here (number 2171 missing). */
2172
2173 ExecClearTuple(slot);
2174
2175 return rslot;
2176 }
2177
2178 return NULL;
2179}
2180
2181/*
 2182 * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
 2183 *
 2184 * This works by first deleting the old tuple from the current partition,
 2185 * followed by inserting the new tuple into the root parent table, that is,
 2186 * mtstate->rootResultRelInfo. It will be re-routed from there to the
 2187 * correct partition.
 2188 *
 2189 * Returns true if the tuple has been successfully moved, or if it's found
 2190 * that the tuple was concurrently deleted so there's nothing more to do
 2191 * for the caller.
 2192 *
 2193 * False is returned if the tuple we're trying to move is found to have been
 2194 * concurrently updated. In that case, the caller must check if the updated
 2195 * tuple that's returned in *retry_slot still needs to be re-routed, and call
 2196 * this function again or perform a regular update accordingly. For MERGE,
 2197 * the updated tuple is not returned in *retry_slot; it has its own retry
 2198 * logic.
 2199 */
 2200static bool
 2202 ResultRelInfo *resultRelInfo,
 2203 ItemPointer tupleid, HeapTuple oldtuple,
 2204 TupleTableSlot *slot,
 2205 bool canSetTag,
 2211{
 2212 ModifyTableState *mtstate = context->mtstate;
 2213 EState *estate = mtstate->ps.state;
 2215 bool tuple_deleted;
 2217
/* Reset caller-visible cross-partition outputs before attempting the move. */
 2218 context->cpDeletedSlot = NULL;
 2219 context->cpUpdateReturningSlot = NULL;
 2220 *retry_slot = NULL;
 2221
 2222 /*
 2223 * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
 2224 * to migrate to a different partition. Maybe this can be implemented
 2225 * some day, but it seems a fringe feature with little redeeming value.
 2226 */
 2227 if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
 2228 ereport(ERROR,
 2230 errmsg("invalid ON UPDATE specification"),
 2231 errdetail("The result tuple would appear in a different partition than the original tuple.")));
 2232
 2233 /*
 2234 * When an UPDATE is run directly on a leaf partition, simply fail with a
 2235 * partition constraint violation error.
 2236 */
 2237 if (resultRelInfo == mtstate->rootResultRelInfo)
 2238 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
 2239
 2240 /*
 2241 * Initialize tuple routing info if not already done. Note whatever we do
 2242 * here must be done in ExecInitModifyTable for FOR PORTION OF as well.
 2243 */
 2244 if (mtstate->mt_partition_tuple_routing == NULL)
 2245 {
 2248
 2249 /* Things built here have to last for the query duration. */
 2251
 2254
 2255 /*
 2256 * Before a partition's tuple can be re-routed, it must first be
 2257 * converted to the root's format, so we'll need a slot for storing
 2258 * such tuples.
 2259 */
 2260 Assert(mtstate->mt_root_tuple_slot == NULL);
 2262
 2264 }
 2265
 2266 /*
 2267 * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
 2268 * We want to return rows from INSERT.
 2269 */
 2270 ExecDelete(context, resultRelInfo,
 2271 tupleid, oldtuple,
 2272 false, /* processReturning */
 2273 true, /* changingPart */
 2274 false, /* canSetTag */
 2276
 2277 /*
 2278 * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
 2279 * it was already deleted by self, or it was concurrently deleted by
 2280 * another transaction), then we should skip the insert as well;
 2281 * otherwise, an UPDATE could cause an increase in the total number of
 2282 * rows across all partitions, which is clearly wrong.
 2283 *
 2284 * For a normal UPDATE, the case where the tuple has been the subject of a
 2285 * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
 2286 * machinery, but for an UPDATE that we've translated into a DELETE from
 2287 * this partition and an INSERT into some other partition, that's not
 2288 * available, because CTID chains can't span relation boundaries. We
 2289 * mimic the semantics to a limited extent by skipping the INSERT if the
 2290 * DELETE fails to find a tuple. This ensures that two concurrent
 2291 * attempts to UPDATE the same tuple at the same time can't turn one tuple
 2292 * into two, and that an UPDATE of a just-deleted tuple can't resurrect
 2293 * it.
 2294 */
 2295 if (!tuple_deleted)
 2296 {
 2297 /*
 2298 * epqslot will be typically NULL. But when ExecDelete() finds that
 2299 * another transaction has concurrently updated the same row, it
 2300 * re-fetches the row, skips the delete, and epqslot is set to the
 2301 * re-fetched tuple slot. In that case, we need to do all the checks
 2302 * again. For MERGE, we leave everything to the caller (it must do
 2303 * additional rechecking, and might end up executing a different
 2304 * action entirely).
 2305 */
/* NOTE(review): *tmresult is presumably set by the ExecDelete() call above; some of its argument lines were lost in extraction -- confirm against upstream. */
 2306 if (mtstate->operation == CMD_MERGE)
 2307 return *tmresult == TM_Ok;
 2308 else if (TupIsNull(epqslot))
 2309 return true;
 2310 else
 2311 {
 2312 /* Fetch the most recent version of old tuple. */
 2314
 2315 /* ... but first, make sure ri_oldTupleSlot is initialized. */
 2316 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
 2317 ExecInitUpdateProjection(mtstate, resultRelInfo);
 2318 oldSlot = resultRelInfo->ri_oldTupleSlot;
 2320 tupleid,
 2322 oldSlot))
 2323 elog(ERROR, "failed to fetch tuple being updated");
 2324 /* and project the new tuple to retry the UPDATE with */
 2325 *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
 2326 oldSlot);
 2327 return false;
 2328 }
 2329 }
 2330
 2331 /*
 2332 * resultRelInfo is one of the per-relation resultRelInfos. So we should
 2333 * convert the tuple into root's tuple descriptor if needed, since
 2334 * ExecInsert() starts the search from root.
 2335 */
 2336 tupconv_map = ExecGetChildToRootMap(resultRelInfo);
 2337 if (tupconv_map != NULL)
 2338 slot = execute_attr_map_slot(tupconv_map->attrMap,
 2339 slot,
 2340 mtstate->mt_root_tuple_slot);
 2341
 2342 /* Tuple routing starts from the root table. */
 2343 context->cpUpdateReturningSlot =
 2344 ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
 2346
 2347 /*
 2348 * Reset the transition state that may possibly have been written by
 2349 * INSERT.
 2350 */
 2351 if (mtstate->mt_transition_capture)
 2353
 2354 /* We're done moving. */
 2355 return true;
 2356}
2357
2358/*
 2359 * ExecUpdatePrologue -- subroutine for ExecUpdate
 2360 *
 2361 * Prepare executor state for UPDATE. This includes running BEFORE ROW
 2362 * triggers. We return false if one of them makes the update a no-op;
 2363 * otherwise, return true.
 2364 */
 2365static bool
 2369{
 2371
/* "result" is an optional out-parameter; when supplied, default it to TM_Ok. */
 2372 if (result)
 2373 *result = TM_Ok;
 2374
/* Make the slot hold a self-contained local copy of the new tuple. */
 2375 ExecMaterializeSlot(slot);
 2376
 2377 /*
 2378 * Open the table's indexes, if we have not done so already, so that we
 2379 * can add new index entries for the updated tuple.
 2380 */
 2381 if (resultRelationDesc->rd_rel->relhasindex &&
 2382 resultRelInfo->ri_IndexRelationDescs == NULL)
 2383 ExecOpenIndices(resultRelInfo, false);
 2384
 2385 /* BEFORE ROW UPDATE triggers */
 2386 if (resultRelInfo->ri_TrigDesc &&
 2387 resultRelInfo->ri_TrigDesc->trig_update_before_row)
 2388 {
 2389 /* Flush any pending inserts, so rows are visible to the triggers */
 2391 ExecPendingInserts(context->estate);
 2392
 2393 return ExecBRUpdateTriggers(context->estate, context->epqstate,
 2394 resultRelInfo, tupleid, oldtuple, slot,
 2395 result, &context->tmfd,
 2396 context->mtstate->operation == CMD_MERGE);
 2397 }
 2398
 2399 return true;
 2400}
2401
2402/*
 2403 * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
 2404 *
 2405 * Apply the final modifications to the tuple slot before the update.
 2406 * (This is split out because we also need it in the foreign-table code path.)
 2407 */
 2408static void
 2410 TupleTableSlot *slot,
 2411 EState *estate)
 2412{
 2414
 2415 /*
 2416 * Constraints and GENERATED expressions might reference the tableoid
 2417 * column, so (re-)initialize tts_tableOid before evaluating them.
 2418 */
/* NOTE(review): the tts_tableOid assignment promised by the comment above was dropped by the extraction tool -- see upstream source. */
 2420
 2421 /*
 2422 * Compute stored generated columns
 2423 */
 2424 if (resultRelationDesc->rd_att->constr &&
 2425 resultRelationDesc->rd_att->constr->has_generated_stored)
 2426 ExecComputeStoredGenerated(resultRelInfo, estate, slot,
 2427 CMD_UPDATE);
 2428}
2429
2430/*
 2431 * ExecUpdateAct -- subroutine for ExecUpdate
 2432 *
 2433 * Actually update the tuple, when operating on a plain table. If the
 2434 * table is a partition, and the command was called referencing an ancestor
 2435 * partitioned table, this routine migrates the resulting tuple to another
 2436 * partition.
 2437 *
 2438 * The caller is in charge of keeping indexes current as necessary. The
 2439 * caller is also in charge of doing EvalPlanQual if the tuple is found to
 2440 * be concurrently updated. However, in case of a cross-partition update,
 2441 * this routine does it.
 2442 */
 2443static TM_Result
 2446 bool canSetTag, UpdateContext *updateCxt)
 2447{
 2448 EState *estate = context->estate;
 2452
/* Assume no cross-partition movement until we discover otherwise. */
 2453 updateCxt->crossPartUpdate = false;
 2454
 2455 /*
 2456 * If we move the tuple to a new partition, we loop back here to recompute
 2457 * GENERATED values (which are allowed to be different across partitions)
 2458 * and recheck any RLS policies and constraints. We do not fire any
 2459 * BEFORE triggers of the new partition, however.
 2460 */
 2461lreplace:
 2462 /* Fill in GENERATEd columns */
 2463 ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
 2464
 2465 /* ensure slot is independent, consider e.g. EPQ */
 2466 ExecMaterializeSlot(slot);
 2467
 2468 /*
 2469 * If partition constraint fails, this row might get moved to another
 2470 * partition, in which case we should check the RLS CHECK policy just
 2471 * before inserting into the new partition, rather than doing it here.
 2472 * This is because a trigger on that partition might again change the row.
 2473 * So skip the WCO checks if the partition constraint fails.
 2474 */
 2476 resultRelationDesc->rd_rel->relispartition &&
 2477 !ExecPartitionCheck(resultRelInfo, slot, estate, false);
 2478
 2479 /* Check any RLS UPDATE WITH CHECK policies */
 2481 resultRelInfo->ri_WithCheckOptions != NIL)
 2482 {
 2483 /*
 2484 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
 2485 * we are looking for at this point.
 2486 */
 2488 resultRelInfo, slot, estate);
 2489 }
 2490
 2491 /*
 2492 * If a partition check failed, try to move the row into the right
 2493 * partition.
 2494 */
 2496 {
 2498 *retry_slot;
 2500
 2501 /*
 2502 * ExecCrossPartitionUpdate will first DELETE the row from the
 2503 * partition it's currently in and then insert it back into the root
 2504 * table, which will re-route it to the correct partition. However,
 2505 * if the tuple has been concurrently updated, a retry is needed.
 2506 */
 2507 if (ExecCrossPartitionUpdate(context, resultRelInfo,
 2508 tupleid, oldtuple, slot,
 2509 canSetTag, updateCxt,
 2510 &result,
 2511 &retry_slot,
 2514 {
 2515 /* success! */
 2516 updateCxt->crossPartUpdate = true;
 2517
 2518 /*
 2519 * If the partitioned table being updated is referenced in foreign
 2520 * keys, queue up trigger events to check that none of them were
 2521 * violated. No special treatment is needed in
 2522 * non-cross-partition update situations, because the leaf
 2523 * partition's AR update triggers will take care of that. During
 2524 * cross-partition updates implemented as delete on the source
 2525 * partition followed by insert on the destination partition,
 2526 * AR-UPDATE triggers of the root table (that is, the table
 2527 * mentioned in the query) must be fired.
 2528 *
 2529 * NULL insert_destrel means that the move failed to occur, that
 2530 * is, the update failed, so no need to anything in that case.
 2531 */
 2532 if (insert_destrel &&
 2533 resultRelInfo->ri_TrigDesc &&
 2534 resultRelInfo->ri_TrigDesc->trig_update_after_row)
 2536 resultRelInfo,
 2538 tupleid, slot,
 2540
 2541 return TM_Ok;
 2542 }
 2543
 2544 /*
 2545 * No luck, a retry is needed. If running MERGE, we do not do so
 2546 * here; instead let it handle that on its own rules.
 2547 */
 2548 if (context->mtstate->operation == CMD_MERGE)
 2549 return result;
 2550
 2551 /*
 2552 * ExecCrossPartitionUpdate installed an updated version of the new
 2553 * tuple in the retry slot; start over.
 2554 */
 2555 slot = retry_slot;
 2556 goto lreplace;
 2557 }
 2558
 2559 /*
 2560 * Check the constraints of the tuple. We've already checked the
 2561 * partition constraint above; however, we must still ensure the tuple
 2562 * passes all other constraints, so we will call ExecConstraints() and
 2563 * have it validate all remaining checks.
 2564 */
 2565 if (resultRelationDesc->rd_att->constr)
 2566 ExecConstraints(resultRelInfo, slot, estate);
 2567
 2568 /*
 2569 * replace the heap tuple
 2570 *
 2571 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
 2572 * the row to be updated is visible to that snapshot, and throw a
 2573 * can't-serialize error if not. This is a special-case behavior needed
 2574 * for referential integrity updates in transaction-snapshot mode
 2575 * transactions.
 2576 */
/* NOTE(review): the table_tuple_update() call line itself was lost in extraction; only its trailing arguments remain below -- result is presumably assigned here. */
 2578 estate->es_output_cid,
 2579 0,
 2580 estate->es_snapshot,
 2581 estate->es_crosscheck_snapshot,
 2582 true /* wait for commit */ ,
 2583 &context->tmfd, &updateCxt->lockmode,
 2584 &updateCxt->updateIndexes);
 2585
 2586 return result;
 2587}
2588
2589/*
 2590 * ExecUpdateEpilogue -- subroutine for ExecUpdate
 2591 *
 2592 * Closing steps of updating a tuple. Must be called if ExecUpdateAct
 2593 * returns indicating that the tuple was updated. It also inserts temporal
 2594 * leftovers from an UPDATE FOR PORTION OF.
 2595 */
 2596static void
 2598 ResultRelInfo *resultRelInfo, ItemPointer tupleid,
 2599 HeapTuple oldtuple, TupleTableSlot *slot)
 2600{
 2601 ModifyTableState *mtstate = context->mtstate;
 2603
 2604 /* insert index entries for tuple if necessary */
 2605 if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
 2606 {
 2607 uint32 flags = EIIT_IS_UPDATE;
 2608
/* Restrict index maintenance to summarizing indexes when possible. */
 2609 if (updateCxt->updateIndexes == TU_Summarizing)
 2610 flags |= EIIT_ONLY_SUMMARIZING;
 2611 recheckIndexes = ExecInsertIndexTuples(resultRelInfo, context->estate,
 2612 flags, slot, NIL,
 2613 NULL);
 2614 }
 2615
 2616 /* Compute temporal leftovers in FOR PORTION OF */
 2617 if (((ModifyTable *) context->mtstate->ps.plan)->forPortionOf)
 2618 ExecForPortionOfLeftovers(context, context->estate, resultRelInfo, tupleid);
 2619
 2620 /* AFTER ROW UPDATE Triggers */
 2621 ExecARUpdateTriggers(context->estate, resultRelInfo,
 2622 NULL, NULL,
 2623 tupleid, oldtuple, slot,
 2625 mtstate->operation == CMD_INSERT ?
 2626 mtstate->mt_oc_transition_capture :
 2627 mtstate->mt_transition_capture,
 2628 false);
 2629
 2631
 2632 /*
 2633 * Check any WITH CHECK OPTION constraints from parent views. We are
 2634 * required to do this after testing all constraints and uniqueness
 2635 * violations per the SQL spec, so we do it after actually updating the
 2636 * record in the heap and all indexes.
 2637 *
 2638 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
 2639 * are looking for at this point.
 2640 */
 2641 if (resultRelInfo->ri_WithCheckOptions != NIL)
 2642 ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
 2643 slot, context->estate);
 2644}
2645
2646/*
 2647 * Queues up an update event using the target root partitioned table's
 2648 * trigger to check that a cross-partition update hasn't broken any foreign
 2649 * keys pointing into it.
 2650 */
 2651static void
 2658{
 2659 ListCell *lc;
 2662
 2663 rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
 2665
 2666 /*
 2667 * For any foreign keys that point directly into a non-root ancestors of
 2668 * the source partition, we can in theory fire an update event to enforce
 2669 * those constraints using their triggers, if we could tell that both the
 2670 * source and the destination partitions are under the same ancestor. But
 2671 * for now, we simply report an error that those cannot be enforced.
 2672 */
 2673 foreach(lc, ancestorRels)
 2674 {
 2676 TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
 2677 bool has_noncloned_fkey = false;
 2678
 2679 /* Root ancestor's triggers will be processed. */
 2680 if (rInfo == rootRelInfo)
 2681 continue;
 2682
 2683 if (trigdesc && trigdesc->trig_update_after_row)
 2684 {
 2685 for (int i = 0; i < trigdesc->numtriggers; i++)
 2686 {
 2687 Trigger *trig = &trigdesc->triggers[i];
 2688
 2689 if (!trig->tgisclone &&
 2691 {
 2692 has_noncloned_fkey = true;
 2693 break;
 2694 }
 2695 }
 2696 }
 2697
 2699 ereport(ERROR,
 2701 errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
 2702 errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
 2703 RelationGetRelationName(rInfo->ri_RelationDesc),
 2704 RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
 2705 errhint("Consider defining the foreign key on table \"%s\".",
 2706 RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
 2707 }
 2708
 2709 /* Perform the root table's triggers. */
/* NOTE(review): the ExecARUpdateTriggers() call line was lost in extraction; only its trailing arguments remain below. */
 2712 tupleid, NULL, newslot, NIL, NULL, true);
 2713}
2714
2715/* ----------------------------------------------------------------
 2716 * ExecUpdate
 2717 *
 2718 * note: we can't run UPDATE queries with transactions
 2719 * off because UPDATEs are actually INSERTs and our
 2720 * scan will mistakenly loop forever, updating the tuple
 2721 * it just inserted.. This should be fixed but until it
 2722 * is, we don't want to get stuck in an infinite loop
 2723 * which corrupts your database..
 2724 *
 2725 * When updating a table, tupleid identifies the tuple to update and
 2726 * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
 2727 * oldtuple is passed to the triggers and identifies what to update, and
 2728 * tupleid is invalid. When updating a foreign table, tupleid is
 2729 * invalid; the FDW has to figure out which row to update using data from
 2730 * the planSlot. oldtuple is passed to foreign table triggers; it is
 2731 * NULL when the foreign table has no relevant triggers.
 2732 *
 2733 * oldSlot contains the old tuple value.
 2734 * slot contains the new tuple value to be stored.
 2735 * planSlot is the output of the ModifyTable's subplan; we use it
 2736 * to access values from other input tables (for RETURNING),
 2737 * row-ID junk columns, etc.
 2738 *
 2739 * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
 2740 * had identified the tuple to update, it will identify the tuple
 2741 * actually updated after EvalPlanQual.
 2742 * ----------------------------------------------------------------
 2743 */
 2744static TupleTableSlot *
 2747 TupleTableSlot *slot, bool canSetTag)
 2748{
 2749 EState *estate = context->estate;
 2753
 2754 /*
 2755 * abort the operation if not running transactions
 2756 */
 2758 elog(ERROR, "cannot UPDATE during bootstrap");
 2759
 2760 /*
 2761 * Prepare for the update. This includes BEFORE ROW triggers, so we're
 2762 * done if it says we are.
 2763 */
 2764 if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
 2765 return NULL;
 2766
 2767 /* INSTEAD OF ROW UPDATE Triggers */
 2768 if (resultRelInfo->ri_TrigDesc &&
 2769 resultRelInfo->ri_TrigDesc->trig_update_instead_row)
 2770 {
 2771 if (!ExecIRUpdateTriggers(estate, resultRelInfo,
 2772 oldtuple, slot))
 2773 return NULL; /* "do nothing" */
 2774 }
 2775 else if (resultRelInfo->ri_FdwRoutine)
 2776 {
 2777 /* Fill in GENERATEd columns */
 2778 ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
 2779
 2780 /*
 2781 * update in foreign table: let the FDW do it
 2782 */
 2783 slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
 2784 resultRelInfo,
 2785 slot,
 2786 context->planSlot);
 2787
 2788 if (slot == NULL) /* "do nothing" */
 2789 return NULL;
 2790
 2791 /*
 2792 * AFTER ROW Triggers or RETURNING expressions might reference the
 2793 * tableoid column, so (re-)initialize tts_tableOid before evaluating
 2794 * them. (This covers the case where the FDW replaced the slot.)
 2795 */
 2797 }
 2798 else
 2799 {
 2801
 2802 /*
 2803 * If we generate a new candidate tuple after EvalPlanQual testing, we
 2804 * must loop back here to try again. (We don't need to redo triggers,
 2805 * however. If there are any BEFORE triggers then trigger.c will have
 2806 * done table_tuple_lock to lock the correct tuple, so there's no need
 2807 * to do them again.)
 2808 */
 2809redo_act:
/* NOTE(review): lockedtid is presumably consulted by the LOCKTAG_TUPLE handling in the EPQ path below (those lines were dropped in extraction) -- confirm. */
 2810 lockedtid = *tupleid;
 2811 result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
 2812 canSetTag, &updateCxt);
 2813
 2814 /*
 2815 * If ExecUpdateAct reports that a cross-partition update was done,
 2816 * then the RETURNING tuple (if any) has been projected and there's
 2817 * nothing else for us to do.
 2818 */
 2819 if (updateCxt.crossPartUpdate)
 2820 return context->cpUpdateReturningSlot;
 2821
 2822 switch (result)
 2823 {
 2824 case TM_SelfModified:
 2825
 2826 /*
 2827 * The target tuple was already updated or deleted by the
 2828 * current command, or by a later command in the current
 2829 * transaction. The former case is possible in a join UPDATE
 2830 * where multiple tuples join to the same target tuple. This
 2831 * is pretty questionable, but Postgres has always allowed it:
 2832 * we just execute the first update action and ignore
 2833 * additional update attempts.
 2834 *
 2835 * The latter case arises if the tuple is modified by a
 2836 * command in a BEFORE trigger, or perhaps by a command in a
 2837 * volatile function used in the query. In such situations we
 2838 * should not ignore the update, but it is equally unsafe to
 2839 * proceed. We don't want to discard the original UPDATE
 2840 * while keeping the triggered actions based on it; and we
 2841 * have no principled way to merge this update with the
 2842 * previous ones. So throwing an error is the only safe
 2843 * course.
 2844 *
 2845 * If a trigger actually intends this type of interaction, it
 2846 * can re-execute the UPDATE (assuming it can figure out how)
 2847 * and then return NULL to cancel the outer update.
 2848 */
 2849 if (context->tmfd.cmax != estate->es_output_cid)
 2850 ereport(ERROR,
 2852 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
 2853 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
 2854
 2855 /* Else, already updated by self; nothing to do */
 2856 return NULL;
 2857
 2858 case TM_Ok:
 2859 break;
 2860
 2861 case TM_Updated:
 2862 {
 2863 TupleTableSlot *inputslot;
 2865
 2867 ereport(ERROR,
 2869 errmsg("could not serialize access due to concurrent update")));
 2870
 2871 /*
 2872 * Already know that we're going to need to do EPQ, so
 2873 * fetch tuple directly into the right slot.
 2874 */
 2875 inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
 2876 resultRelInfo->ri_RangeTableIndex);
 2877
 2879 estate->es_snapshot,
 2880 inputslot, estate->es_output_cid,
 2881 updateCxt.lockmode, LockWaitBlock,
 2883 &context->tmfd);
 2884
 2885 switch (result)
 2886 {
 2887 case TM_Ok:
 2888 Assert(context->tmfd.traversed);
 2889
 2890 epqslot = EvalPlanQual(context->epqstate,
 2892 resultRelInfo->ri_RangeTableIndex,
 2893 inputslot);
 2894 if (TupIsNull(epqslot))
 2895 /* Tuple not passing quals anymore, exiting... */
 2896 return NULL;
 2897
 2898 /* Make sure ri_oldTupleSlot is initialized. */
 2899 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
 2901 resultRelInfo);
 2902
 2903 if (resultRelInfo->ri_needLockTagTuple)
 2904 {
 2909 }
 2910
 2911 /* Fetch the most recent version of old tuple. */
 2912 oldSlot = resultRelInfo->ri_oldTupleSlot;
 2914 tupleid,
 2916 oldSlot))
 2917 elog(ERROR, "failed to fetch tuple being updated");
 2918 slot = ExecGetUpdateNewTuple(resultRelInfo,
 2919 epqslot, oldSlot);
 2920 goto redo_act;
 2921
 2922 case TM_Deleted:
 2923 /* tuple already deleted; nothing to do */
 2924 return NULL;
 2925
 2926 case TM_SelfModified:
 2927
 2928 /*
 2929 * This can be reached when following an update
 2930 * chain from a tuple updated by another session,
 2931 * reaching a tuple that was already updated in
 2932 * this transaction. If previously modified by
 2933 * this command, ignore the redundant update,
 2934 * otherwise error out.
 2935 *
 2936 * See also TM_SelfModified response to
 2937 * table_tuple_update() above.
 2938 */
 2939 if (context->tmfd.cmax != estate->es_output_cid)
 2940 ereport(ERROR,
 2942 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
 2943 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
 2944 return NULL;
 2945
 2946 default:
 2947 /* see table_tuple_lock call in ExecDelete() */
 2948 elog(ERROR, "unexpected table_tuple_lock status: %u",
 2949 result);
 2950 return NULL;
 2951 }
 2952 }
 2953
 2954 break;
 2955
 2956 case TM_Deleted:
 2958 ereport(ERROR,
 2960 errmsg("could not serialize access due to concurrent delete")));
 2961 /* tuple already deleted; nothing to do */
 2962 return NULL;
 2963
 2964 default:
 2965 elog(ERROR, "unrecognized table_tuple_update status: %u",
 2966 result);
 2967 return NULL;
 2968 }
 2969 }
 2970
/* Count the modified row only when this node may set the command tag. */
 2971 if (canSetTag)
 2972 (estate->es_processed)++;
 2973
 2974 ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
 2975 slot);
 2976
 2977 /* Process RETURNING if present */
 2978 if (resultRelInfo->ri_projectReturning)
 2979 return ExecProcessReturning(context, resultRelInfo, false,
 2980 oldSlot, slot, context->planSlot);
 2981
 2982 return NULL;
 2983}
2984
2985/*
 2986 * ExecOnConflictLockRow --- lock the row for ON CONFLICT DO SELECT/UPDATE
 2987 *
 2988 * Try to lock tuple for update as part of speculative insertion for ON
 2989 * CONFLICT DO UPDATE or ON CONFLICT DO SELECT FOR UPDATE/SHARE.
 2990 *
 2991 * Returns true if the row is successfully locked, or false if the caller must
 2992 * retry the INSERT from scratch.
 2993 */
 2994static bool
 2998 Relation relation,
 2999 LockTupleMode lockmode,
 3000 bool isUpdate)
 3001{
 3002 TM_FailureData tmfd;
 3005 TransactionId xmin;
 3006 bool isnull;
 3007
 3008 /*
 3009 * Lock tuple with lockmode. Don't follow updates when tuple cannot be
 3010 * locked without doing so. A row locking conflict here means our
 3011 * previous conclusion that the tuple is conclusively committed is not
 3012 * true anymore.
 3013 */
 3014 test = table_tuple_lock(relation, conflictTid,
 3015 context->estate->es_snapshot,
 3016 existing, context->estate->es_output_cid,
 3017 lockmode, LockWaitBlock, 0,
 3018 &tmfd);
 3019 switch (test)
 3020 {
 3021 case TM_Ok:
 3022 /* success! */
 3023 break;
 3024
 3025 case TM_Invisible:
 3026
 3027 /*
 3028 * This can occur when a just inserted tuple is updated again in
 3029 * the same command. E.g. because multiple rows with the same
 3030 * conflicting key values are inserted.
 3031 *
 3032 * This is somewhat similar to the ExecUpdate() TM_SelfModified
 3033 * case. We do not want to proceed because it would lead to the
 3034 * same row being updated a second time in some unspecified order,
 3035 * and in contrast to plain UPDATEs there's no historical behavior
 3036 * to break.
 3037 *
 3038 * It is the user's responsibility to prevent this situation from
 3039 * occurring. These problems are why the SQL standard similarly
 3040 * specifies that for SQL MERGE, an exception must be raised in
 3041 * the event of an attempt to update the same row twice.
 3042 */
/* NOTE(review): the call fetching xmin was dropped in extraction; only its trailing arguments remain. */
 3045 &isnull);
 3046 Assert(!isnull);
 3048
 3050 ereport(ERROR,
 3052 /* translator: %s is a SQL command name */
 3053 errmsg("%s command cannot affect row a second time",
 3054 isUpdate ? "ON CONFLICT DO UPDATE" : "ON CONFLICT DO SELECT"),
 3055 errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
 3056
 3057 /* This shouldn't happen */
 3058 elog(ERROR, "attempted to lock invisible tuple");
 3059 break;
 3060
 3061 case TM_SelfModified:
 3062
 3063 /*
 3064 * This state should never be reached. As a dirty snapshot is used
 3065 * to find conflicting tuples, speculative insertion wouldn't have
 3066 * seen this row to conflict with.
 3067 */
 3068 elog(ERROR, "unexpected self-updated tuple");
 3069 break;
 3070
 3071 case TM_Updated:
 3073 ereport(ERROR,
 3075 errmsg("could not serialize access due to concurrent update")));
 3076
 3077 /*
 3078 * Tell caller to try again from the very start.
 3079 *
 3080 * It does not make sense to use the usual EvalPlanQual() style
 3081 * loop here, as the new version of the row might not conflict
 3082 * anymore, or the conflicting tuple has actually been deleted.
 3083 */
 3085 return false;
 3086
 3087 case TM_Deleted:
 3089 ereport(ERROR,
 3091 errmsg("could not serialize access due to concurrent delete")));
 3092
 3093 /* see TM_Updated case */
 3095 return false;
 3096
 3097 default:
 3098 elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
 3099 }
 3100
 3101 /* Success, the tuple is locked. */
 3102 return true;
 3103}
3104
3105/*
 3106 * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
 3107 *
 3108 * Try to lock tuple for update as part of speculative insertion. If
 3109 * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
 3110 * (but still lock row, even though it may not satisfy estate's
 3111 * snapshot).
 3112 *
 3113 * Returns true if we're done (with or without an update), or false if
 3114 * the caller must retry the INSERT from scratch.
 3115 */
 3116static bool
 3118 ResultRelInfo *resultRelInfo,
 3121 bool canSetTag,
 3122 TupleTableSlot **returning)
 3123{
 3124 ModifyTableState *mtstate = context->mtstate;
 3125 ExprContext *econtext = mtstate->ps.ps_ExprContext;
 3126 Relation relation = resultRelInfo->ri_RelationDesc;
 3129 LockTupleMode lockmode;
 3130
 3131 /*
 3132 * Parse analysis should have blocked ON CONFLICT for all system
 3133 * relations, which includes these. There's no fundamental obstacle to
 3134 * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
 3135 * ExecUpdate() caller.
 3136 */
 3137 Assert(!resultRelInfo->ri_needLockTagTuple);
 3138
 3139 /* Determine lock mode to use */
 3140 lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
 3141
 3142 /* Lock tuple for update */
 3144 resultRelInfo->ri_RelationDesc, lockmode, true))
 3145 return false;
 3146
 3147 /*
 3148 * Verify that the tuple is visible to our MVCC snapshot if the current
 3149 * isolation level mandates that.
 3150 *
 3151 * It's not sufficient to rely on the check within ExecUpdate() as e.g.
 3152 * CONFLICT ... WHERE clause may prevent us from reaching that.
 3153 *
 3154 * This means we only ever continue when a new command in the current
 3155 * transaction could see the row, even though in READ COMMITTED mode the
 3156 * tuple will not be visible according to the current statement's
 3157 * snapshot. This is in line with the way UPDATE deals with newer tuple
 3158 * versions.
 3159 */
 3160 ExecCheckTupleVisible(context->estate, relation, existing);
 3161
 3162 /*
 3163 * Make tuple and any needed join variables available to ExecQual and
 3164 * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
 3165 * the target's existing tuple is installed in the scantuple. EXCLUDED
 3166 * has been made to reference INNER_VAR in setrefs.c, but there is no
 3167 * other redirection.
 3168 */
 3169 econtext->ecxt_scantuple = existing;
 3170 econtext->ecxt_innertuple = excludedSlot;
 3171 econtext->ecxt_outertuple = NULL;
 3172
/* Evaluate the ON CONFLICT ... WHERE clause against the existing and EXCLUDED rows. */
 3173 if (!ExecQual(onConflictSetWhere, econtext))
 3174 {
 3175 ExecClearTuple(existing); /* see return below */
 3176 InstrCountFiltered1(&mtstate->ps, 1);
 3177 return true; /* done with the tuple */
 3178 }
 3179
 3180 if (resultRelInfo->ri_WithCheckOptions != NIL)
 3181 {
 3182 /*
 3183 * Check target's existing tuple against UPDATE-applicable USING
 3184 * security barrier quals (if any), enforced here as RLS checks/WCOs.
 3185 *
 3186 * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
 3187 * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK.
 3188 * Since SELECT permission on the target table is always required for
 3189 * INSERT ... ON CONFLICT DO UPDATE, the rewriter also adds SELECT RLS
 3190 * checks/WCOs for SELECT security quals, using WCOs of the same kind,
 3191 * and this check enforces them too.
 3192 *
 3193 * The rewriter will also have associated UPDATE-applicable straight
 3194 * RLS checks/WCOs for the benefit of the ExecUpdate() call that
 3195 * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
 3196 * kinds, so there is no danger of spurious over-enforcement in the
 3197 * INSERT or UPDATE path.
 3198 */
 3200 existing,
 3201 mtstate->ps.state);
 3202 }
 3203
 3204 /* Project the new tuple version */
 3205 ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
 3206
 3207 /*
 3208 * Note that it is possible that the target tuple has been modified in
 3209 * this session, after the above table_tuple_lock. We choose to not error
 3210 * out in that case, in line with ExecUpdate's treatment of similar cases.
 3211 * This can happen if an UPDATE is triggered from within ExecQual(),
 3212 * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
 3213 * wCTE in the ON CONFLICT's SET.
 3214 */
 3215
 3216 /* Execute UPDATE with projection */
 3217 *returning = ExecUpdate(context, resultRelInfo,
 3219 resultRelInfo->ri_onConflict->oc_ProjSlot,
 3220 canSetTag);
 3221
 3222 /*
 3223 * Clear out existing tuple, as there might not be another conflict among
 3224 * the next input rows. Don't want to hold resources till the end of the
 3225 * query. First though, make sure that the returning slot, if any, has a
 3226 * local copy of any OLD pass-by-reference values, if it refers to any OLD
 3227 * columns.
 3228 */
 3229 if (*returning != NULL &&
 3231 ExecMaterializeSlot(*returning);
 3232
 3234
 3235 return true;
 3236}
3237
3238/*
3239 * ExecOnConflictSelect --- execute SELECT of INSERT ON CONFLICT DO SELECT
3240 *
3241 * If SELECT FOR UPDATE/SHARE is specified, try to lock tuple as part of
3242 * speculative insertion. If a qual originating from ON CONFLICT DO SELECT is
3243 * satisfied, select (but still lock row, even though it may not satisfy
3244 * estate's snapshot).
3245 *
3246 * Returns true if we're done (with or without a select), or false if the
3247 * caller must retry the INSERT from scratch.
3248 */
3249static bool
3251 ResultRelInfo *resultRelInfo,
3254 bool canSetTag,
3255 TupleTableSlot **returning)
3256{
3257 ModifyTableState *mtstate = context->mtstate;
3258 ExprContext *econtext = mtstate->ps.ps_ExprContext;
3259 Relation relation = resultRelInfo->ri_RelationDesc;
3262 LockClauseStrength lockStrength = resultRelInfo->ri_onConflict->oc_LockStrength;
3263
3264 /*
3265 * Parse analysis should have blocked ON CONFLICT for all system
3266 * relations, which includes these. There's no fundamental obstacle to
3267 * supporting this; we'd just need to handle LOCKTAG_TUPLE appropriately.
3268 */
3269 Assert(!resultRelInfo->ri_needLockTagTuple);
3270
3271 /* Fetch/lock existing tuple, according to the requested lock strength */
3272 if (lockStrength == LCS_NONE)
3273 {
3274 if (!table_tuple_fetch_row_version(relation,
3277 existing))
3278 elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
3279 }
3280 else
3281 {
3282 LockTupleMode lockmode;
3283
3284 switch (lockStrength)
3285 {
3286 case LCS_FORKEYSHARE:
3287 lockmode = LockTupleKeyShare;
3288 break;
3289 case LCS_FORSHARE:
3290 lockmode = LockTupleShare;
3291 break;
3292 case LCS_FORNOKEYUPDATE:
3293 lockmode = LockTupleNoKeyExclusive;
3294 break;
3295 case LCS_FORUPDATE:
3296 lockmode = LockTupleExclusive;
3297 break;
3298 default:
3299 elog(ERROR, "Unexpected lock strength %d", (int) lockStrength);
3300 }
3301
3303 resultRelInfo->ri_RelationDesc, lockmode, false))
3304 return false;
3305 }
3306
3307 /*
3308 * Verify that the tuple is visible to our MVCC snapshot if the current
3309 * isolation level mandates that. See comments in ExecOnConflictUpdate().
3310 */
3311 ExecCheckTupleVisible(context->estate, relation, existing);
3312
3313 /*
3314 * Make tuple and any needed join variables available to ExecQual. The
3315 * EXCLUDED tuple is installed in ecxt_innertuple, while the target's
3316 * existing tuple is installed in the scantuple. EXCLUDED has been made
3317 * to reference INNER_VAR in setrefs.c, but there is no other redirection.
3318 */
3319 econtext->ecxt_scantuple = existing;
3320 econtext->ecxt_innertuple = excludedSlot;
3321 econtext->ecxt_outertuple = NULL;
3322
3323 if (!ExecQual(onConflictSelectWhere, econtext))
3324 {
3325 ExecClearTuple(existing); /* see return below */
3326 InstrCountFiltered1(&mtstate->ps, 1);
3327 return true; /* done with the tuple */
3328 }
3329
3330 if (resultRelInfo->ri_WithCheckOptions != NIL)
3331 {
3332 /*
3333 * Check target's existing tuple against SELECT-applicable USING
3334 * security barrier quals (if any), enforced here as RLS checks/WCOs.
3335 *
3336 * The rewriter creates WCOs from the USING quals of SELECT policies,
3337 * and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK. If FOR
3338 * UPDATE/SHARE was specified, UPDATE permissions are required on the
3339 * target table, and the rewriter also adds WCOs built from the USING
3340 * quals of UPDATE policies, using WCOs of the same kind, and this
3341 * check enforces them too.
3342 */
3344 existing,
3345 mtstate->ps.state);
3346 }
3347
3348 /* RETURNING is required for DO SELECT */
3349 Assert(resultRelInfo->ri_projectReturning);
3350
3351 *returning = ExecProcessReturning(context, resultRelInfo, false,
3352 existing, existing, context->planSlot);
3353
3354 if (canSetTag)
3355 context->estate->es_processed++;
3356
3357 /*
3358 * Before releasing the existing tuple, make sure that the returning slot
3359 * has a local copy of any pass-by-reference values.
3360 */
3361 ExecMaterializeSlot(*returning);
3362
3363 /*
3364 * Clear out existing tuple, as there might not be another conflict among
3365 * the next input rows. Don't want to hold resources till the end of the
3366 * query.
3367 */
3369
3370 return true;
3371}
3372
3373/*
3374 * Perform MERGE.
3375 */
3376static TupleTableSlot *
3378 ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
3379{
3381 bool matched;
3382
3383 /*-----
3384 * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
3385 * valid, depending on whether the result relation is a table or a view.
3386 * We execute the first action for which the additional WHEN MATCHED AND
3387 * quals pass. If an action without quals is found, that action is
3388 * executed.
3389 *
3390 * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
3391 * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
3392 * in sequence until one passes. This is almost identical to the WHEN
3393 * MATCHED case, and both cases are handled by ExecMergeMatched().
3394 *
3395 * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
3396 * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
3397 * TARGET] actions in sequence until one passes.
3398 *
3399 * Things get interesting in case of concurrent update/delete of the
3400 * target tuple. Such concurrent update/delete is detected while we are
3401 * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
3402 *
3403 * A concurrent update can:
3404 *
3405 * 1. modify the target tuple so that the results from checking any
3406 * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
3407 * SOURCE actions potentially change, but the result from the join
3408 * quals does not change.
3409 *
3410 * In this case, we are still dealing with the same kind of match
3411 * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
3412 * actions from the start and choose the first one that satisfies the
3413 * new target tuple.
3414 *
3415 * 2. modify the target tuple in the WHEN MATCHED case so that the join
3416 * quals no longer pass and hence the source and target tuples no
3417 * longer match.
3418 *
3419 * In this case, we are now dealing with a NOT MATCHED case, and we
3420 * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
3421 * TARGET] actions. First ExecMergeMatched() processes the list of
3422 * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
3423 * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
3424 * TARGET] actions in sequence until one passes. Thus we may execute
3425 * two actions; one of each kind.
3426 *
3427 * Thus we support concurrent updates that turn MATCHED candidate rows
3428 * into NOT MATCHED rows. However, we do not attempt to support cases
3429 * that would turn NOT MATCHED rows into MATCHED rows, or which would
3430 * cause a target row to match a different source row.
3431 *
3432 * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
3433 * [BY TARGET].
3434 *
3435 * ExecMergeMatched() takes care of following the update chain and
3436 * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
3437 * action, as long as the target tuple still exists. If the target tuple
3438 * gets deleted or a concurrent update causes the join quals to fail, it
3439 * returns a matched status of false and we call ExecMergeNotMatched().
3440 * Given that ExecMergeMatched() always makes progress by following the
3441 * update chain and we never switch from ExecMergeNotMatched() to
3442 * ExecMergeMatched(), there is no risk of a livelock.
3443 */
3444 matched = tupleid != NULL || oldtuple != NULL;
3445 if (matched)
3446 rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3447 canSetTag, &matched);
3448
3449 /*
3450 * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3451 * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3452 * "matched" to false, indicating that it no longer matches).
3453 */
3454 if (!matched)
3455 {
3456 /*
3457 * If a concurrent update turned a MATCHED case into a NOT MATCHED
3458 * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3459 * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3460 * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3461 * SOURCE action, and computed the row to return. If so, we cannot
3462 * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3463 * pending (to be processed on the next call to ExecModifyTable()).
3464 * Otherwise, just process the action now.
3465 */
3466 if (rslot == NULL)
3467 rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3468 else
3469 context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3470 }
3471
3472 return rslot;
3473}
3474
3475/*
3476 * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3477 * action, depending on whether the join quals are satisfied. If the target
3478 * relation is a table, the current target tuple is identified by tupleid.
3479 * Otherwise, if the target relation is a view, oldtuple is the current target
3480 * tuple from the view.
3481 *
3482 * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3483 * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3484 * action do not pass, we check the second, then the third and so on. If we
3485 * reach the end without finding a qualifying action, we return NULL.
3486 * Otherwise, we execute the qualifying action and return its RETURNING
3487 * result, if any, or NULL.
3488 *
3489 * On entry, "*matched" is assumed to be true. If a concurrent update or
3490 * delete is detected that causes the join quals to no longer pass, we set it
3491 * to false, indicating that the caller should process any NOT MATCHED [BY
3492 * TARGET] actions.
3493 *
3494 * After a concurrent update, we restart from the first action to look for a
3495 * new qualifying action to execute. If the join quals originally passed, and
3496 * the concurrent update caused them to no longer pass, then we switch from
3497 * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3498 * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3499 * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3500 * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3501 */
3502static TupleTableSlot *
3504 ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3505 bool *matched)
3506{
3507 ModifyTableState *mtstate = context->mtstate;
3508 List **mergeActions = resultRelInfo->ri_MergeActions;
3513 EState *estate = context->estate;
3514 ExprContext *econtext = mtstate->ps.ps_ExprContext;
3515 bool isNull;
3516 EPQState *epqstate = &mtstate->mt_epqstate;
3517 ListCell *l;
3518
3519 /* Expect matched to be true on entry */
3520 Assert(*matched);
3521
3522 /*
3523 * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3524 * are done.
3525 */
3528 return NULL;
3529
3530 /*
3531 * Make tuple and any needed join variables available to ExecQual and
3532 * ExecProject. The target's existing tuple is installed in the scantuple.
3533 * This target relation's slot is required only in the case of a MATCHED
3534 * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3535 */
3536 econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3537 econtext->ecxt_innertuple = context->planSlot;
3538 econtext->ecxt_outertuple = NULL;
3539
3540 /*
3541 * This routine is only invoked for matched target rows, so we should
3542 * either have the tupleid of the target row, or an old tuple from the
3543 * target wholerow junk attr.
3544 */
3545 Assert(tupleid != NULL || oldtuple != NULL);
3547 if (oldtuple != NULL)
3548 {
3549 Assert(!resultRelInfo->ri_needLockTagTuple);
3550 ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3551 false);
3552 }
3553 else
3554 {
3555 if (resultRelInfo->ri_needLockTagTuple)
3556 {
3557 /*
3558 * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3559 * that don't match mas_whenqual. MERGE on system catalogs is a
3560 * minor use case, so don't bother optimizing those.
3561 */
3562 LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3564 lockedtid = *tupleid;
3565 }
3567 tupleid,
3569 resultRelInfo->ri_oldTupleSlot))
3570 elog(ERROR, "failed to fetch the target tuple");
3571 }
3572
3573 /*
3574 * Test the join condition. If it's satisfied, perform a MATCHED action.
3575 * Otherwise, perform a NOT MATCHED BY SOURCE action.
3576 *
3577 * Note that this join condition will be NULL if there are no NOT MATCHED
3578 * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3579 * need only consider MATCHED actions here.
3580 */
3581 if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3583 else
3585
3587
3588 foreach(l, actionStates)
3589 {
3591 CmdType commandType = relaction->mas_action->commandType;
3594
3595 /*
3596 * Test condition, if any.
3597 *
3598 * In the absence of any condition, we perform the action
3599 * unconditionally (no need to check separately since ExecQual() will
3600 * return true if there are no conditions to evaluate).
3601 */
3602 if (!ExecQual(relaction->mas_whenqual, econtext))
3603 continue;
3604
3605 /*
3606 * Check if the existing target tuple meets the USING checks of
3607 * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3608 * error.
3609 *
3610 * The WITH CHECK quals for UPDATE RLS policies are applied in
3611 * ExecUpdateAct() and hence we need not do anything special to handle
3612 * them.
3613 *
3614 * NOTE: We must do this after WHEN quals are evaluated, so that we
3615 * check policies only when they matter.
3616 */
3617 if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3618 {
3619 ExecWithCheckOptions(commandType == CMD_UPDATE ?
3621 resultRelInfo,
3622 resultRelInfo->ri_oldTupleSlot,
3623 context->mtstate->ps.state);
3624 }
3625
3626 /* Perform stated action */
3627 switch (commandType)
3628 {
3629 case CMD_UPDATE:
3630
3631 /*
3632 * Project the output tuple, and use that to update the table.
3633 * We don't need to filter out junk attributes, because the
3634 * UPDATE action's targetlist doesn't have any.
3635 */
3636 newslot = ExecProject(relaction->mas_proj);
3637
3638 mtstate->mt_merge_action = relaction;
3639 if (!ExecUpdatePrologue(context, resultRelInfo,
3641 {
3642 if (result == TM_Ok)
3643 goto out; /* "do nothing" */
3644
3645 break; /* concurrent update/delete */
3646 }
3647
3648 /* INSTEAD OF ROW UPDATE Triggers */
3649 if (resultRelInfo->ri_TrigDesc &&
3650 resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3651 {
3652 if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3653 oldtuple, newslot))
3654 goto out; /* "do nothing" */
3655 }
3656 else
3657 {
3658 /* checked ri_needLockTagTuple above */
3659 Assert(oldtuple == NULL);
3660
3661 result = ExecUpdateAct(context, resultRelInfo, tupleid,
3662 NULL, newslot, canSetTag,
3663 &updateCxt);
3664
3665 /*
3666 * As in ExecUpdate(), if ExecUpdateAct() reports that a
3667 * cross-partition update was done, then there's nothing
3668 * else for us to do --- the UPDATE has been turned into a
3669 * DELETE and an INSERT, and we must not perform any of
3670 * the usual post-update tasks. Also, the RETURNING tuple
3671 * (if any) has been projected, so we can just return
3672 * that.
3673 */
3674 if (updateCxt.crossPartUpdate)
3675 {
3676 mtstate->mt_merge_updated += 1;
3677 rslot = context->cpUpdateReturningSlot;
3678 goto out;
3679 }
3680 }
3681
3682 if (result == TM_Ok)
3683 {
3684 ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3685 tupleid, NULL, newslot);
3686 mtstate->mt_merge_updated += 1;
3687 }
3688 break;
3689
3690 case CMD_DELETE:
3691 mtstate->mt_merge_action = relaction;
3692 if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3693 NULL, NULL, &result))
3694 {
3695 if (result == TM_Ok)
3696 goto out; /* "do nothing" */
3697
3698 break; /* concurrent update/delete */
3699 }
3700
3701 /* INSTEAD OF ROW DELETE Triggers */
3702 if (resultRelInfo->ri_TrigDesc &&
3703 resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3704 {
3705 if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3706 oldtuple))
3707 goto out; /* "do nothing" */
3708 }
3709 else
3710 {
3711 /* checked ri_needLockTagTuple above */
3712 Assert(oldtuple == NULL);
3713
3714 result = ExecDeleteAct(context, resultRelInfo, tupleid,
3715 false);
3716 }
3717
3718 if (result == TM_Ok)
3719 {
3720 ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3721 false);
3722 mtstate->mt_merge_deleted += 1;
3723 }
3724 break;
3725
3726 case CMD_NOTHING:
3727 /* Doing nothing is always OK */
3728 result = TM_Ok;
3729 break;
3730
3731 default:
3732 elog(ERROR, "unknown action in MERGE WHEN clause");
3733 }
3734
3735 switch (result)
3736 {
3737 case TM_Ok:
3738 /* all good; perform final actions */
3739 if (canSetTag && commandType != CMD_NOTHING)
3740 (estate->es_processed)++;
3741
3742 break;
3743
3744 case TM_SelfModified:
3745
3746 /*
3747 * The target tuple was already updated or deleted by the
3748 * current command, or by a later command in the current
3749 * transaction. The former case is explicitly disallowed by
3750 * the SQL standard for MERGE, which insists that the MERGE
3751 * join condition should not join a target row to more than
3752 * one source row.
3753 *
3754 * The latter case arises if the tuple is modified by a
3755 * command in a BEFORE trigger, or perhaps by a command in a
3756 * volatile function used in the query. In such situations we
3757 * should not ignore the MERGE action, but it is equally
3758 * unsafe to proceed. We don't want to discard the original
3759 * MERGE action while keeping the triggered actions based on
3760 * it; and it would be no better to allow the original MERGE
3761 * action while discarding the updates that it triggered. So
3762 * throwing an error is the only safe course.
3763 */
3764 if (context->tmfd.cmax != estate->es_output_cid)
3765 ereport(ERROR,
3767 errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3768 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3769
3771 ereport(ERROR,
3773 /* translator: %s is a SQL command name */
3774 errmsg("%s command cannot affect row a second time",
3775 "MERGE"),
3776 errhint("Ensure that not more than one source row matches any one target row.")));
3777
3778 /* This shouldn't happen */
3779 elog(ERROR, "attempted to update or delete invisible tuple");
3780 break;
3781
3782 case TM_Deleted:
3784 ereport(ERROR,
3786 errmsg("could not serialize access due to concurrent delete")));
3787
3788 /*
3789 * If the tuple was already deleted, set matched to false to
3790 * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3791 */
3792 *matched = false;
3793 goto out;
3794
3795 case TM_Updated:
3796 {
3797 bool was_matched;
3800 *inputslot;
3801 LockTupleMode lockmode;
3802
3804 ereport(ERROR,
3806 errmsg("could not serialize access due to concurrent update")));
3807
3808 /*
3809 * The target tuple was concurrently updated by some other
3810 * transaction. If we are currently processing a MATCHED
3811 * action, use EvalPlanQual() with the new version of the
3812 * tuple and recheck the join qual, to detect a change
3813 * from the MATCHED to the NOT MATCHED cases. If we are
3814 * already processing a NOT MATCHED BY SOURCE action, we
3815 * skip this (cannot switch from NOT MATCHED BY SOURCE to
3816 * MATCHED).
3817 */
3818 was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3819 resultRelationDesc = resultRelInfo->ri_RelationDesc;
3820 lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3821
3822 if (was_matched)
3823 inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3824 resultRelInfo->ri_RangeTableIndex);
3825 else
3826 inputslot = resultRelInfo->ri_oldTupleSlot;
3827
3829 estate->es_snapshot,
3830 inputslot, estate->es_output_cid,
3831 lockmode, LockWaitBlock,
3833 &context->tmfd);
3834 switch (result)
3835 {
3836 case TM_Ok:
3837
3838 /*
3839 * If the tuple was updated and migrated to
3840 * another partition concurrently, the current
3841 * MERGE implementation can't follow. There's
3842 * probably a better way to handle this case, but
3843 * it'd require recognizing the relation to which
3844 * the tuple moved, and setting our current
3845 * resultRelInfo to that.
3846 */
3848 ereport(ERROR,
3850 errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3851
3852 /*
3853 * If this was a MATCHED case, use EvalPlanQual()
3854 * to recheck the join condition.
3855 */
3856 if (was_matched)
3857 {
3858 epqslot = EvalPlanQual(epqstate,
3860 resultRelInfo->ri_RangeTableIndex,
3861 inputslot);
3862
3863 /*
3864 * If the subplan didn't return a tuple, then
3865 * we must be dealing with an inner join for
3866 * which the join condition no longer matches.
3867 * This can only happen if there are no NOT
3868 * MATCHED actions, and so there is nothing
3869 * more to do.
3870 */
3871 if (TupIsNull(epqslot))
3872 goto out;
3873
3874 /*
3875 * If we got a NULL ctid from the subplan, the
3876 * join quals no longer pass and we switch to
3877 * the NOT MATCHED BY SOURCE case.
3878 */
3880 resultRelInfo->ri_RowIdAttNo,
3881 &isNull);
3882 if (isNull)
3883 *matched = false;
3884
3885 /*
3886 * Otherwise, recheck the join quals to see if
3887 * we need to switch to the NOT MATCHED BY
3888 * SOURCE case.
3889 */
3890 if (resultRelInfo->ri_needLockTagTuple)
3891 {
3893 UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3895 LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3897 lockedtid = *tupleid;
3898 }
3899
3901 tupleid,
3903 resultRelInfo->ri_oldTupleSlot))
3904 elog(ERROR, "failed to fetch the target tuple");
3905
3906 if (*matched)
3907 *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3908 econtext);
3909
3910 /* Switch lists, if necessary */
3911 if (!*matched)
3912 {
3914
3915 /*
3916 * If we have both NOT MATCHED BY SOURCE
3917 * and NOT MATCHED BY TARGET actions (a
3918 * full join between the source and target
3919 * relations), the single previously
3920 * matched tuple from the outer plan node
3921 * is treated as two not matched tuples,
3922 * in the same way as if they had not
3923 * matched to start with. Therefore, we
3924 * must adjust the outer plan node's tuple
3925 * count, if we're instrumenting the
3926 * query, to get the correct "skipped" row
3927 * count --- see show_modifytable_info().
3928 */
3929 if (outerPlanState(mtstate)->instrument &&
3932 InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3933 }
3934 }
3935
3936 /*
3937 * Loop back and process the MATCHED or NOT
3938 * MATCHED BY SOURCE actions from the start.
3939 */
3940 goto lmerge_matched;
3941
3942 case TM_Deleted:
3943
3944 /*
3945 * tuple already deleted; tell caller to run NOT
3946 * MATCHED [BY TARGET] actions
3947 */
3948 *matched = false;
3949 goto out;
3950
3951 case TM_SelfModified:
3952
3953 /*
3954 * This can be reached when following an update
3955 * chain from a tuple updated by another session,
3956 * reaching a tuple that was already updated or
3957 * deleted by the current command, or by a later
3958 * command in the current transaction. As above,
3959 * this should always be treated as an error.
3960 */
3961 if (context->tmfd.cmax != estate->es_output_cid)
3962 ereport(ERROR,
3964 errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3965 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3966
3968 ereport(ERROR,
3970 /* translator: %s is a SQL command name */
3971 errmsg("%s command cannot affect row a second time",
3972 "MERGE"),
3973 errhint("Ensure that not more than one source row matches any one target row.")));
3974
3975 /* This shouldn't happen */
3976 elog(ERROR, "attempted to update or delete invisible tuple");
3977 goto out;
3978
3979 default:
3980 /* see table_tuple_lock call in ExecDelete() */
3981 elog(ERROR, "unexpected table_tuple_lock status: %u",
3982 result);
3983 goto out;
3984 }
3985 }
3986
3987 case TM_Invisible:
3988 case TM_WouldBlock:
3989 case TM_BeingModified:
3990 /* these should not occur */
3991 elog(ERROR, "unexpected tuple operation result: %d", result);
3992 break;
3993 }
3994
3995 /* Process RETURNING if present */
3996 if (resultRelInfo->ri_projectReturning)
3997 {
3998 switch (commandType)
3999 {
4000 case CMD_UPDATE:
4001 rslot = ExecProcessReturning(context,
4002 resultRelInfo,
4003 false,
4004 resultRelInfo->ri_oldTupleSlot,
4005 newslot,
4006 context->planSlot);
4007 break;
4008
4009 case CMD_DELETE:
4010 rslot = ExecProcessReturning(context,
4011 resultRelInfo,
4012 true,
4013 resultRelInfo->ri_oldTupleSlot,
4014 NULL,
4015 context->planSlot);
4016 break;
4017
4018 case CMD_NOTHING:
4019 break;
4020
4021 default:
4022 elog(ERROR, "unrecognized commandType: %d",
4023 (int) commandType);
4024 }
4025 }
4026
4027 /*
4028 * We've activated one of the WHEN clauses, so we don't search
4029 * further. This is required behaviour, not an optimization.
4030 */
4031 break;
4032 }
4033
4034 /*
4035 * Successfully executed an action or no qualifying action was found.
4036 */
4037out:
4039 UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
4041 return rslot;
4042}
4043
4044/*
4045 * Execute the first qualifying NOT MATCHED [BY TARGET] action.
4046 */
4047static TupleTableSlot *
4049 bool canSetTag)
4050{
4051 ModifyTableState *mtstate = context->mtstate;
4052 ExprContext *econtext = mtstate->ps.ps_ExprContext;
4055 ListCell *l;
4056
4057 /*
4058 * For INSERT actions, the root relation's merge action is OK since the
4059 * INSERT's targetlist and the WHEN conditions can only refer to the
4060 * source relation and hence it does not matter which result relation we
4061 * work with.
4062 *
4063 * XXX does this mean that we can avoid creating copies of actionStates on
4064 * partitioned tables, for not-matched actions?
4065 */
4067
4068 /*
4069 * Make source tuple available to ExecQual and ExecProject. We don't need
4070 * the target tuple, since the WHEN quals and targetlist can't refer to
4071 * the target columns.
4072 */
4073 econtext->ecxt_scantuple = NULL;
4074 econtext->ecxt_innertuple = context->planSlot;
4075 econtext->ecxt_outertuple = NULL;
4076
4077 foreach(l, actionStates)
4078 {
4079 MergeActionState *action = (MergeActionState *) lfirst(l);
4080 CmdType commandType = action->mas_action->commandType;
4082
4083 /*
4084 * Test condition, if any.
4085 *
4086 * In the absence of any condition, we perform the action
4087 * unconditionally (no need to check separately since ExecQual() will
4088 * return true if there are no conditions to evaluate).
4089 */
4090 if (!ExecQual(action->mas_whenqual, econtext))
4091 continue;
4092
4093 /* Perform stated action */
4094 switch (commandType)
4095 {
4096 case CMD_INSERT:
4097
4098 /*
4099 * Project the tuple. In case of a partitioned table, the
4100 * projection was already built to use the root's descriptor,
4101 * so we don't need to map the tuple here.
4102 */
4103 newslot = ExecProject(action->mas_proj);
4104 mtstate->mt_merge_action = action;
4105
4106 rslot = ExecInsert(context, mtstate->rootResultRelInfo,
4107 newslot, canSetTag, NULL, NULL);
4108 mtstate->mt_merge_inserted += 1;
4109 break;
4110 case CMD_NOTHING:
4111 /* Do nothing */
4112 break;
4113 default:
4114 elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
4115 }
4116
4117 /*
4118 * We've activated one of the WHEN clauses, so we don't search
4119 * further. This is required behaviour, not an optimization.
4120 */
4121 break;
4122 }
4123
4124 return rslot;
4125}
4126
4127/*
4128 * Initialize state for execution of MERGE.
4129 */
4130void
4132{
4133 List *mergeActionLists = mtstate->mt_mergeActionLists;
4134 List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
4136 ResultRelInfo *resultRelInfo;
4137 ExprContext *econtext;
4138 ListCell *lc;
4139 int i;
4140
4141 if (mergeActionLists == NIL)
4142 return;
4143
4144 mtstate->mt_merge_subcommands = 0;
4145
4146 if (mtstate->ps.ps_ExprContext == NULL)
4147 ExecAssignExprContext(estate, &mtstate->ps);
4148 econtext = mtstate->ps.ps_ExprContext;
4149
4150 /*
4151 * Create a MergeActionState for each action on the mergeActionList and
4152 * add it to either a list of matched actions or not-matched actions.
4153 *
4154 * Similar logic appears in ExecInitPartitionInfo(), so if changing
4155 * anything here, do so there too.
4156 */
4157 i = 0;
4158 foreach(lc, mergeActionLists)
4159 {
4160 List *mergeActionList = lfirst(lc);
4161 Node *joinCondition;
4163 ListCell *l;
4164
4165 joinCondition = (Node *) list_nth(mergeJoinConditions, i);
4166 resultRelInfo = mtstate->resultRelInfo + i;
4167 i++;
4169
4170 /* initialize slots for MERGE fetches from this rel */
4171 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4172 ExecInitMergeTupleSlots(mtstate, resultRelInfo);
4173
4174 /* initialize state for join condition checking */
4175 resultRelInfo->ri_MergeJoinCondition =
4176 ExecInitQual((List *) joinCondition, &mtstate->ps);
4177
4178 foreach(l, mergeActionList)
4179 {
4180 MergeAction *action = (MergeAction *) lfirst(l);
4181 MergeActionState *action_state;
4184
4185 /*
4186 * Build action merge state for this rel. (For partitions,
4187 * equivalent code exists in ExecInitPartitionInfo.)
4188 */
4189 action_state = makeNode(MergeActionState);
4190 action_state->mas_action = action;
4191 action_state->mas_whenqual = ExecInitQual((List *) action->qual,
4192 &mtstate->ps);
4193
4194 /*
4195 * We create three lists - one for each MergeMatchKind - and stick
4196 * the MergeActionState into the appropriate list.
4197 */
4198 resultRelInfo->ri_MergeActions[action->matchKind] =
4199 lappend(resultRelInfo->ri_MergeActions[action->matchKind],
4200 action_state);
4201
4202 switch (action->commandType)
4203 {
4204 case CMD_INSERT:
4205 /* INSERT actions always use rootRelInfo */
4206 ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
4207 action->targetList);
4208
4209 /*
4210 * If the MERGE targets a partitioned table, any INSERT
4211 * actions must be routed through it, not the child
4212 * relations. Initialize the routing struct and the root
4213 * table's "new" tuple slot for that, if not already done.
4214 * The projection we prepare, for all relations, uses the
4215 * root relation descriptor, and targets the plan's root
4216 * slot. (This is consistent with the fact that we
4217 * checked the plan output to match the root relation,
4218 * above.)
4219 */
4220 if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
4222 {
4223 if (mtstate->mt_partition_tuple_routing == NULL)
4224 {
4225 /*
4226 * Initialize planstate for routing if not already
4227 * done.
4228 *
4229 * Note that the slot is managed as a standalone
4230 * slot belonging to ModifyTableState, so we pass
4231 * NULL for the 2nd argument.
4232 */
4233 mtstate->mt_root_tuple_slot =
4234 table_slot_create(rootRelInfo->ri_RelationDesc,
4235 NULL);
4238 rootRelInfo->ri_RelationDesc);
4239 }
4240 tgtslot = mtstate->mt_root_tuple_slot;
4241 tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
4242 }
4243 else
4244 {
4245 /*
4246 * If the MERGE targets an inherited table, we insert
4247 * into the root table, so we must initialize its
4248 * "new" tuple slot, if not already done, and use its
4249 * relation descriptor for the projection.
4250 *
4251 * For non-inherited tables, rootRelInfo and
4252 * resultRelInfo are the same, and the "new" tuple
4253 * slot will already have been initialized.
4254 */
4255 if (rootRelInfo->ri_newTupleSlot == NULL)
4256 rootRelInfo->ri_newTupleSlot =
4257 table_slot_create(rootRelInfo->ri_RelationDesc,
4258 &estate->es_tupleTable);
4259
4260 tgtslot = rootRelInfo->ri_newTupleSlot;
4261 tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
4262 }
4263
4264 action_state->mas_proj =
4265 ExecBuildProjectionInfo(action->targetList, econtext,
4266 tgtslot,
4267 &mtstate->ps,
4268 tgtdesc);
4269
4271 break;
4272 case CMD_UPDATE:
4273 action_state->mas_proj =
4274 ExecBuildUpdateProjection(action->targetList,
4275 true,
4276 action->updateColnos,
4278 econtext,
4279 resultRelInfo->ri_newTupleSlot,
4280 &mtstate->ps);
4282 break;
4283 case CMD_DELETE:
4285 break;
4286 case CMD_NOTHING:
4287 break;
4288 default:
4289 elog(ERROR, "unknown action in MERGE WHEN clause");
4290 break;
4291 }
4292 }
4293 }
4294
4295 /*
4296 * If the MERGE targets an inherited table, any INSERT actions will use
4297 * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
4298 * Therefore we must initialize its WITH CHECK OPTION constraints and
4299 * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
4300 * entries.
4301 *
4302 * Note that the planner does not build a withCheckOptionList or
4303 * returningList for the root relation, but as in ExecInitPartitionInfo,
4304 * we can use the first resultRelInfo entry as a reference to calculate
4305 * the attno's for the root table.
4306 */
4307 if (rootRelInfo != mtstate->resultRelInfo &&
4309 (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
4310 {
4311 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
4312 Relation rootRelation = rootRelInfo->ri_RelationDesc;
4316 bool found_whole_row;
4317
4318 if (node->withCheckOptionLists != NIL)
4319 {
4320 List *wcoList;
4321 List *wcoExprs = NIL;
4322
4323 /* There should be as many WCO lists as result rels */
4326
4327 /*
4328 * Use the first WCO list as a reference. In the most common case,
4329 * this will be for the same relation as rootRelInfo, and so there
4330 * will be no need to adjust its attno's.
4331 */
4333 if (rootRelation != firstResultRel)
4334 {
4335 /* Convert any Vars in it to contain the root's attno's */
4336 part_attmap =
4339 false);
4340
4341 wcoList = (List *)
4343 firstVarno, 0,
4345 RelationGetForm(rootRelation)->reltype,
4346 &found_whole_row);
4347 }
4348
4349 foreach(lc, wcoList)
4350 {
4353 &mtstate->ps);
4354
4356 }
4357
4358 rootRelInfo->ri_WithCheckOptions = wcoList;
4359 rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4360 }
4361
4362 if (node->returningLists != NIL)
4363 {
4364 List *returningList;
4365
4366 /* There should be as many returning lists as result rels */
4369
4370 /*
4371 * Use the first returning list as a reference. In the most common
4372 * case, this will be for the same relation as rootRelInfo, and so
4373 * there will be no need to adjust its attno's.
4374 */
4375 returningList = linitial(node->returningLists);
4376 if (rootRelation != firstResultRel)
4377 {
4378 /* Convert any Vars in it to contain the root's attno's */
4379 if (part_attmap == NULL)
4380 part_attmap =
4383 false);
4384
4385 returningList = (List *)
4386 map_variable_attnos((Node *) returningList,
4387 firstVarno, 0,
4389 RelationGetForm(rootRelation)->reltype,
4390 &found_whole_row);
4391 }
4392 rootRelInfo->ri_returningList = returningList;
4393
4394 /* Initialize the RETURNING projection */
4395 rootRelInfo->ri_projectReturning =
4396 ExecBuildProjectionInfo(returningList, econtext,
4397 mtstate->ps.ps_ResultTupleSlot,
4398 &mtstate->ps,
4399 RelationGetDescr(rootRelation));
4400 }
4401 }
4402}
4403
4404/*
4405 * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
4406 *
4407 * We mark 'projectNewInfoValid' even though the projections themselves
4408 * are not initialized here.
4409 */
4410void
4412 ResultRelInfo *resultRelInfo)
4413{
4414 EState *estate = mtstate->ps.state;
4415
4416 Assert(!resultRelInfo->ri_projectNewInfoValid);
4417
4418 resultRelInfo->ri_oldTupleSlot =
4419 table_slot_create(resultRelInfo->ri_RelationDesc,
4420 &estate->es_tupleTable);
4421 resultRelInfo->ri_newTupleSlot =
4422 table_slot_create(resultRelInfo->ri_RelationDesc,
4423 &estate->es_tupleTable);
4424 resultRelInfo->ri_projectNewInfoValid = true;
4425}
4426
4427/*
4428 * Process BEFORE EACH STATEMENT triggers
4429 */
4430static void
4432{
4433 ModifyTable *plan = (ModifyTable *) node->ps.plan;
4434 ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4435
4436 switch (node->operation)
4437 {
4438 case CMD_INSERT:
4439 ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4440 if (plan->onConflictAction == ONCONFLICT_UPDATE)
4442 resultRelInfo);
4443 break;
4444 case CMD_UPDATE:
4445 ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4446 break;
4447 case CMD_DELETE:
4448 ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4449 break;
4450 case CMD_MERGE:
4452 ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4454 ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4456 ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4457 break;
4458 default:
4459 elog(ERROR, "unknown operation");
4460 break;
4461 }
4462}
4463
4464/*
4465 * Process AFTER EACH STATEMENT triggers
4466 */
4467static void
4469{
4470 ModifyTable *plan = (ModifyTable *) node->ps.plan;
4471 ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4472
4473 switch (node->operation)
4474 {
4475 case CMD_INSERT:
4476 if (plan->onConflictAction == ONCONFLICT_UPDATE)
4478 resultRelInfo,
4480 ExecASInsertTriggers(node->ps.state, resultRelInfo,
4481 node->mt_transition_capture);
4482 break;
4483 case CMD_UPDATE:
4484 ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4485 node->mt_transition_capture);
4486 break;
4487 case CMD_DELETE:
4488 ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4489 node->mt_transition_capture);
4490 break;
4491 case CMD_MERGE:
4493 ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4494 node->mt_transition_capture);
4496 ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4497 node->mt_transition_capture);
4499 ExecASInsertTriggers(node->ps.state, resultRelInfo,
4500 node->mt_transition_capture);
4501 break;
4502 default:
4503 elog(ERROR, "unknown operation");
4504 break;
4505 }
4506}
4507
4508/*
4509 * Set up the state needed for collecting transition tuples for AFTER
4510 * triggers.
4511 */
4512static void
4514{
4515 ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
4516 ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4517
4518 /* Check for transition tables on the directly targeted relation. */
4519 mtstate->mt_transition_capture =
4521 RelationGetRelid(targetRelInfo->ri_RelationDesc),
4522 mtstate->operation);
4523 if (plan->operation == CMD_INSERT &&
4524 plan->onConflictAction == ONCONFLICT_UPDATE)
4525 mtstate->mt_oc_transition_capture =
4527 RelationGetRelid(targetRelInfo->ri_RelationDesc),
4528 CMD_UPDATE);
4529}
4530
4531/*
4532 * ExecPrepareTupleRouting --- prepare for routing one tuple
4533 *
4534 * Determine the partition in which the tuple in slot is to be inserted,
4535 * and return its ResultRelInfo in *partRelInfo. The return value is
4536 * a slot holding the tuple of the partition rowtype.
4537 *
4538 * This also sets the transition table information in mtstate based on the
4539 * selected partition.
4540 */
4541static TupleTableSlot *
4543 EState *estate,
4544 PartitionTupleRouting *proute,
4545 ResultRelInfo *targetRelInfo,
4546 TupleTableSlot *slot,
4548{
4549 ResultRelInfo *partrel;
4550 TupleConversionMap *map;
4551
4552 /*
4553 * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
4554 * not find a valid partition for the tuple in 'slot' then an error is
4555 * raised. An error may also be raised if the found partition is not a
4556 * valid target for INSERTs. This is required since a partitioned table
4557 * UPDATE to another partition becomes a DELETE+INSERT.
4558 */
4559 partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4560
4561 /*
4562 * If we're capturing transition tuples, we might need to convert from the
4563 * partition rowtype to root partitioned table's rowtype. But if there
4564 * are no BEFORE triggers on the partition that could change the tuple, we
4565 * can just remember the original unconverted tuple to avoid a needless
4566 * round trip conversion.
4567 */
4568 if (mtstate->mt_transition_capture != NULL)
4569 {
4571
4574
4577 }
4578
4579 /*
4580 * Convert the tuple, if necessary.
4581 */
4582 map = ExecGetRootToChildMap(partrel, estate);
4583 if (map != NULL)
4584 {
4585 TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4586
4587 slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4588 }
4589
4590 *partRelInfo = partrel;
4591 return slot;
4592}
4593
4594/* ----------------------------------------------------------------
4595 * ExecModifyTable
4596 *
4597 * Perform table modifications as required, and return RETURNING results
4598 * if needed.
4599 * ----------------------------------------------------------------
4600 */
4601static TupleTableSlot *
4603{
4605 ModifyTableContext context;
4606 EState *estate = node->ps.state;
4607 CmdType operation = node->operation;
4608 ResultRelInfo *resultRelInfo;
4610 TupleTableSlot *slot;
4614 HeapTuple oldtuple;
4616 bool tuplock;
4617
4619
4620 /*
4621 * This should NOT get called during EvalPlanQual; we should have passed a
4622 * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4623 * Assert because this condition is easy to miss in testing. (Note:
4624 * although ModifyTable should not get executed within an EvalPlanQual
4625 * operation, we do have to allow it to be initialized and shut down in
4626 * case it is within a CTE subplan. Hence this test must be here, not in
4627 * ExecInitModifyTable.)
4628 */
4629 if (estate->es_epq_active != NULL)
4630 elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4631
4632 /*
4633 * If we've already completed processing, don't try to do more. We need
4634 * this test because ExecPostprocessPlan might call us an extra time, and
4635 * our subplan's nodes aren't necessarily robust against being called
4636 * extra times.
4637 */
4638 if (node->mt_done)
4639 return NULL;
4640
4641 /*
4642 * On first call, fire BEFORE STATEMENT triggers before proceeding.
4643 */
4644 if (node->fireBSTriggers)
4645 {
4646 fireBSTriggers(node);
4647 node->fireBSTriggers = false;
4648 }
4649
4650 /* Preload local variables */
4651 resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4653
4654 /* Set global context */
4655 context.mtstate = node;
4656 context.epqstate = &node->mt_epqstate;
4657 context.estate = estate;
4658
4659 /*
4660 * Fetch rows from subplan, and execute the required table modification
4661 * for each row.
4662 */
4663 for (;;)
4664 {
4665 /*
4666 * Reset the per-output-tuple exprcontext. This is needed because
4667 * triggers expect to use that context as workspace. It's a bit ugly
4668 * to do this below the top level of the plan, however. We might need
4669 * to rethink this later.
4670 */
4672
4673 /*
4674 * Reset per-tuple memory context used for processing on conflict and
4675 * returning clauses, to free any expression evaluation storage
4676 * allocated in the previous cycle.
4677 */
4678 if (pstate->ps_ExprContext)
4680
4681 /*
4682 * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4683 * to execute, do so now --- see the comments in ExecMerge().
4684 */
4686 {
4687 context.planSlot = node->mt_merge_pending_not_matched;
4688 context.cpDeletedSlot = NULL;
4689
4690 slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4691 node->canSetTag);
4692
4693 /* Clear the pending action */
4695
4696 /*
4697 * If we got a RETURNING result, return it to the caller. We'll
4698 * continue the work on next call.
4699 */
4700 if (slot)
4701 return slot;
4702
4703 continue; /* continue with the next tuple */
4704 }
4705
4706 /* Fetch the next row from subplan */
4708 context.cpDeletedSlot = NULL;
4709
4710 /* No more tuples to process? */
4711 if (TupIsNull(context.planSlot))
4712 break;
4713
4714 /*
4715 * When there are multiple result relations, each tuple contains a
4716 * junk column that gives the OID of the rel from which it came.
4717 * Extract it and select the correct result relation.
4718 */
4720 {
4721 Datum datum;
4722 bool isNull;
4723 Oid resultoid;
4724
4725 datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4726 &isNull);
4727 if (isNull)
4728 {
4729 /*
4730 * For commands other than MERGE, any tuples having InvalidOid
4731 * for tableoid are errors. For MERGE, we may need to handle
4732 * them as WHEN NOT MATCHED clauses if any, so do that.
4733 *
4734 * Note that we use the node's toplevel resultRelInfo, not any
4735 * specific partition's.
4736 */
4737 if (operation == CMD_MERGE)
4738 {
4739 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4740
4741 slot = ExecMerge(&context, node->resultRelInfo,
4742 NULL, NULL, node->canSetTag);
4743
4744 /*
4745 * If we got a RETURNING result, return it to the caller.
4746 * We'll continue the work on next call.
4747 */
4748 if (slot)
4749 return slot;
4750
4751 continue; /* continue with the next tuple */
4752 }
4753
4754 elog(ERROR, "tableoid is NULL");
4755 }
4756 resultoid = DatumGetObjectId(datum);
4757
4758 /* If it's not the same as last time, we need to locate the rel */
4759 if (resultoid != node->mt_lastResultOid)
4760 resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4761 false, true);
4762 }
4763
4764 /*
4765 * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4766 * here is compute the RETURNING expressions.
4767 */
4768 if (resultRelInfo->ri_usesFdwDirectModify)
4769 {
4770 Assert(resultRelInfo->ri_projectReturning);
4771
4772 /*
4773 * A scan slot containing the data that was actually inserted,
4774 * updated or deleted has already been made available to
4775 * ExecProcessReturning by IterateDirectModify, so no need to
4776 * provide it here. The individual old and new slots are not
4777 * needed, since direct-modify is disabled if the RETURNING list
4778 * refers to OLD/NEW values.
4779 */
4780 Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4781 (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4782
4783 slot = ExecProcessReturning(&context, resultRelInfo,
4785 NULL, NULL, context.planSlot);
4786
4787 return slot;
4788 }
4789
4790 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4791 slot = context.planSlot;
4792
4793 tupleid = NULL;
4794 oldtuple = NULL;
4795
4796 /*
4797 * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4798 * to be updated/deleted/merged. For a heap relation, that's a TID;
4799 * otherwise we may have a wholerow junk attr that carries the old
4800 * tuple in toto. Keep this in step with the part of
4801 * ExecInitModifyTable that sets up ri_RowIdAttNo.
4802 */
4805 {
4806 char relkind;
4807 Datum datum;
4808 bool isNull;
4809
4810 relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4811 if (relkind == RELKIND_RELATION ||
4812 relkind == RELKIND_MATVIEW ||
4813 relkind == RELKIND_PARTITIONED_TABLE)
4814 {
4815 /*
4816 * ri_RowIdAttNo refers to a ctid attribute. See the comment
4817 * in ExecInitModifyTable().
4818 */
4820 relkind == RELKIND_PARTITIONED_TABLE);
4821 datum = ExecGetJunkAttribute(slot,
4822 resultRelInfo->ri_RowIdAttNo,
4823 &isNull);
4824
4825 /*
4826 * For commands other than MERGE, any tuples having a null row
4827 * identifier are errors. For MERGE, we may need to handle
4828 * them as WHEN NOT MATCHED clauses if any, so do that.
4829 *
4830 * Note that we use the node's toplevel resultRelInfo, not any
4831 * specific partition's.
4832 */
4833 if (isNull)
4834 {
4835 if (operation == CMD_MERGE)
4836 {
4837 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4838
4839 slot = ExecMerge(&context, node->resultRelInfo,
4840 NULL, NULL, node->canSetTag);
4841
4842 /*
4843 * If we got a RETURNING result, return it to the
4844 * caller. We'll continue the work on next call.
4845 */
4846 if (slot)
4847 return slot;
4848
4849 continue; /* continue with the next tuple */
4850 }
4851
4852 elog(ERROR, "ctid is NULL");
4853 }
4854
4856 tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4858 }
4859
4860 /*
4861 * Use the wholerow attribute, when available, to reconstruct the
4862 * old relation tuple. The old tuple serves one or both of two
4863 * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4864 * provides values for any unchanged columns for the NEW tuple of
4865 * an UPDATE, because the subplan does not produce all the columns
4866 * of the target table.
4867 *
4868 * Note that the wholerow attribute does not carry system columns,
4869 * so foreign table triggers miss seeing those, except that we
4870 * know enough here to set t_tableOid. Quite separately from
4871 * this, the FDW may fetch its own junk attrs to identify the row.
4872 *
4873 * Other relevant relkinds, currently limited to views, always
4874 * have a wholerow attribute.
4875 */
4876 else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4877 {
4878 datum = ExecGetJunkAttribute(slot,
4879 resultRelInfo->ri_RowIdAttNo,
4880 &isNull);
4881
4882 /*
4883 * For commands other than MERGE, any tuples having a null row
4884 * identifier are errors. For MERGE, we may need to handle
4885 * them as WHEN NOT MATCHED clauses if any, so do that.
4886 *
4887 * Note that we use the node's toplevel resultRelInfo, not any
4888 * specific partition's.
4889 */
4890 if (isNull)
4891 {
4892 if (operation == CMD_MERGE)
4893 {
4894 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4895
4896 slot = ExecMerge(&context, node->resultRelInfo,
4897 NULL, NULL, node->canSetTag);
4898
4899 /*
4900 * If we got a RETURNING result, return it to the
4901 * caller. We'll continue the work on next call.
4902 */
4903 if (slot)
4904 return slot;
4905
4906 continue; /* continue with the next tuple */
4907 }
4908
4909 elog(ERROR, "wholerow is NULL");
4910 }
4911
4912 oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4913 oldtupdata.t_len =
4916 /* Historically, view triggers see invalid t_tableOid. */
4917 oldtupdata.t_tableOid =
4918 (relkind == RELKIND_VIEW) ? InvalidOid :
4919 RelationGetRelid(resultRelInfo->ri_RelationDesc);
4920
4921 oldtuple = &oldtupdata;
4922 }
4923 else
4924 {
4925 /* Only foreign tables are allowed to omit a row-ID attr */
4926 Assert(relkind == RELKIND_FOREIGN_TABLE);
4927 }
4928 }
4929
4930 switch (operation)
4931 {
4932 case CMD_INSERT:
4933 /* Initialize projection info if first time for this table */
4934 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4935 ExecInitInsertProjection(node, resultRelInfo);
4936 slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4937 slot = ExecInsert(&context, resultRelInfo, slot,
4938 node->canSetTag, NULL, NULL);
4939 break;
4940
4941 case CMD_UPDATE:
4942 tuplock = false;
4943
4944 /* Initialize projection info if first time for this table */
4945 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4946 ExecInitUpdateProjection(node, resultRelInfo);
4947
4948 /*
4949 * Make the new tuple by combining plan's output tuple with
4950 * the old tuple being updated.
4951 */
4952 oldSlot = resultRelInfo->ri_oldTupleSlot;
4953 if (oldtuple != NULL)
4954 {
4955 Assert(!resultRelInfo->ri_needLockTagTuple);
4956 /* Use the wholerow junk attr as the old tuple. */
4957 ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4958 }
4959 else
4960 {
4961 /* Fetch the most recent version of old tuple. */
4962 Relation relation = resultRelInfo->ri_RelationDesc;
4963
4964 if (resultRelInfo->ri_needLockTagTuple)
4965 {
4967 tuplock = true;
4968 }
4971 oldSlot))
4972 elog(ERROR, "failed to fetch tuple being updated");
4973 }
4974 slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4975 oldSlot);
4976
4977 /* Now apply the update. */
4978 slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4979 oldSlot, slot, node->canSetTag);
4980 if (tuplock)
4981 UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4983 break;
4984
4985 case CMD_DELETE:
4986 slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4987 true, false, node->canSetTag, NULL, NULL, NULL);
4988 break;
4989
4990 case CMD_MERGE:
4991 slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4992 node->canSetTag);
4993 break;
4994
4995 default:
4996 elog(ERROR, "unknown operation");
4997 break;
4998 }
4999
5000 /*
5001 * If we got a RETURNING result, return it to caller. We'll continue
5002 * the work on next call.
5003 */
5004 if (slot)
5005 return slot;
5006 }
5007
5008 /*
5009 * Insert remaining tuples for batch insert.
5010 */
5012 ExecPendingInserts(estate);
5013
5014 /*
5015 * We're done, but fire AFTER STATEMENT triggers before exiting.
5016 */
5017 fireASTriggers(node);
5018
5019 node->mt_done = true;
5020
5021 return NULL;
5022}
5023
5024/*
5025 * ExecLookupResultRelByOid
5026 * If the table with given OID is among the result relations to be
5027 * updated by the given ModifyTable node, return its ResultRelInfo.
5028 *
5029 * If not found, return NULL if missing_ok, else raise error.
5030 *
5031 * If update_cache is true, then upon successful lookup, update the node's
5032 * one-element cache. ONLY ExecModifyTable may pass true for this.
5033 */
5036 bool missing_ok, bool update_cache)
5037{
5038 if (node->mt_resultOidHash)
5039 {
5040 /* Use the pre-built hash table to locate the rel */
5042
5045 if (mtlookup)
5046 {
5047 if (update_cache)
5048 {
5050 node->mt_lastResultIndex = mtlookup->relationIndex;
5051 }
5052 return node->resultRelInfo + mtlookup->relationIndex;
5053 }
5054 }
5055 else
5056 {
5057 /* With few target rels, just search the ResultRelInfo array */
5058 for (int ndx = 0; ndx < node->mt_nrels; ndx++)
5059 {
5061
5062 if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
5063 {
5064 if (update_cache)
5065 {
5067 node->mt_lastResultIndex = ndx;
5068 }
5069 return rInfo;
5070 }
5071 }
5072 }
5073
5074 if (!missing_ok)
5075 elog(ERROR, "incorrect result relation OID %u", resultoid);
5076 return NULL;
5077}
5078
5079/* ----------------------------------------------------------------
5080 * ExecInitModifyTable
5081 * ----------------------------------------------------------------
5082 */
5084ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
5085{
5086 ModifyTableState *mtstate;
5087 Plan *subplan = outerPlan(node);
5088 CmdType operation = node->operation;
5090 int nrels;
5091 List *resultRelations = NIL;
5092 List *withCheckOptionLists = NIL;
5093 List *returningLists = NIL;
5094 List *updateColnosLists = NIL;
5095 List *mergeActionLists = NIL;
5096 List *mergeJoinConditions = NIL;
5097 ResultRelInfo *resultRelInfo;
5098 List *arowmarks;
5099 ListCell *l;
5100 int i;
5101 Relation rel;
5102
5103 /* check for unsupported flags */
5104 Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
5105
5106 /*
5107 * Only consider unpruned relations for initializing their ResultRelInfo
5108 * struct and other fields such as withCheckOptions, etc.
5109 *
5110 * Note: We must avoid pruning every result relation. This is important
5111 * for MERGE, since even if every result relation is pruned from the
5112 * subplan, there might still be NOT MATCHED rows, for which there may be
5113 * INSERT actions to perform. To allow these actions to be found, at
5114 * least one result relation must be kept. Also, when inserting into a
5115 * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
5116 * as a reference for building the ResultRelInfo of the target partition.
5117 * In either case, it doesn't matter which result relation is kept, so we
5118 * just keep the first one, if all others have been pruned. See also,
5119 * ExecDoInitialPruning(), which ensures that this first result relation
5120 * has been locked.
5121 */
5122 i = 0;
5123 foreach(l, node->resultRelations)
5124 {
5125 Index rti = lfirst_int(l);
5126 bool keep_rel;
5127
5129 if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
5130 {
5131 /* all result relations pruned; keep the first one */
5132 keep_rel = true;
5133 rti = linitial_int(node->resultRelations);
5134 i = 0;
5135 }
5136
5137 if (keep_rel)
5138 {
5139 resultRelations = lappend_int(resultRelations, rti);
5140 if (node->withCheckOptionLists)
5141 {
5144 i);
5145
5146 withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
5147 }
5148 if (node->returningLists)
5149 {
5150 List *returningList = list_nth_node(List,
5151 node->returningLists,
5152 i);
5153
5154 returningLists = lappend(returningLists, returningList);
5155 }
5156 if (node->updateColnosLists)
5157 {
5159
5160 updateColnosLists = lappend(updateColnosLists, updateColnosList);
5161 }
5162 if (node->mergeActionLists)
5163 {
5164 List *mergeActionList = list_nth(node->mergeActionLists, i);
5165
5166 mergeActionLists = lappend(mergeActionLists, mergeActionList);
5167 }
5168 if (node->mergeJoinConditions)
5169 {
5170 List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
5171
5172 mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
5173 }
5174 }
5175 i++;
5176 }
5177 nrels = list_length(resultRelations);
5178 Assert(nrels > 0);
5179
5180 /*
5181 * create state structure
5182 */
5183 mtstate = makeNode(ModifyTableState);
5184 mtstate->ps.plan = (Plan *) node;
5185 mtstate->ps.state = estate;
5186 mtstate->ps.ExecProcNode = ExecModifyTable;
5187
5188 mtstate->operation = operation;
5189 mtstate->canSetTag = node->canSetTag;
5190 mtstate->mt_done = false;
5191
5192 mtstate->mt_nrels = nrels;
5193 mtstate->resultRelInfo = palloc_array(ResultRelInfo, nrels);
5194
5196 mtstate->mt_merge_inserted = 0;
5197 mtstate->mt_merge_updated = 0;
5198 mtstate->mt_merge_deleted = 0;
5199 mtstate->mt_updateColnosLists = updateColnosLists;
5200 mtstate->mt_mergeActionLists = mergeActionLists;
5201 mtstate->mt_mergeJoinConditions = mergeJoinConditions;
5202
5203 /*----------
5204 * Resolve the target relation. This is the same as:
5205 *
5206 * - the relation for which we will fire FOR STATEMENT triggers,
5207 * - the relation into whose tuple format all captured transition tuples
5208 * must be converted, and
5209 * - the root partitioned table used for tuple routing.
5210 *
5211 * If it's a partitioned or inherited table, the root partition or
5212 * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
5213 * given explicitly in node->rootRelation. Otherwise, the target relation
5214 * is the sole relation in the node->resultRelations list and, since it can
5215 * never be pruned, also in the resultRelations list constructed above.
5216 *----------
5217 */
5218 if (node->rootRelation > 0)
5219 {
5223 node->rootRelation);
5224 }
5225 else
5226 {
5227 Assert(list_length(node->resultRelations) == 1);
5228 Assert(list_length(resultRelations) == 1);
5229 mtstate->rootResultRelInfo = mtstate->resultRelInfo;
5230 ExecInitResultRelation(estate, mtstate->resultRelInfo,
5231 linitial_int(resultRelations));
5232 }
5233
5234 /* set up epqstate with dummy subplan data for the moment */
5235 EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
5236 node->epqParam, resultRelations);
5237 mtstate->fireBSTriggers = true;
5238
5239 /*
5240 * Build state for collecting transition tuples. This requires having a
5241 * valid trigger query context, so skip it in explain-only mode.
5242 */
5243 if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
5244 ExecSetupTransitionCaptureState(mtstate, estate);
5245
5246 /*
5247 * Open all the result relations and initialize the ResultRelInfo structs.
5248 * (But root relation was initialized above, if it's part of the array.)
5249 * We must do this before initializing the subplan, because direct-modify
5250 * FDWs expect their ResultRelInfos to be available.
5251 */
5252 resultRelInfo = mtstate->resultRelInfo;
5253 i = 0;
5254 foreach(l, resultRelations)
5255 {
5256 Index resultRelation = lfirst_int(l);
5258
5259 if (mergeActionLists)
5260 mergeActions = list_nth(mergeActionLists, i);
5261
5262 if (resultRelInfo != mtstate->rootResultRelInfo)
5263 {
5264 ExecInitResultRelation(estate, resultRelInfo, resultRelation);
5265
5266 /*
5267 * For child result relations, store the root result relation
5268 * pointer. We do so for the convenience of places that want to
5269 * look at the query's original target relation but don't have the
5270 * mtstate handy.
5271 */
5272 resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
5273 }
5274
5275 /* Initialize the usesFdwDirectModify flag */
5276 resultRelInfo->ri_usesFdwDirectModify =
5278
5279 /*
5280 * Verify result relation is a valid target for the current operation
5281 */
5282 CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
5283 mergeActions);
5284
5285 resultRelInfo++;
5286 i++;
5287 }
5288
5289 /*
5290 * Now we may initialize the subplan.
5291 */
5292 outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
5293
5294 /*
5295 * Do additional per-result-relation initialization.
5296 */
5297 for (i = 0; i < nrels; i++)
5298 {
5299 resultRelInfo = &mtstate->resultRelInfo[i];
5300
5301 /* Let FDWs init themselves for foreign-table result rels */
5302 if (!resultRelInfo->ri_usesFdwDirectModify &&
5303 resultRelInfo->ri_FdwRoutine != NULL &&
5304 resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
5305 {
5306 List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
5307
5308 resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
5309 resultRelInfo,
5310 fdw_private,
5311 i,
5312 eflags);
5313 }
5314
5315 /*
5316 * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
5317 * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
5318 * tables, the FDW might have created additional junk attr(s), but
5319 * those are no concern of ours.
5320 */
5323 {
5324 char relkind;
5325
5326 relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
5327 if (relkind == RELKIND_RELATION ||
5328 relkind == RELKIND_MATVIEW ||
5329 relkind == RELKIND_PARTITIONED_TABLE)
5330 {
5331 resultRelInfo->ri_RowIdAttNo =
5332 ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
5333
5334 /*
5335 * For heap relations, a ctid junk attribute must be present.
5336 * Partitioned tables should only appear here when all leaf
5337 * partitions were pruned, in which case no rows can be
5338 * produced and ctid is not needed.
5339 */
5340 if (relkind == RELKIND_PARTITIONED_TABLE)
5341 Assert(nrels == 1);
5342 else if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5343 elog(ERROR, "could not find junk ctid column");
5344 }
5345 else if (relkind == RELKIND_FOREIGN_TABLE)
5346 {
5347 /*
5348 * We don't support MERGE with foreign tables for now. (It's
5349 * problematic because the implementation uses CTID.)
5350 */
5352
5353 /*
5354 * When there is a row-level trigger, there should be a
5355 * wholerow attribute. We also require it to be present in
5356 * UPDATE and MERGE, so we can get the values of unchanged
5357 * columns.
5358 */
5359 resultRelInfo->ri_RowIdAttNo =
5361 "wholerow");
5362 if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
5363 !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5364 elog(ERROR, "could not find junk wholerow column");
5365 }
5366 else
5367 {
5368 /* Other valid target relkinds must provide wholerow */
5369 resultRelInfo->ri_RowIdAttNo =
5371 "wholerow");
5372 if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5373 elog(ERROR, "could not find junk wholerow column");
5374 }
5375 }
5376 }
5377
5378 /*
5379 * If this is an inherited update/delete/merge, there will be a junk
5380 * attribute named "tableoid" present in the subplan's targetlist. It
5381 * will be used to identify the result relation for a given tuple to be
5382 * updated/deleted/merged.
5383 */
5384 mtstate->mt_resultOidAttno =
5385 ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
5387 mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
5388 mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
5389
5390 /* Get the root target relation */
5391 rel = mtstate->rootResultRelInfo->ri_RelationDesc;
5392
5393 /*
5394 * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
5395 * or MERGE might need this too, but only if it actually moves tuples
5396 * between partitions; in that case setup is done by
5397 * ExecCrossPartitionUpdate.
5398 */
5399 if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
5402 ExecSetupPartitionTupleRouting(estate, rel);
5403
5404 /*
5405 * Initialize any WITH CHECK OPTION constraints if needed.
5406 */
5407 resultRelInfo = mtstate->resultRelInfo;
5408 foreach(l, withCheckOptionLists)
5409 {
5410 List *wcoList = (List *) lfirst(l);
5411 List *wcoExprs = NIL;
5412 ListCell *ll;
5413
5414 foreach(ll, wcoList)
5415 {
5417 ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
5418 &mtstate->ps);
5419
5421 }
5422
5423 resultRelInfo->ri_WithCheckOptions = wcoList;
5424 resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
5425 resultRelInfo++;
5426 }
5427
5428 /*
5429 * Initialize RETURNING projections if needed.
5430 */
5431 if (returningLists)
5432 {
5433 TupleTableSlot *slot;
5434 ExprContext *econtext;
5435
5436 /*
5437 * Initialize result tuple slot and assign its rowtype using the plan
5438 * node's declared targetlist, which the planner set up to be the same
5439 * as the first (before runtime pruning) RETURNING list. We assume
5440 * all the result rels will produce compatible output.
5441 */
5443 slot = mtstate->ps.ps_ResultTupleSlot;
5444
5445 /* Need an econtext too */
5446 if (mtstate->ps.ps_ExprContext == NULL)
5447 ExecAssignExprContext(estate, &mtstate->ps);
5448 econtext = mtstate->ps.ps_ExprContext;
5449
5450 /*
5451 * Build a projection for each result rel.
5452 */
5453 resultRelInfo = mtstate->resultRelInfo;
5454 foreach(l, returningLists)
5455 {
5456 List *rlist = (List *) lfirst(l);
5457
5458 resultRelInfo->ri_returningList = rlist;
5459 resultRelInfo->ri_projectReturning =
5460 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
5461 resultRelInfo->ri_RelationDesc->rd_att);
5462 resultRelInfo++;
5463 }
5464 }
5465 else
5466 {
5467 /*
5468 * We still must construct a dummy result tuple type, because InitPlan
5469 * expects one (maybe should change that?).
5470 */
5471 ExecInitResultTypeTL(&mtstate->ps);
5472
5473 mtstate->ps.ps_ExprContext = NULL;
5474 }
5475
5476 /* Set the list of arbiter indexes if needed for ON CONFLICT */
5477 resultRelInfo = mtstate->resultRelInfo;
5478 if (node->onConflictAction != ONCONFLICT_NONE)
5479 {
5480 /* insert may only have one relation, inheritance is not expanded */
5481 Assert(total_nrels == 1);
5482 resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5483 }
5484
5485 /*
5486 * For ON CONFLICT DO SELECT/UPDATE, initialize the ON CONFLICT action
5487 * state.
5488 */
5489 if (node->onConflictAction == ONCONFLICT_UPDATE ||
5491 {
5493
5494 /* already exists if created by RETURNING processing above */
5495 if (mtstate->ps.ps_ExprContext == NULL)
5496 ExecAssignExprContext(estate, &mtstate->ps);
5497
5498 /* action state for DO SELECT/UPDATE */
5499 resultRelInfo->ri_onConflict = onconfl;
5500
5501 /* lock strength for DO SELECT [FOR UPDATE/SHARE] */
5503
5504 /* initialize slot for the existing tuple */
5505 onconfl->oc_Existing =
5506 table_slot_create(resultRelInfo->ri_RelationDesc,
5507 &mtstate->ps.state->es_tupleTable);
5508
5509 /*
5510 * For ON CONFLICT DO UPDATE, initialize target list and projection.
5511 */
5513 {
5514 ExprContext *econtext;
5516
5517 econtext = mtstate->ps.ps_ExprContext;
5518 relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5519
5520 /*
5521 * Create the tuple slot for the UPDATE SET projection. We want a
5522 * slot of the table's type here, because the slot will be used to
5523 * insert into the table, and for RETURNING processing - which may
5524 * access system attributes.
5525 */
5526 onconfl->oc_ProjSlot =
5527 table_slot_create(resultRelInfo->ri_RelationDesc,
5528 &mtstate->ps.state->es_tupleTable);
5529
5530 /* build UPDATE SET projection state */
5531 onconfl->oc_ProjInfo =
5533 true,
5534 node->onConflictCols,
5536 econtext,
5537 onconfl->oc_ProjSlot,
5538 &mtstate->ps);
5539 }
5540
5541 /* initialize state to evaluate the WHERE clause, if any */
5542 if (node->onConflictWhere)
5543 {
5544 ExprState *qualexpr;
5545
5546 qualexpr = ExecInitQual((List *) node->onConflictWhere,
5547 &mtstate->ps);
5548 onconfl->oc_WhereClause = qualexpr;
5549 }
5550 }
5551
5552 /*
5553 * If needed, initialize the target range for FOR PORTION OF.
5554 */
5555 if (node->forPortionOf)
5556 {
5558 TupleDesc tupDesc;
5559 ForPortionOfExpr *forPortionOf;
5560 Datum targetRange;
5561 bool isNull;
5562 ExprContext *econtext;
5565
5566 rootRelInfo = mtstate->resultRelInfo;
5567 if (rootRelInfo->ri_RootResultRelInfo)
5569
5570 tupDesc = rootRelInfo->ri_RelationDesc->rd_att;
5571 forPortionOf = (ForPortionOfExpr *) node->forPortionOf;
5572
5573 /* Eval the FOR PORTION OF target */
5574 if (mtstate->ps.ps_ExprContext == NULL)
5575 ExecAssignExprContext(estate, &mtstate->ps);
5576 econtext = mtstate->ps.ps_ExprContext;
5577
5578 exprState = ExecPrepareExpr((Expr *) forPortionOf->targetRange, estate);
5579 targetRange = ExecEvalExpr(exprState, econtext, &isNull);
5580
5581 /*
5582 * FOR PORTION OF ... TO ... FROM should never give us a NULL target,
5583 * but FOR PORTION OF (...) could.
5584 */
5585 if (isNull)
5586 ereport(ERROR,
5587 (errmsg("FOR PORTION OF target was null")),
5588 executor_errposition(estate, forPortionOf->targetLocation));
5589
5590 /* Create state for FOR PORTION OF operation */
5591
5593 fpoState->fp_rangeName = forPortionOf->range_name;
5594 fpoState->fp_rangeType = forPortionOf->rangeType;
5595 fpoState->fp_rangeAttno = forPortionOf->rangeVar->varattno;
5596 fpoState->fp_targetRange = targetRange;
5597
5598 /* Initialize slot for the existing tuple */
5599
5600 fpoState->fp_Existing =
5601 table_slot_create(rootRelInfo->ri_RelationDesc,
5602 &mtstate->ps.state->es_tupleTable);
5603
5604 /* Create the tuple slot for INSERTing the temporal leftovers */
5605
5606 fpoState->fp_Leftover =
5607 ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc, &TTSOpsVirtual);
5608
5609 rootRelInfo->ri_forPortionOf = fpoState;
5610
5611 /*
5612 * Make sure the root relation has the FOR PORTION OF clause too. Each
5613 * partition needs its own TupleTableSlot, since they can have
5614 * different descriptors, so they'll use the root fpoState to
5615 * initialize one if necessary.
5616 */
5617 if (node->rootRelation > 0)
5619
5620 if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
5621 mtstate->mt_partition_tuple_routing == NULL)
5622 {
5623 /*
5624 * We will need tuple routing to insert temporal leftovers. Since
5625 * we are initializing things before ExecCrossPartitionUpdate
5626 * runs, we must do everything it needs as well.
5627 */
5630
5631 /* Things built here have to last for the query duration. */
5633
5636
5637 /*
5638 * Before a partition's tuple can be re-routed, it must first be
5639 * converted to the root's format, so we'll need a slot for
5640 * storing such tuples.
5641 */
5642 Assert(mtstate->mt_root_tuple_slot == NULL);
5644
5646 }
5647
5648 /*
5649 * Don't free the ExprContext here because the result must last for
5650 * the whole query.
5651 */
5652 }
5653
5654 /*
5655 * If we have any secondary relations in an UPDATE or DELETE, they need to
5656 * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5657 * EvalPlanQual mechanism needs to be told about them. This also goes for
5658 * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5659 */
5660 arowmarks = NIL;
5661 foreach(l, node->rowMarks)
5662 {
5664 RangeTblEntry *rte = exec_rt_fetch(rc->rti, estate);
5667
5668 /* ignore "parent" rowmarks; they are irrelevant at runtime */
5669 if (rc->isParent)
5670 continue;
5671
5672 /*
5673 * Also ignore rowmarks belonging to child tables that have been
5674 * pruned in ExecDoInitialPruning().
5675 */
5676 if (rte->rtekind == RTE_RELATION &&
5677 !bms_is_member(rc->rti, estate->es_unpruned_relids))
5678 continue;
5679
5680 /* Find ExecRowMark and build ExecAuxRowMark */
5681 erm = ExecFindRowMark(estate, rc->rti, false);
5684 }
5685
5686 /* For a MERGE command, initialize its state */
5687 if (mtstate->operation == CMD_MERGE)
5688 ExecInitMerge(mtstate, estate);
5689
5690 EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5691
5692 /*
5693 * If there are a lot of result relations, use a hash table to speed the
5694 * lookups. If there are not a lot, a simple linear search is faster.
5695 *
5696 * It's not clear where the threshold is, but try 64 for starters. In a
5697 * debugging build, use a small threshold so that we get some test
5698 * coverage of both code paths.
5699 */
5700#ifdef USE_ASSERT_CHECKING
5701#define MT_NRELS_HASH 4
5702#else
5703#define MT_NRELS_HASH 64
5704#endif
5705 if (nrels >= MT_NRELS_HASH)
5706 {
5708
5709 hash_ctl.keysize = sizeof(Oid);
5710 hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5712 mtstate->mt_resultOidHash =
5713 hash_create("ModifyTable target hash",
5714 nrels, &hash_ctl,
5716 for (i = 0; i < nrels; i++)
5717 {
5718 Oid hashkey;
5720 bool found;
5721
5722 resultRelInfo = &mtstate->resultRelInfo[i];
5723 hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5726 HASH_ENTER, &found);
5727 Assert(!found);
5728 mtlookup->relationIndex = i;
5729 }
5730 }
5731 else
5732 mtstate->mt_resultOidHash = NULL;
5733
5734 /*
5735 * Determine if the FDW supports batch insert and determine the batch size
5736 * (a FDW may support batching, but it may be disabled for the
5737 * server/table).
5738 *
5739 * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5740 * remains set to 0.
5741 */
5742 if (operation == CMD_INSERT)
5743 {
5744 /* insert may only have one relation, inheritance is not expanded */
5745 Assert(total_nrels == 1);
5746 resultRelInfo = mtstate->resultRelInfo;
5747 if (!resultRelInfo->ri_usesFdwDirectModify &&
5748 resultRelInfo->ri_FdwRoutine != NULL &&
5749 resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5750 resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5751 {
5752 resultRelInfo->ri_BatchSize =
5753 resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5754 Assert(resultRelInfo->ri_BatchSize >= 1);
5755 }
5756 else
5757 resultRelInfo->ri_BatchSize = 1;
5758 }
5759
5760 /*
5761 * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5762 * to estate->es_auxmodifytables so that it will be run to completion by
5763 * ExecPostprocessPlan. (It'd actually work fine to add the primary
5764 * ModifyTable node too, but there's no need.) Note the use of lcons not
5765 * lappend: we need later-initialized ModifyTable nodes to be shut down
5766 * before earlier ones. This ensures that we don't throw away RETURNING
5767 * rows that need to be seen by a later CTE subplan.
5768 */
5769 if (!mtstate->canSetTag)
5770 estate->es_auxmodifytables = lcons(mtstate,
5771 estate->es_auxmodifytables);
5772
5773 return mtstate;
5774}
5775
5776/* ----------------------------------------------------------------
5777 * ExecEndModifyTable
5778 *
5779 * Shuts down the plan.
5780 *
5781 * Returns nothing of interest.
5782 * ----------------------------------------------------------------
5783 */
5784void
5786{
5787 int i;
5788
5789 /*
5790 * Allow any FDWs to shut down
5791 */
5792 for (i = 0; i < node->mt_nrels; i++)
5793 {
5794 int j;
5795 ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5796
5797 if (!resultRelInfo->ri_usesFdwDirectModify &&
5798 resultRelInfo->ri_FdwRoutine != NULL &&
5799 resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5800 resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5801 resultRelInfo);
5802
5803 /*
5804 * Cleanup the initialized batch slots. This only matters for FDWs
5805 * with batching, but the other cases will have ri_NumSlotsInitialized
5806 * == 0.
5807 */
5808 for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5809 {
5810 ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5812 }
5813 }
5814
5815 /*
5816 * Close all the partitioned tables, leaf partitions, and their indices
5817 * and release the slot used for tuple routing, if set.
5818 */
5820 {
5822
5823 if (node->mt_root_tuple_slot)
5825 }
5826
5827 /*
5828 * Terminate EPQ execution if active
5829 */
5831
5832 /*
5833 * shut down subplan
5834 */
5836}
5837
5838void
5840{
5841 /*
5842 * Currently, we don't need to support rescan on ModifyTable nodes. The
5843 * semantics of that would be a bit debatable anyway.
5844 */
5845 elog(ERROR, "ExecReScanModifyTable is not implemented");
5846}
AttrMap * build_attrmap_by_name(TupleDesc indesc, TupleDesc outdesc, bool missing_ok)
Definition attmap.c:175
int16 AttrNumber
Definition attnum.h:21
#define AttributeNumberIsValid(attributeNumber)
Definition attnum.h:34
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:575
static Datum values[MAXATTR]
Definition bootstrap.c:190
#define Assert(condition)
Definition c.h:943
#define unlikely(x)
Definition c.h:438
uint32_t uint32
Definition c.h:624
unsigned int Index
Definition c.h:698
uint32 TransactionId
Definition c.h:736
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
static DataChecksumsWorkerOperation operation
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition datum.c:132
void domain_check(Datum value, bool isnull, Oid domainType, void **extra, MemoryContext mcxt)
Definition domains.c:346
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:889
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:360
int errcode(int sqlerrcode)
Definition elog.c:874
int errhint(const char *fmt,...) pg_attribute_printf(1
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
ExprState * ExecPrepareExpr(Expr *node, EState *estate)
Definition execExpr.c:786
ProjectionInfo * ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent, TupleDesc inputDesc)
Definition execExpr.c:391
ExprState * ExecInitQual(List *qual, PlanState *parent)
Definition execExpr.c:250
ProjectionInfo * ExecBuildUpdateProjection(List *targetList, bool evalTargetList, List *targetColnos, TupleDesc relDesc, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent)
Definition execExpr.c:568
List * ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, EState *estate, uint32 flags, TupleTableSlot *slot, List *arbiterIndexes, bool *specConflict)
void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
bool ExecCheckIndexConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, ItemPointer conflictTid, const ItemPointerData *tupleid, List *arbiterIndexes)
AttrNumber ExecFindJunkAttributeInTlist(List *targetlist, const char *attrName)
Definition execJunk.c:222
LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
Definition execMain.c:2559
ExecRowMark * ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
Definition execMain.c:2585
ExecAuxRowMark * ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
Definition execMain.c:2608
TupleTableSlot * EvalPlanQualSlot(EPQState *epqstate, Relation relation, Index rti)
Definition execMain.c:2805
void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation, OnConflictAction onConflictAction, List *mergeActions)
Definition execMain.c:1065
void EvalPlanQualBegin(EPQState *epqstate)
Definition execMain.c:2960
bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool emitError)
Definition execMain.c:1885
void EvalPlanQualInit(EPQState *epqstate, EState *parentestate, Plan *subplan, List *auxrowmarks, int epqParam, List *resultRelations)
Definition execMain.c:2747
void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition execMain.c:2257
void EvalPlanQualEnd(EPQState *epqstate)
Definition execMain.c:3208
void EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
Definition execMain.c:2788
TupleTableSlot * EvalPlanQual(EPQState *epqstate, Relation relation, Index rti, TupleTableSlot *inputslot)
Definition execMain.c:2678
void ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition execMain.c:1938
void ExecConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition execMain.c:2009
List * ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
Definition execMain.c:1459
PartitionTupleRouting * ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
ResultRelInfo * ExecFindPartition(ModifyTableState *mtstate, ResultRelInfo *rootResultRelInfo, PartitionTupleRouting *proute, TupleTableSlot *slot, EState *estate)
void ExecCleanupTupleRouting(ModifyTableState *mtstate, PartitionTupleRouting *proute)
void ExecEndNode(PlanState *node)
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
const TupleTableSlotOps TTSOpsVirtual
Definition execTuples.c:84
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
TupleTableSlot * ExecStoreVirtualTuple(TupleTableSlot *slot)
void ExecInitResultTypeTL(PlanState *planstate)
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
TupleTableSlot * ExecInitExtraTupleSlot(EState *estate, TupleDesc tupledesc, const TupleTableSlotOps *tts_ops)
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
TupleTableSlot * ExecStoreAllNullTuple(TupleTableSlot *slot)
void ExecForceStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
TupleConversionMap * ExecGetRootToChildMap(ResultRelInfo *resultRelInfo, EState *estate)
Definition execUtils.c:1352
TupleConversionMap * ExecGetChildToRootMap(ResultRelInfo *resultRelInfo)
Definition execUtils.c:1326
int executor_errposition(EState *estate, int location)
Definition execUtils.c:962
Bitmapset * ExecGetUpdatedCols(ResultRelInfo *relinfo, EState *estate)
Definition execUtils.c:1408
void ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Index rti)
Definition execUtils.c:906
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition execUtils.c:490
TupleTableSlot * ExecGetAllNullSlot(EState *estate, ResultRelInfo *relInfo)
Definition execUtils.c:1299
TupleTableSlot * ExecGetReturningSlot(EState *estate, ResultRelInfo *relInfo)
Definition execUtils.c:1274
#define MERGE_UPDATE
Definition execnodes.h:1432
#define InstrCountFiltered1(node, delta)
Definition execnodes.h:1307
#define EEO_FLAG_HAS_OLD
Definition execnodes.h:90
#define outerPlanState(node)
Definition execnodes.h:1299
#define InstrCountTuples2(node, delta)
Definition execnodes.h:1302
#define MERGE_INSERT
Definition execnodes.h:1431
#define EEO_FLAG_NEW_IS_NULL
Definition execnodes.h:96
@ ExprEndResult
Definition execnodes.h:343
#define EEO_FLAG_OLD_IS_NULL
Definition execnodes.h:94
#define EEO_FLAG_HAS_NEW
Definition execnodes.h:92
@ SFRM_ValuePerCall
Definition execnodes.h:354
#define MERGE_DELETE
Definition execnodes.h:1433
#define EXEC_FLAG_BACKWARD
Definition executor.h:70
#define ResetPerTupleExprContext(estate)
Definition executor.h:676
static TupleTableSlot * ExecProject(ProjectionInfo *projInfo)
Definition executor.h:493
#define GetPerTupleExprContext(estate)
Definition executor.h:667
#define EIIT_IS_UPDATE
Definition executor.h:757
static RangeTblEntry * exec_rt_fetch(Index rti, EState *estate)
Definition executor.h:710
#define ResetExprContext(econtext)
Definition executor.h:661
#define GetPerTupleMemoryContext(estate)
Definition executor.h:672
#define EIIT_ONLY_SUMMARIZING
Definition executor.h:759
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition executor.h:529
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition executor.h:322
#define EvalPlanQualSetSlot(epqstate, slot)
Definition executor.h:290
static Datum ExecEvalExpr(ExprState *state, ExprContext *econtext, bool *isNull)
Definition executor.h:403
#define EXEC_FLAG_EXPLAIN_ONLY
Definition executor.h:67
static Datum ExecGetJunkAttribute(TupleTableSlot *slot, AttrNumber attno, bool *isNull)
Definition executor.h:226
#define EXEC_FLAG_MARK
Definition executor.h:71
#define EIIT_NO_DUPE_ERROR
Definition executor.h:758
#define palloc_array(type, count)
Definition fe_memutils.h:76
void fmgr_info(Oid functionId, FmgrInfo *finfo)
Definition fmgr.c:129
#define DatumGetHeapTupleHeader(X)
Definition fmgr.h:296
#define InitFunctionCallInfoData(Fcinfo, Flinfo, Nargs, Collation, Context, Resultinfo)
Definition fmgr.h:150
#define LOCAL_FCINFO(name, nargs)
Definition fmgr.h:110
#define FunctionCallInvoke(fcinfo)
Definition fmgr.h:172
char * format_type_be(Oid type_oid)
void heap_freetuple(HeapTuple htup)
Definition heaptuple.c:1372
@ HASH_FIND
Definition hsearch.h:108
@ HASH_ENTER
Definition hsearch.h:109
#define HASH_CONTEXT
Definition hsearch.h:97
#define HASH_ELEM
Definition hsearch.h:90
#define HASH_BLOBS
Definition hsearch.h:92
static uint32 HeapTupleHeaderGetDatumLength(const HeapTupleHeaderData *tup)
long val
Definition informix.c:689
#define INJECTION_POINT(name, arg)
void InstrUpdateTupleCount(NodeInstrumentation *instr, double nTuples)
Definition instrument.c:196
int j
Definition isn.c:78
int i
Definition isn.c:77
static void ItemPointerSetInvalid(ItemPointerData *pointer)
Definition itemptr.h:184
static bool ItemPointerIndicatesMovedPartitions(const ItemPointerData *pointer)
Definition itemptr.h:197
ItemPointerData * ItemPointer
Definition itemptr.h:49
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
Definition itemptr.h:172
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Definition itemptr.h:83
List * lappend(List *list, void *datum)
Definition list.c:339
List * lappend_int(List *list, int datum)
Definition list.c:357
List * lcons(void *datum, List *list)
Definition list.c:495
bool list_member_ptr(const List *list, const void *datum)
Definition list.c:682
void list_free(List *list)
Definition list.c:1546
void UnlockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode)
Definition lmgr.c:601
uint32 SpeculativeInsertionLockAcquire(TransactionId xid)
Definition lmgr.c:786
void SpeculativeInsertionLockRelease(TransactionId xid)
Definition lmgr.c:812
void LockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode)
Definition lmgr.c:562
#define InplaceUpdateTupleLock
Definition lockdefs.h:48
@ LockWaitBlock
Definition lockoptions.h:40
LockTupleMode
Definition lockoptions.h:51
@ LockTupleExclusive
Definition lockoptions.h:59
@ LockTupleNoKeyExclusive
Definition lockoptions.h:57
@ LockTupleShare
Definition lockoptions.h:55
@ LockTupleKeyShare
Definition lockoptions.h:53
LockClauseStrength
Definition lockoptions.h:22
@ LCS_FORUPDATE
Definition lockoptions.h:28
@ LCS_NONE
Definition lockoptions.h:23
@ LCS_FORSHARE
Definition lockoptions.h:26
@ LCS_FORKEYSHARE
Definition lockoptions.h:25
@ LCS_FORNOKEYUPDATE
Definition lockoptions.h:27
void pfree(void *pointer)
Definition mcxt.c:1616
void * palloc0(Size size)
Definition mcxt.c:1417
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define IsBootstrapProcessingMode()
Definition miscadmin.h:495
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:125
Oid exprType(const Node *expr)
Definition nodeFuncs.c:42
static bool ExecOnConflictSelect(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer conflictTid, TupleTableSlot *excludedSlot, bool canSetTag, TupleTableSlot **returning)
static void ExecInitInsertProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
ResultRelInfo * ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache)
static void ExecPendingInserts(EState *estate)
static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
static void ExecForPortionOfLeftovers(ModifyTableContext *context, EState *estate, ResultRelInfo *resultRelInfo, ItemPointer tupleid)
void ExecInitMergeTupleSlots(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
static void ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
static TupleTableSlot * ExecInsert(ModifyTableContext *context, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, bool canSetTag, TupleTableSlot **inserted_tuple, ResultRelInfo **insert_destrel)
ModifyTableState * ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
static TupleTableSlot * ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag, bool *matched)
static TupleTableSlot * ExecModifyTable(PlanState *pstate)
static bool ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot **epqreturnslot, TM_Result *result)
static void ExecCheckPlanOutput(Relation resultRel, List *targetList)
TupleTableSlot * ExecGetUpdateNewTuple(ResultRelInfo *relinfo, TupleTableSlot *planSlot, TupleTableSlot *oldSlot)
static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context, ResultRelInfo *sourcePartInfo, ResultRelInfo *destPartInfo, ItemPointer tupleid, TupleTableSlot *oldslot, TupleTableSlot *newslot)
static void ExecInitUpdateProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
static void ExecCheckTIDVisible(EState *estate, ResultRelInfo *relinfo, ItemPointer tid, TupleTableSlot *tempSlot)
static TM_Result ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, bool changingPart)
static void ExecCheckTupleVisible(EState *estate, Relation rel, TupleTableSlot *slot)
static TupleTableSlot * ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot, TupleTableSlot *slot, bool canSetTag)
void ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, EState *estate, TupleTableSlot *slot, CmdType cmdtype)
static TupleTableSlot * ExecPrepareTupleRouting(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, ResultRelInfo *targetRelInfo, TupleTableSlot *slot, ResultRelInfo **partRelInfo)
static TupleTableSlot * ExecGetInsertNewTuple(ResultRelInfo *relinfo, TupleTableSlot *planSlot)
#define MT_NRELS_HASH
static TM_Result ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, bool canSetTag, UpdateContext *updateCxt)
static void ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot)
void ExecInitGenerated(ResultRelInfo *resultRelInfo, EState *estate, CmdType cmdtype)
static bool ExecOnConflictLockRow(ModifyTableContext *context, TupleTableSlot *existing, ItemPointer conflictTid, Relation relation, LockTupleMode lockmode, bool isUpdate)
static void fireBSTriggers(ModifyTableState *node)
void ExecReScanModifyTable(ModifyTableState *node)
static TupleTableSlot * ExecDelete(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool processReturning, bool changingPart, bool canSetTag, TM_Result *tmresult, bool *tupleDeleted, TupleTableSlot **epqreturnslot)
void ExecEndModifyTable(ModifyTableState *node)
static void fireASTriggers(ModifyTableState *node)
static bool ExecOnConflictUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer conflictTid, TupleTableSlot *excludedSlot, bool canSetTag, TupleTableSlot **returning)
static void ExecBatchInsert(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo, TupleTableSlot **slots, TupleTableSlot **planSlots, int numSlots, EState *estate, bool canSetTag)
static bool ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, TM_Result *result)
static void ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
static TupleTableSlot * ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, bool canSetTag)
static TupleTableSlot * ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
static TupleTableSlot * ExecProcessReturning(ModifyTableContext *context, ResultRelInfo *resultRelInfo, bool isDelete, TupleTableSlot *oldSlot, TupleTableSlot *newSlot, TupleTableSlot *planSlot)
static bool ExecCrossPartitionUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, bool canSetTag, UpdateContext *updateCxt, TM_Result *tmresult, TupleTableSlot **retry_slot, TupleTableSlot **inserted_tuple, ResultRelInfo **insert_destrel)
static void ExecInitMerge(ModifyTableState *mtstate, EState *estate)
#define IsA(nodeptr, _type_)
Definition nodes.h:164
OnConflictAction
Definition nodes.h:427
@ ONCONFLICT_NONE
Definition nodes.h:428
@ ONCONFLICT_SELECT
Definition nodes.h:431
@ ONCONFLICT_UPDATE
Definition nodes.h:430
@ ONCONFLICT_NOTHING
Definition nodes.h:429
CmdType
Definition nodes.h:273
@ CMD_MERGE
Definition nodes.h:279
@ CMD_INSERT
Definition nodes.h:277
@ CMD_DELETE
Definition nodes.h:278
@ CMD_UPDATE
Definition nodes.h:276
@ CMD_NOTHING
Definition nodes.h:282
#define makeNode(_type_)
Definition nodes.h:161
#define castNode(_type_, nodeptr)
Definition nodes.h:182
static char * errmsg
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
WCOKind
@ WCO_RLS_MERGE_UPDATE_CHECK
@ WCO_RLS_CONFLICT_CHECK
@ WCO_RLS_INSERT_CHECK
@ WCO_VIEW_CHECK
@ WCO_RLS_UPDATE_CHECK
@ WCO_RLS_MERGE_DELETE_CHECK
@ RTE_RELATION
FormData_pg_attribute * Form_pg_attribute
#define lfirst(lc)
Definition pg_list.h:172
#define lfirst_node(type, lc)
Definition pg_list.h:176
static int list_length(const List *l)
Definition pg_list.h:152
#define NIL
Definition pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition pg_list.h:550
#define lfirst_int(lc)
Definition pg_list.h:173
#define linitial_int(l)
Definition pg_list.h:179
static void * list_nth(const List *list, int n)
Definition pg_list.h:331
#define linitial(l)
Definition pg_list.h:178
#define list_nth_node(type, list, n)
Definition pg_list.h:359
#define plan(x)
Definition pg_regress.c:164
#define ERRCODE_T_R_SERIALIZATION_FAILURE
Definition pgbench.c:77
#define outerPlan(node)
Definition plannodes.h:267
static Oid DatumGetObjectId(Datum X)
Definition postgres.h:242
uint64_t Datum
Definition postgres.h:70
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:332
static TransactionId DatumGetTransactionId(Datum X)
Definition postgres.h:282
#define InvalidOid
unsigned int Oid
static void test(void)
static int fb(int x)
@ MERGE_WHEN_NOT_MATCHED_BY_TARGET
Definition primnodes.h:2026
@ MERGE_WHEN_NOT_MATCHED_BY_SOURCE
Definition primnodes.h:2025
@ MERGE_WHEN_MATCHED
Definition primnodes.h:2024
#define RelationGetForm(relation)
Definition rel.h:510
#define RelationGetRelid(relation)
Definition rel.h:516
#define RelationGetDescr(relation)
Definition rel.h:542
#define RelationGetRelationName(relation)
Definition rel.h:550
Node * build_column_default(Relation rel, int attrno)
Node * map_variable_attnos(Node *node, int target_varno, int sublevels_up, const AttrMap *attno_map, Oid to_rowtype, bool *found_whole_row)
int RI_FKey_trigger_type(Oid tgfoid)
#define SnapshotAny
Definition snapmgr.h:33
AttrNumber * attnums
Definition attmap.h:36
uint64 es_processed
Definition execnodes.h:750
Bitmapset * es_unpruned_relids
Definition execnodes.h:709
List * es_insert_pending_result_relations
Definition execnodes.h:807
MemoryContext es_query_cxt
Definition execnodes.h:746
List * es_tupleTable
Definition execnodes.h:748
struct EPQState * es_epq_active
Definition execnodes.h:778
CommandId es_output_cid
Definition execnodes.h:718
List * es_insert_pending_modifytables
Definition execnodes.h:808
Snapshot es_snapshot
Definition execnodes.h:696
List * es_auxmodifytables
Definition execnodes.h:763
Snapshot es_crosscheck_snapshot
Definition execnodes.h:697
TupleTableSlot * ecxt_innertuple
Definition execnodes.h:289
TupleTableSlot * ecxt_newtuple
Definition execnodes.h:326
TupleTableSlot * ecxt_scantuple
Definition execnodes.h:287
TupleTableSlot * ecxt_oldtuple
Definition execnodes.h:324
TupleTableSlot * ecxt_outertuple
Definition execnodes.h:291
uint8 flags
Definition execnodes.h:103
BeginForeignModify_function BeginForeignModify
Definition fdwapi.h:235
EndForeignModify_function EndForeignModify
Definition fdwapi.h:241
ExecForeignInsert_function ExecForeignInsert
Definition fdwapi.h:236
ExecForeignUpdate_function ExecForeignUpdate
Definition fdwapi.h:239
ExecForeignBatchInsert_function ExecForeignBatchInsert
Definition fdwapi.h:237
GetForeignModifyBatchSize_function GetForeignModifyBatchSize
Definition fdwapi.h:238
ExecForeignDelete_function ExecForeignDelete
Definition fdwapi.h:240
ParseLoc targetLocation
Definition primnodes.h:2452
TupleTableSlot * fp_Existing
Definition execnodes.h:484
Size keysize
Definition hsearch.h:69
Definition pg_list.h:54
MergeAction * mas_action
Definition execnodes.h:464
ProjectionInfo * mas_proj
Definition execnodes.h:465
ExprState * mas_whenqual
Definition execnodes.h:467
CmdType commandType
Definition primnodes.h:2035
TM_FailureData tmfd
TupleTableSlot * planSlot
TupleTableSlot * cpDeletedSlot
TupleTableSlot * cpUpdateReturningSlot
ModifyTableState * mtstate
List * mt_mergeJoinConditions
Definition execnodes.h:1510
TupleTableSlot * mt_merge_pending_not_matched
Definition execnodes.h:1496
ResultRelInfo * resultRelInfo
Definition execnodes.h:1446
double mt_merge_deleted
Definition execnodes.h:1501
struct PartitionTupleRouting * mt_partition_tuple_routing
Definition execnodes.h:1477
List * mt_updateColnosLists
Definition execnodes.h:1508
double mt_merge_inserted
Definition execnodes.h:1499
TupleTableSlot * mt_root_tuple_slot
Definition execnodes.h:1474
EPQState mt_epqstate
Definition execnodes.h:1456
double mt_merge_updated
Definition execnodes.h:1500
List * mt_mergeActionLists
Definition execnodes.h:1509
HTAB * mt_resultOidHash
Definition execnodes.h:1468
ResultRelInfo * rootResultRelInfo
Definition execnodes.h:1454
struct TransitionCaptureState * mt_transition_capture
Definition execnodes.h:1480
struct TransitionCaptureState * mt_oc_transition_capture
Definition execnodes.h:1483
MergeActionState * mt_merge_action
Definition execnodes.h:1489
List * updateColnosLists
Definition plannodes.h:350
List * arbiterIndexes
Definition plannodes.h:370
List * onConflictCols
Definition plannodes.h:376
List * mergeJoinConditions
Definition plannodes.h:388
CmdType operation
Definition plannodes.h:340
Node * forPortionOf
Definition plannodes.h:380
List * resultRelations
Definition plannodes.h:348
Bitmapset * fdwDirectModifyPlans
Definition plannodes.h:362
List * onConflictSet
Definition plannodes.h:374
List * mergeActionLists
Definition plannodes.h:386
bool canSetTag
Definition plannodes.h:342
List * fdwPrivLists
Definition plannodes.h:360
List * returningLists
Definition plannodes.h:358
List * withCheckOptionLists
Definition plannodes.h:352
LockClauseStrength onConflictLockStrength
Definition plannodes.h:372
Index rootRelation
Definition plannodes.h:346
Node * onConflictWhere
Definition plannodes.h:378
List * rowMarks
Definition plannodes.h:364
OnConflictAction onConflictAction
Definition plannodes.h:368
Definition nodes.h:135
ExprState * oc_WhereClause
Definition execnodes.h:451
ProjectionInfo * oc_ProjInfo
Definition execnodes.h:449
TupleTableSlot * oc_ProjSlot
Definition execnodes.h:448
TupleTableSlot * oc_Existing
Definition execnodes.h:447
LockClauseStrength oc_LockStrength
Definition execnodes.h:450
Plan * plan
Definition execnodes.h:1201
EState * state
Definition execnodes.h:1203
ExprContext * ps_ExprContext
Definition execnodes.h:1242
TupleTableSlot * ps_ResultTupleSlot
Definition execnodes.h:1241
ExecProcNodeMtd ExecProcNode
Definition execnodes.h:1207
List * targetlist
Definition plannodes.h:235
ExprState pi_state
Definition execnodes.h:400
TriggerDesc * trigdesc
Definition rel.h:117
TupleDesc rd_att
Definition rel.h:112
Form_pg_class rd_rel
Definition rel.h:111
OnConflictActionState * ri_onConflict
Definition execnodes.h:616
TupleTableSlot * ri_PartitionTupleSlot
Definition execnodes.h:655
bool ri_projectNewInfoValid
Definition execnodes.h:542
List * ri_onConflictArbiterIndexes
Definition execnodes.h:613
struct ResultRelInfo * ri_RootResultRelInfo
Definition execnodes.h:654
TupleTableSlot ** ri_Slots
Definition execnodes.h:578
ExprState * ri_MergeJoinCondition
Definition execnodes.h:622
bool ri_needLockTagTuple
Definition execnodes.h:545
Relation ri_RelationDesc
Definition execnodes.h:513
RelationPtr ri_IndexRelationDescs
Definition execnodes.h:519
int ri_NumSlotsInitialized
Definition execnodes.h:576
List * ri_WithCheckOptions
Definition execnodes.h:582
TupleTableSlot * ri_oldTupleSlot
Definition execnodes.h:540
bool ri_extraUpdatedCols_valid
Definition execnodes.h:533
TriggerDesc * ri_TrigDesc
Definition execnodes.h:548
ForPortionOfState * ri_forPortionOf
Definition execnodes.h:625
Bitmapset * ri_extraUpdatedCols
Definition execnodes.h:531
Index ri_RangeTableIndex
Definition execnodes.h:510
ExprState ** ri_GeneratedExprsI
Definition execnodes.h:599
int ri_NumGeneratedNeededU
Definition execnodes.h:604
List * ri_MergeActions[NUM_MERGE_MATCH_KINDS]
Definition execnodes.h:619
TupleTableSlot * ri_newTupleSlot
Definition execnodes.h:538
List * ri_WithCheckOptionExprs
Definition execnodes.h:585
ProjectionInfo * ri_projectNew
Definition execnodes.h:536
ProjectionInfo * ri_projectReturning
Definition execnodes.h:610
ExprState ** ri_GeneratedExprsU
Definition execnodes.h:600
struct FdwRoutine * ri_FdwRoutine
Definition execnodes.h:566
List * ri_returningList
Definition execnodes.h:607
TupleTableSlot ** ri_PlanSlots
Definition execnodes.h:579
bool ri_usesFdwDirectModify
Definition execnodes.h:572
AttrNumber ri_RowIdAttNo
Definition execnodes.h:528
int ri_NumGeneratedNeededI
Definition execnodes.h:603
NodeTag type
Definition execnodes.h:368
SetFunctionReturnMode returnMode
Definition execnodes.h:374
ExprContext * econtext
Definition execnodes.h:370
TupleDesc setDesc
Definition execnodes.h:378
Tuplestorestate * setResult
Definition execnodes.h:377
TupleDesc expectedDesc
Definition execnodes.h:371
ExprDoneCond isDone
Definition execnodes.h:375
TransactionId xmax
Definition tableam.h:172
CommandId cmax
Definition tableam.h:173
TupleTableSlot * tcs_original_insert_tuple
Definition trigger.h:76
int numtriggers
Definition reltrigger.h:50
bool trig_delete_before_row
Definition reltrigger.h:66
bool trig_update_instead_row
Definition reltrigger.h:63
Trigger * triggers
Definition reltrigger.h:49
bool trig_delete_instead_row
Definition reltrigger.h:68
bool trig_update_after_row
Definition reltrigger.h:62
bool trig_insert_instead_row
Definition reltrigger.h:58
bool trig_update_before_row
Definition reltrigger.h:61
bool trig_insert_before_row
Definition reltrigger.h:56
bool has_generated_virtual
Definition tupdesc.h:47
bool has_generated_stored
Definition tupdesc.h:46
AttrMap * attrMap
Definition tupconvert.h:28
TupleConstr * constr
Definition tupdesc.h:159
TupleDesc tts_tupleDescriptor
Definition tuptable.h:129
const TupleTableSlotOps *const tts_ops
Definition tuptable.h:127
bool * tts_isnull
Definition tuptable.h:133
ItemPointerData tts_tid
Definition tuptable.h:142
Datum * tts_values
Definition tuptable.h:131
TU_UpdateIndexes updateIndexes
LockTupleMode lockmode
AttrNumber varattno
Definition primnodes.h:275
#define MinTransactionIdAttributeNumber
Definition sysattr.h:22
#define FirstLowInvalidHeapAttributeNumber
Definition sysattr.h:27
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition tableam.c:92
TU_UpdateIndexes
Definition tableam.h:133
@ TU_Summarizing
Definition tableam.h:141
@ TU_None
Definition tableam.h:135
TM_Result
Definition tableam.h:95
@ TM_Ok
Definition tableam.h:100
@ TM_BeingModified
Definition tableam.h:122
@ TM_Deleted
Definition tableam.h:115
@ TM_WouldBlock
Definition tableam.h:125
@ TM_Updated
Definition tableam.h:112
@ TM_SelfModified
Definition tableam.h:106
@ TM_Invisible
Definition tableam.h:103
static TM_Result table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, uint8 flags, TM_FailureData *tmfd)
Definition tableam.h:1648
static void table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot, CommandId cid, uint32 options, BulkInsertStateData *bistate, uint32 specToken)
Definition tableam.h:1477
static void table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot, uint32 specToken, bool succeeded)
Definition tableam.h:1491
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION
Definition tableam.h:299
#define TABLE_DELETE_CHANGING_PARTITION
Definition tableam.h:289
static void table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, uint32 options, BulkInsertStateData *bistate)
Definition tableam.h:1458
static TM_Result table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, CommandId cid, uint32 options, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
Definition tableam.h:1600
static TM_Result table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid, uint32 options, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd)
Definition tableam.h:1549
static bool table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, Snapshot snapshot)
Definition tableam.h:1391
static bool table_tuple_fetch_row_version(Relation rel, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot)
Definition tableam.h:1344
bool ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TupleTableSlot *newslot, TM_Result *tmresult, TM_FailureData *tmfd, bool is_merge_update)
Definition trigger.c:2973
TransitionCaptureState * MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
Definition trigger.c:4976
void ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TransitionCaptureState *transition_capture, bool is_crosspart_update)
Definition trigger.c:2803
void ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
Definition trigger.c:2403
bool ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *slot)
Definition trigger.c:2467
bool ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo, HeapTuple trigtuple)
Definition trigger.c:2850
void ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
Definition trigger.c:2632
bool ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *slot)
Definition trigger.c:2571
void ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, ResultRelInfo *src_partinfo, ResultRelInfo *dst_partinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TupleTableSlot *newslot, List *recheckIndexes, TransitionCaptureState *transition_capture, bool is_crosspart_update)
Definition trigger.c:3146
bool ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TupleTableSlot **epqslot, TM_Result *tmresult, TM_FailureData *tmfd, bool is_merge_delete)
Definition trigger.c:2703
void ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo, TransitionCaptureState *transition_capture)
Definition trigger.c:2955
void ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo, TransitionCaptureState *transition_capture)
Definition trigger.c:2683
void ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *slot, List *recheckIndexes, TransitionCaptureState *transition_capture)
Definition trigger.c:2545
void ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo, TransitionCaptureState *transition_capture)
Definition trigger.c:2454
bool ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, HeapTuple trigtuple, TupleTableSlot *newslot)
Definition trigger.c:3216
void AfterTriggerEndQuery(EState *estate)
Definition trigger.c:5156
void ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
Definition trigger.c:2897
void AfterTriggerBeginQuery(void)
Definition trigger.c:5136
#define RI_TRIGGER_PK
Definition trigger.h:286
TupleTableSlot * execute_attr_map_slot(AttrMap *attrMap, TupleTableSlot *in_slot, TupleTableSlot *out_slot)
Definition tupconvert.c:193
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition tupdesc.c:242
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:178
static CompactAttribute * TupleDescCompactAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:195
#define TTS_EMPTY(slot)
Definition tuptable.h:92
static Datum slot_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
Definition tuptable.h:438
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition tuptable.h:476
#define TupIsNull(slot)
Definition tuptable.h:325
static void slot_getallattrs(TupleTableSlot *slot)
Definition tuptable.h:390
static TupleTableSlot * ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
Definition tuptable.h:543
static void ExecMaterializeSlot(TupleTableSlot *slot)
Definition tuptable.h:494
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition typcache.c:389
void pull_varattnos(Node *node, Index varno, Bitmapset **varattnos)
Definition var.c:296
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition xact.c:943
TransactionId GetCurrentTransactionId(void)
Definition xact.c:456
#define IsolationUsesXactSnapshot()
Definition xact.h:52