PostgreSQL Source Code git master
nodeModifyTable.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * nodeModifyTable.c
4 * routines to handle ModifyTable nodes.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/executor/nodeModifyTable.c
12 *
13 *-------------------------------------------------------------------------
14 */
15/* INTERFACE ROUTINES
16 * ExecInitModifyTable - initialize the ModifyTable node
17 * ExecModifyTable - retrieve the next tuple from the node
18 * ExecEndModifyTable - shut down the ModifyTable node
19 * ExecReScanModifyTable - rescan the ModifyTable node
20 *
21 * NOTES
22 * The ModifyTable node receives input from its outerPlan, which is
23 * the data to insert for INSERT cases, the changed columns' new
24 * values plus row-locating info for UPDATE and MERGE cases, or just the
25 * row-locating info for DELETE cases.
26 *
27 * The relation to modify can be an ordinary table, a foreign table, or a
28 * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 * targeted a view not in one of those two categories, earlier processing
31 * already pointed the ModifyTable result relation to an underlying
32 * relation of that other view. This node does process
33 * ri_WithCheckOptions, which may have expressions from those other,
34 * automatically updatable views.
35 *
36 * MERGE runs a join between the source relation and the target table.
37 * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 * is an outer join that might output tuples without a matching target
39 * tuple. In this case, any unmatched target tuples will have NULL
40 * row-locating info, and only INSERT can be run. But for matched target
41 * tuples, the row-locating info is used to determine the tuple to UPDATE
42 * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 * SOURCE, all tuples produced by the join will include a matching target
44 * tuple, so all tuples contain row-locating info.
45 *
46 * If the query specifies RETURNING, then the ModifyTable returns a
47 * RETURNING tuple after completing each row insert, update, or delete.
48 * It must be called again to continue the operation. Without RETURNING,
49 * we just loop within the node until all the work is done, then
50 * return NULL. This avoids useless call/return overhead.
51 */
52
#include "postgres.h"

#include "access/htup_details.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "commands/trigger.h"
#include "executor/execPartition.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "rewrite/rewriteHandler.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
72
73
74typedef struct MTTargetRelLookup
75{
76 Oid relationOid; /* hash key, must be first */
77 int relationIndex; /* rel's index in resultRelInfo[] array */
79
80/*
81 * Context struct for a ModifyTable operation, containing basic execution
82 * state and some output variables populated by ExecUpdateAct() and
83 * ExecDeleteAct() to report the result of their actions to callers.
84 */
85typedef struct ModifyTableContext
86{
87 /* Operation state */
91
92 /*
93 * Slot containing tuple obtained from ModifyTable's subplan. Used to
94 * access "junk" columns that are not going to be stored.
95 */
97
98 /*
99 * Information about the changes that were made concurrently to a tuple
100 * being updated or deleted
101 */
103
104 /*
105 * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
106 * clause that refers to OLD columns (converted to the root's tuple
107 * descriptor).
108 */
110
111 /*
112 * The tuple projected by the INSERT's RETURNING clause, when doing a
113 * cross-partition UPDATE
114 */
117
118/*
119 * Context struct containing output data specific to UPDATE operations.
120 */
121typedef struct UpdateContext
122{
123 bool crossPartUpdate; /* was it a cross-partition update? */
124 TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
125
126 /*
127 * Lock mode to acquire on the latest tuple version before performing
128 * EvalPlanQual on it
129 */
132
133
134static void ExecBatchInsert(ModifyTableState *mtstate,
135 ResultRelInfo *resultRelInfo,
136 TupleTableSlot **slots,
137 TupleTableSlot **planSlots,
138 int numSlots,
139 EState *estate,
140 bool canSetTag);
141static void ExecPendingInserts(EState *estate);
143 ResultRelInfo *sourcePartInfo,
144 ResultRelInfo *destPartInfo,
145 ItemPointer tupleid,
146 TupleTableSlot *oldslot,
147 TupleTableSlot *newslot);
148static bool ExecOnConflictUpdate(ModifyTableContext *context,
149 ResultRelInfo *resultRelInfo,
150 ItemPointer conflictTid,
151 TupleTableSlot *excludedSlot,
152 bool canSetTag,
153 TupleTableSlot **returning);
155 EState *estate,
156 PartitionTupleRouting *proute,
157 ResultRelInfo *targetRelInfo,
158 TupleTableSlot *slot,
159 ResultRelInfo **partRelInfo);
160
162 ResultRelInfo *resultRelInfo,
163 ItemPointer tupleid,
164 HeapTuple oldtuple,
165 bool canSetTag);
166static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
168 ResultRelInfo *resultRelInfo,
169 ItemPointer tupleid,
170 HeapTuple oldtuple,
171 bool canSetTag,
172 bool *matched);
174 ResultRelInfo *resultRelInfo,
175 bool canSetTag);
176
177
178/*
179 * Verify that the tuples to be produced by INSERT match the
180 * target relation's rowtype
181 *
182 * We do this to guard against stale plans. If plan invalidation is
183 * functioning properly then we should never get a failure here, but better
184 * safe than sorry. Note that this is called after we have obtained lock
185 * on the target rel, so the rowtype can't change underneath us.
186 *
187 * The plan output is represented by its targetlist, because that makes
188 * handling the dropped-column case easier.
189 *
190 * We used to use this for UPDATE as well, but now the equivalent checks
191 * are done in ExecBuildUpdateProjection.
192 */
193static void
194ExecCheckPlanOutput(Relation resultRel, List *targetList)
195{
196 TupleDesc resultDesc = RelationGetDescr(resultRel);
197 int attno = 0;
198 ListCell *lc;
199
200 foreach(lc, targetList)
201 {
202 TargetEntry *tle = (TargetEntry *) lfirst(lc);
204
205 Assert(!tle->resjunk); /* caller removed junk items already */
206
207 if (attno >= resultDesc->natts)
209 (errcode(ERRCODE_DATATYPE_MISMATCH),
210 errmsg("table row type and query-specified row type do not match"),
211 errdetail("Query has too many columns.")));
212 attr = TupleDescAttr(resultDesc, attno);
213 attno++;
214
215 if (!attr->attisdropped)
216 {
217 /* Normal case: demand type match */
218 if (exprType((Node *) tle->expr) != attr->atttypid)
220 (errcode(ERRCODE_DATATYPE_MISMATCH),
221 errmsg("table row type and query-specified row type do not match"),
222 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
223 format_type_be(attr->atttypid),
224 attno,
225 format_type_be(exprType((Node *) tle->expr)))));
226 }
227 else
228 {
229 /*
230 * For a dropped column, we can't check atttypid (it's likely 0).
231 * In any case the planner has most likely inserted an INT4 null.
232 * What we insist on is just *some* NULL constant.
233 */
234 if (!IsA(tle->expr, Const) ||
235 !((Const *) tle->expr)->constisnull)
237 (errcode(ERRCODE_DATATYPE_MISMATCH),
238 errmsg("table row type and query-specified row type do not match"),
239 errdetail("Query provides a value for a dropped column at ordinal position %d.",
240 attno)));
241 }
242 }
243 if (attno != resultDesc->natts)
245 (errcode(ERRCODE_DATATYPE_MISMATCH),
246 errmsg("table row type and query-specified row type do not match"),
247 errdetail("Query has too few columns.")));
248}
249
250/*
251 * ExecProcessReturning --- evaluate a RETURNING list
252 *
253 * context: context for the ModifyTable operation
254 * resultRelInfo: current result rel
255 * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
256 * oldSlot: slot holding old tuple deleted or updated
257 * newSlot: slot holding new tuple inserted or updated
258 * planSlot: slot holding tuple returned by top subplan node
259 *
260 * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
261 * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
262 * modify is disabled if the RETURNING list refers to any OLD/NEW values).
263 *
264 * Returns a slot holding the result tuple
265 */
266static TupleTableSlot *
268 ResultRelInfo *resultRelInfo,
269 CmdType cmdType,
270 TupleTableSlot *oldSlot,
271 TupleTableSlot *newSlot,
272 TupleTableSlot *planSlot)
273{
274 EState *estate = context->estate;
275 ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
276 ExprContext *econtext = projectReturning->pi_exprContext;
277
278 /* Make tuple and any needed join variables available to ExecProject */
279 switch (cmdType)
280 {
281 case CMD_INSERT:
282 case CMD_UPDATE:
283 /* return new tuple by default */
284 if (newSlot)
285 econtext->ecxt_scantuple = newSlot;
286 break;
287
288 case CMD_DELETE:
289 /* return old tuple by default */
290 if (oldSlot)
291 econtext->ecxt_scantuple = oldSlot;
292 break;
293
294 default:
295 elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
296 }
297 econtext->ecxt_outertuple = planSlot;
298
299 /* Make old/new tuples available to ExecProject, if required */
300 if (oldSlot)
301 econtext->ecxt_oldtuple = oldSlot;
302 else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
303 econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
304 else
305 econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
306
307 if (newSlot)
308 econtext->ecxt_newtuple = newSlot;
309 else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
310 econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
311 else
312 econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
313
314 /*
315 * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
316 * information is required to evaluate ReturningExpr nodes and also in
317 * ExecEvalSysVar() and ExecEvalWholeRowVar().
318 */
319 if (oldSlot == NULL)
320 projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
321 else
322 projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
323
324 if (newSlot == NULL)
325 projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
326 else
327 projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
328
329 /* Compute the RETURNING expressions */
330 return ExecProject(projectReturning);
331}
332
333/*
334 * ExecCheckTupleVisible -- verify tuple is visible
335 *
336 * It would not be consistent with guarantees of the higher isolation levels to
337 * proceed with avoiding insertion (taking speculative insertion's alternative
338 * path) on the basis of another tuple that is not visible to MVCC snapshot.
339 * Check for the need to raise a serialization failure, and do so as necessary.
340 */
341static void
343 Relation rel,
344 TupleTableSlot *slot)
345{
347 return;
348
349 if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
350 {
351 Datum xminDatum;
352 TransactionId xmin;
353 bool isnull;
354
355 xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
356 Assert(!isnull);
357 xmin = DatumGetTransactionId(xminDatum);
358
359 /*
360 * We should not raise a serialization failure if the conflict is
361 * against a tuple inserted by our own transaction, even if it's not
362 * visible to our snapshot. (This would happen, for example, if
363 * conflicting keys are proposed for insertion in a single command.)
364 */
368 errmsg("could not serialize access due to concurrent update")));
369 }
370}
371
372/*
373 * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
374 */
375static void
377 ResultRelInfo *relinfo,
378 ItemPointer tid,
379 TupleTableSlot *tempSlot)
380{
381 Relation rel = relinfo->ri_RelationDesc;
382
383 /* Redundantly check isolation level */
385 return;
386
387 if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
388 elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
389 ExecCheckTupleVisible(estate, rel, tempSlot);
390 ExecClearTuple(tempSlot);
391}
392
393/*
394 * Initialize to compute stored generated columns for a tuple
395 *
396 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
397 * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
398 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
399 *
400 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
401 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
402 * cross-partition UPDATEs, since a partition might be the target of both
403 * UPDATE and INSERT actions.
404 */
405void
407 EState *estate,
408 CmdType cmdtype)
409{
410 Relation rel = resultRelInfo->ri_RelationDesc;
411 TupleDesc tupdesc = RelationGetDescr(rel);
412 int natts = tupdesc->natts;
413 ExprState **ri_GeneratedExprs;
414 int ri_NumGeneratedNeeded;
415 Bitmapset *updatedCols;
416 MemoryContext oldContext;
417
418 /* Nothing to do if no generated columns */
419 if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
420 return;
421
422 /*
423 * In an UPDATE, we can skip computing any generated columns that do not
424 * depend on any UPDATE target column. But if there is a BEFORE ROW
425 * UPDATE trigger, we cannot skip because the trigger might change more
426 * columns.
427 */
428 if (cmdtype == CMD_UPDATE &&
430 updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
431 else
432 updatedCols = NULL;
433
434 /*
435 * Make sure these data structures are built in the per-query memory
436 * context so they'll survive throughout the query.
437 */
438 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
439
440 ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
441 ri_NumGeneratedNeeded = 0;
442
443 for (int i = 0; i < natts; i++)
444 {
445 if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
446 {
447 Expr *expr;
448
449 /* Fetch the GENERATED AS expression tree */
450 expr = (Expr *) build_column_default(rel, i + 1);
451 if (expr == NULL)
452 elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
453 i + 1, RelationGetRelationName(rel));
454
455 /*
456 * If it's an update with a known set of update target columns,
457 * see if we can skip the computation.
458 */
459 if (updatedCols)
460 {
461 Bitmapset *attrs_used = NULL;
462
463 pull_varattnos((Node *) expr, 1, &attrs_used);
464
465 if (!bms_overlap(updatedCols, attrs_used))
466 continue; /* need not update this column */
467 }
468
469 /* No luck, so prepare the expression for execution */
470 ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
471 ri_NumGeneratedNeeded++;
472
473 /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
474 if (cmdtype == CMD_UPDATE)
475 resultRelInfo->ri_extraUpdatedCols =
476 bms_add_member(resultRelInfo->ri_extraUpdatedCols,
478 }
479 }
480
481 /* Save in appropriate set of fields */
482 if (cmdtype == CMD_UPDATE)
483 {
484 /* Don't call twice */
485 Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
486
487 resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
488 resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
489 }
490 else
491 {
492 /* Don't call twice */
493 Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
494
495 resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
496 resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
497 }
498
499 MemoryContextSwitchTo(oldContext);
500}
501
502/*
503 * Compute stored generated columns for a tuple
504 */
505void
507 EState *estate, TupleTableSlot *slot,
508 CmdType cmdtype)
509{
510 Relation rel = resultRelInfo->ri_RelationDesc;
511 TupleDesc tupdesc = RelationGetDescr(rel);
512 int natts = tupdesc->natts;
513 ExprContext *econtext = GetPerTupleExprContext(estate);
514 ExprState **ri_GeneratedExprs;
515 MemoryContext oldContext;
516 Datum *values;
517 bool *nulls;
518
519 /* We should not be called unless this is true */
520 Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
521
522 /*
523 * Initialize the expressions if we didn't already, and check whether we
524 * can exit early because nothing needs to be computed.
525 */
526 if (cmdtype == CMD_UPDATE)
527 {
528 if (resultRelInfo->ri_GeneratedExprsU == NULL)
529 ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
530 if (resultRelInfo->ri_NumGeneratedNeededU == 0)
531 return;
532 ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
533 }
534 else
535 {
536 if (resultRelInfo->ri_GeneratedExprsI == NULL)
537 ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
538 /* Early exit is impossible given the prior Assert */
539 Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
540 ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
541 }
542
544
545 values = palloc(sizeof(*values) * natts);
546 nulls = palloc(sizeof(*nulls) * natts);
547
548 slot_getallattrs(slot);
549 memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
550
551 for (int i = 0; i < natts; i++)
552 {
553 CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
554
555 if (ri_GeneratedExprs[i])
556 {
557 Datum val;
558 bool isnull;
559
560 Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
561
562 econtext->ecxt_scantuple = slot;
563
564 val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
565
566 /*
567 * We must make a copy of val as we have no guarantees about where
568 * memory for a pass-by-reference Datum is located.
569 */
570 if (!isnull)
571 val = datumCopy(val, attr->attbyval, attr->attlen);
572
573 values[i] = val;
574 nulls[i] = isnull;
575 }
576 else
577 {
578 if (!nulls[i])
579 values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
580 }
581 }
582
583 ExecClearTuple(slot);
584 memcpy(slot->tts_values, values, sizeof(*values) * natts);
585 memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
588
589 MemoryContextSwitchTo(oldContext);
590}
591
592/*
593 * ExecInitInsertProjection
594 * Do one-time initialization of projection data for INSERT tuples.
595 *
596 * INSERT queries may need a projection to filter out junk attrs in the tlist.
597 *
598 * This is also a convenient place to verify that the
599 * output of an INSERT matches the target table.
600 */
601static void
603 ResultRelInfo *resultRelInfo)
604{
605 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
606 Plan *subplan = outerPlan(node);
607 EState *estate = mtstate->ps.state;
608 List *insertTargetList = NIL;
609 bool need_projection = false;
610 ListCell *l;
611
612 /* Extract non-junk columns of the subplan's result tlist. */
613 foreach(l, subplan->targetlist)
614 {
615 TargetEntry *tle = (TargetEntry *) lfirst(l);
616
617 if (!tle->resjunk)
618 insertTargetList = lappend(insertTargetList, tle);
619 else
620 need_projection = true;
621 }
622
623 /*
624 * The junk-free list must produce a tuple suitable for the result
625 * relation.
626 */
627 ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
628
629 /* We'll need a slot matching the table's format. */
630 resultRelInfo->ri_newTupleSlot =
631 table_slot_create(resultRelInfo->ri_RelationDesc,
632 &estate->es_tupleTable);
633
634 /* Build ProjectionInfo if needed (it probably isn't). */
635 if (need_projection)
636 {
637 TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
638
639 /* need an expression context to do the projection */
640 if (mtstate->ps.ps_ExprContext == NULL)
641 ExecAssignExprContext(estate, &mtstate->ps);
642
643 resultRelInfo->ri_projectNew =
644 ExecBuildProjectionInfo(insertTargetList,
645 mtstate->ps.ps_ExprContext,
646 resultRelInfo->ri_newTupleSlot,
647 &mtstate->ps,
648 relDesc);
649 }
650
651 resultRelInfo->ri_projectNewInfoValid = true;
652}
653
654/*
655 * ExecInitUpdateProjection
656 * Do one-time initialization of projection data for UPDATE tuples.
657 *
658 * UPDATE always needs a projection, because (1) there's always some junk
659 * attrs, and (2) we may need to merge values of not-updated columns from
660 * the old tuple into the final tuple. In UPDATE, the tuple arriving from
661 * the subplan contains only new values for the changed columns, plus row
662 * identity info in the junk attrs.
663 *
664 * This is "one-time" for any given result rel, but we might touch more than
665 * one result rel in the course of an inherited UPDATE, and each one needs
666 * its own projection due to possible column order variation.
667 *
668 * This is also a convenient place to verify that the output of an UPDATE
669 * matches the target table (ExecBuildUpdateProjection does that).
670 */
671static void
673 ResultRelInfo *resultRelInfo)
674{
675 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
676 Plan *subplan = outerPlan(node);
677 EState *estate = mtstate->ps.state;
678 TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
679 int whichrel;
680 List *updateColnos;
681
682 /*
683 * Usually, mt_lastResultIndex matches the target rel. If it happens not
684 * to, we can get the index the hard way with an integer division.
685 */
686 whichrel = mtstate->mt_lastResultIndex;
687 if (resultRelInfo != mtstate->resultRelInfo + whichrel)
688 {
689 whichrel = resultRelInfo - mtstate->resultRelInfo;
690 Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
691 }
692
693 updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
694
695 /*
696 * For UPDATE, we use the old tuple to fill up missing values in the tuple
697 * produced by the subplan to get the new tuple. We need two slots, both
698 * matching the table's desired format.
699 */
700 resultRelInfo->ri_oldTupleSlot =
701 table_slot_create(resultRelInfo->ri_RelationDesc,
702 &estate->es_tupleTable);
703 resultRelInfo->ri_newTupleSlot =
704 table_slot_create(resultRelInfo->ri_RelationDesc,
705 &estate->es_tupleTable);
706
707 /* need an expression context to do the projection */
708 if (mtstate->ps.ps_ExprContext == NULL)
709 ExecAssignExprContext(estate, &mtstate->ps);
710
711 resultRelInfo->ri_projectNew =
712 ExecBuildUpdateProjection(subplan->targetlist,
713 false, /* subplan did the evaluation */
714 updateColnos,
715 relDesc,
716 mtstate->ps.ps_ExprContext,
717 resultRelInfo->ri_newTupleSlot,
718 &mtstate->ps);
719
720 resultRelInfo->ri_projectNewInfoValid = true;
721}
722
723/*
724 * ExecGetInsertNewTuple
725 * This prepares a "new" tuple ready to be inserted into given result
726 * relation, by removing any junk columns of the plan's output tuple
727 * and (if necessary) coercing the tuple to the right tuple format.
728 */
729static TupleTableSlot *
731 TupleTableSlot *planSlot)
732{
733 ProjectionInfo *newProj = relinfo->ri_projectNew;
734 ExprContext *econtext;
735
736 /*
737 * If there's no projection to be done, just make sure the slot is of the
738 * right type for the target rel. If the planSlot is the right type we
739 * can use it as-is, else copy the data into ri_newTupleSlot.
740 */
741 if (newProj == NULL)
742 {
743 if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
744 {
745 ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
746 return relinfo->ri_newTupleSlot;
747 }
748 else
749 return planSlot;
750 }
751
752 /*
753 * Else project; since the projection output slot is ri_newTupleSlot, this
754 * will also fix any slot-type problem.
755 *
756 * Note: currently, this is dead code, because INSERT cases don't receive
757 * any junk columns so there's never a projection to be done.
758 */
759 econtext = newProj->pi_exprContext;
760 econtext->ecxt_outertuple = planSlot;
761 return ExecProject(newProj);
762}
763
764/*
765 * ExecGetUpdateNewTuple
766 * This prepares a "new" tuple by combining an UPDATE subplan's output
767 * tuple (which contains values of changed columns) with unchanged
768 * columns taken from the old tuple.
769 *
770 * The subplan tuple might also contain junk columns, which are ignored.
771 * Note that the projection also ensures we have a slot of the right type.
772 */
775 TupleTableSlot *planSlot,
776 TupleTableSlot *oldSlot)
777{
778 ProjectionInfo *newProj = relinfo->ri_projectNew;
779 ExprContext *econtext;
780
781 /* Use a few extra Asserts to protect against outside callers */
783 Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
784 Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
785
786 econtext = newProj->pi_exprContext;
787 econtext->ecxt_outertuple = planSlot;
788 econtext->ecxt_scantuple = oldSlot;
789 return ExecProject(newProj);
790}
791
792/* ----------------------------------------------------------------
793 * ExecInsert
794 *
795 * For INSERT, we have to insert the tuple into the target relation
796 * (or partition thereof) and insert appropriate tuples into the index
797 * relations.
798 *
799 * slot contains the new tuple value to be stored.
800 *
801 * Returns RETURNING result if any, otherwise NULL.
802 * *inserted_tuple is the tuple that's effectively inserted;
803 * *insert_destrel is the relation where it was inserted.
804 * These are only set on success.
805 *
806 * This may change the currently active tuple conversion map in
807 * mtstate->mt_transition_capture, so the callers must take care to
808 * save the previous value to avoid losing track of it.
809 * ----------------------------------------------------------------
810 */
811static TupleTableSlot *
813 ResultRelInfo *resultRelInfo,
814 TupleTableSlot *slot,
815 bool canSetTag,
816 TupleTableSlot **inserted_tuple,
817 ResultRelInfo **insert_destrel)
818{
819 ModifyTableState *mtstate = context->mtstate;
820 EState *estate = context->estate;
821 Relation resultRelationDesc;
822 List *recheckIndexes = NIL;
823 TupleTableSlot *planSlot = context->planSlot;
824 TupleTableSlot *result = NULL;
825 TransitionCaptureState *ar_insert_trig_tcs;
826 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
827 OnConflictAction onconflict = node->onConflictAction;
829 MemoryContext oldContext;
830
831 /*
832 * If the input result relation is a partitioned table, find the leaf
833 * partition to insert the tuple into.
834 */
835 if (proute)
836 {
837 ResultRelInfo *partRelInfo;
838
839 slot = ExecPrepareTupleRouting(mtstate, estate, proute,
840 resultRelInfo, slot,
841 &partRelInfo);
842 resultRelInfo = partRelInfo;
843 }
844
846
847 resultRelationDesc = resultRelInfo->ri_RelationDesc;
848
849 /*
850 * Open the table's indexes, if we have not done so already, so that we
851 * can add new index entries for the inserted tuple.
852 */
853 if (resultRelationDesc->rd_rel->relhasindex &&
854 resultRelInfo->ri_IndexRelationDescs == NULL)
855 ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
856
857 /*
858 * BEFORE ROW INSERT Triggers.
859 *
860 * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
861 * INSERT ... ON CONFLICT statement. We cannot check for constraint
862 * violations before firing these triggers, because they can change the
863 * values to insert. Also, they can run arbitrary user-defined code with
864 * side-effects that we can't cancel by just not inserting the tuple.
865 */
866 if (resultRelInfo->ri_TrigDesc &&
867 resultRelInfo->ri_TrigDesc->trig_insert_before_row)
868 {
869 /* Flush any pending inserts, so rows are visible to the triggers */
871 ExecPendingInserts(estate);
872
873 if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
874 return NULL; /* "do nothing" */
875 }
876
877 /* INSTEAD OF ROW INSERT Triggers */
878 if (resultRelInfo->ri_TrigDesc &&
879 resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
880 {
881 if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
882 return NULL; /* "do nothing" */
883 }
884 else if (resultRelInfo->ri_FdwRoutine)
885 {
886 /*
887 * GENERATED expressions might reference the tableoid column, so
888 * (re-)initialize tts_tableOid before evaluating them.
889 */
890 slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
891
892 /*
893 * Compute stored generated columns
894 */
895 if (resultRelationDesc->rd_att->constr &&
896 resultRelationDesc->rd_att->constr->has_generated_stored)
897 ExecComputeStoredGenerated(resultRelInfo, estate, slot,
898 CMD_INSERT);
899
900 /*
901 * If the FDW supports batching, and batching is requested, accumulate
902 * rows and insert them in batches. Otherwise use the per-row inserts.
903 */
904 if (resultRelInfo->ri_BatchSize > 1)
905 {
906 bool flushed = false;
907
908 /*
909 * When we've reached the desired batch size, perform the
910 * insertion.
911 */
912 if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
913 {
914 ExecBatchInsert(mtstate, resultRelInfo,
915 resultRelInfo->ri_Slots,
916 resultRelInfo->ri_PlanSlots,
917 resultRelInfo->ri_NumSlots,
918 estate, canSetTag);
919 flushed = true;
920 }
921
922 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
923
924 if (resultRelInfo->ri_Slots == NULL)
925 {
926 resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
927 resultRelInfo->ri_BatchSize);
928 resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
929 resultRelInfo->ri_BatchSize);
930 }
931
932 /*
933 * Initialize the batch slots. We don't know how many slots will
934 * be needed, so we initialize them as the batch grows, and we
935 * keep them across batches. To mitigate an inefficiency in how
936 * resource owner handles objects with many references (as with
937 * many slots all referencing the same tuple descriptor) we copy
938 * the appropriate tuple descriptor for each slot.
939 */
940 if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
941 {
943 TupleDesc plan_tdesc =
945
946 resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
947 MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
948
949 resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
950 MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
951
952 /* remember how many batch slots we initialized */
953 resultRelInfo->ri_NumSlotsInitialized++;
954 }
955
956 ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
957 slot);
958
959 ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
960 planSlot);
961
962 /*
963 * If these are the first tuples stored in the buffers, add the
964 * target rel and the mtstate to the
965 * es_insert_pending_result_relations and
966 * es_insert_pending_modifytables lists respectively, except in
967 * the case where flushing was done above, in which case they
968 * would already have been added to the lists, so no need to do
969 * this.
970 */
971 if (resultRelInfo->ri_NumSlots == 0 && !flushed)
972 {
974 resultRelInfo));
977 resultRelInfo);
979 lappend(estate->es_insert_pending_modifytables, mtstate);
980 }
982 resultRelInfo));
983
984 resultRelInfo->ri_NumSlots++;
985
986 MemoryContextSwitchTo(oldContext);
987
988 return NULL;
989 }
990
991 /*
992 * insert into foreign table: let the FDW do it
993 */
994 slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
995 resultRelInfo,
996 slot,
997 planSlot);
998
999 if (slot == NULL) /* "do nothing" */
1000 return NULL;
1001
1002 /*
1003 * AFTER ROW Triggers or RETURNING expressions might reference the
1004 * tableoid column, so (re-)initialize tts_tableOid before evaluating
1005 * them. (This covers the case where the FDW replaced the slot.)
1006 */
1007 slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1008 }
1009 else
1010 {
1011 WCOKind wco_kind;
1012
1013 /*
1014 * Constraints and GENERATED expressions might reference the tableoid
1015 * column, so (re-)initialize tts_tableOid before evaluating them.
1016 */
1017 slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1018
1019 /*
1020 * Compute stored generated columns
1021 */
1022 if (resultRelationDesc->rd_att->constr &&
1023 resultRelationDesc->rd_att->constr->has_generated_stored)
1024 ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1025 CMD_INSERT);
1026
1027 /*
1028 * Check any RLS WITH CHECK policies.
1029 *
1030 * Normally we should check INSERT policies. But if the insert is the
1031 * result of a partition key update that moved the tuple to a new
1032 * partition, we should instead check UPDATE policies, because we are
1033 * executing policies defined on the target table, and not those
1034 * defined on the child partitions.
1035 *
1036 * If we're running MERGE, we refer to the action that we're executing
1037 * to know if we're doing an INSERT or UPDATE to a partition table.
1038 */
1039 if (mtstate->operation == CMD_UPDATE)
1040 wco_kind = WCO_RLS_UPDATE_CHECK;
1041 else if (mtstate->operation == CMD_MERGE)
1042 wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1044 else
1045 wco_kind = WCO_RLS_INSERT_CHECK;
1046
1047 /*
1048 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1049 * we are looking for at this point.
1050 */
1051 if (resultRelInfo->ri_WithCheckOptions != NIL)
1052 ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1053
1054 /*
1055 * Check the constraints of the tuple.
1056 */
1057 if (resultRelationDesc->rd_att->constr)
1058 ExecConstraints(resultRelInfo, slot, estate);
1059
1060 /*
1061 * Also check the tuple against the partition constraint, if there is
1062 * one; except that if we got here via tuple-routing, we don't need to
1063 * if there's no BR trigger defined on the partition.
1064 */
1065 if (resultRelationDesc->rd_rel->relispartition &&
1066 (resultRelInfo->ri_RootResultRelInfo == NULL ||
1067 (resultRelInfo->ri_TrigDesc &&
1068 resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1069 ExecPartitionCheck(resultRelInfo, slot, estate, true);
1070
1071 if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1072 {
1073 /* Perform a speculative insertion. */
1074 uint32 specToken;
1075 ItemPointerData conflictTid;
1076 ItemPointerData invalidItemPtr;
1077 bool specConflict;
1078 List *arbiterIndexes;
1079
1080 ItemPointerSetInvalid(&invalidItemPtr);
1081 arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1082
1083 /*
1084 * Do a non-conclusive check for conflicts first.
1085 *
1086 * We're not holding any locks yet, so this doesn't guarantee that
1087 * the later insert won't conflict. But it avoids leaving behind
1088 * a lot of canceled speculative insertions, if you run a lot of
1089 * INSERT ON CONFLICT statements that do conflict.
1090 *
1091 * We loop back here if we find a conflict below, either during
1092 * the pre-check, or when we re-check after inserting the tuple
1093 * speculatively. Better allow interrupts in case some bug makes
1094 * this an infinite loop.
1095 */
1096 vlock:
1098 specConflict = false;
1099 if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1100 &conflictTid, &invalidItemPtr,
1101 arbiterIndexes))
1102 {
1103 /* committed conflict tuple found */
1104 if (onconflict == ONCONFLICT_UPDATE)
1105 {
1106 /*
1107 * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1108 * part. Be prepared to retry if the UPDATE fails because
1109 * of another concurrent UPDATE/DELETE to the conflict
1110 * tuple.
1111 */
1112 TupleTableSlot *returning = NULL;
1113
1114 if (ExecOnConflictUpdate(context, resultRelInfo,
1115 &conflictTid, slot, canSetTag,
1116 &returning))
1117 {
1118 InstrCountTuples2(&mtstate->ps, 1);
1119 return returning;
1120 }
1121 else
1122 goto vlock;
1123 }
1124 else
1125 {
1126 /*
1127 * In case of ON CONFLICT DO NOTHING, do nothing. However,
1128 * verify that the tuple is visible to the executor's MVCC
1129 * snapshot at higher isolation levels.
1130 *
1131 * Using ExecGetReturningSlot() to store the tuple for the
1132 * recheck isn't that pretty, but we can't trivially use
1133 * the input slot, because it might not be of a compatible
1134 * type. As there's no conflicting usage of
1135 * ExecGetReturningSlot() in the DO NOTHING case...
1136 */
1137 Assert(onconflict == ONCONFLICT_NOTHING);
1138 ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1139 ExecGetReturningSlot(estate, resultRelInfo));
1140 InstrCountTuples2(&mtstate->ps, 1);
1141 return NULL;
1142 }
1143 }
1144
1145 /*
1146 * Before we start insertion proper, acquire our "speculative
1147 * insertion lock". Others can use that to wait for us to decide
1148 * if we're going to go ahead with the insertion, instead of
1149 * waiting for the whole transaction to complete.
1150 */
1152
1153 /* insert the tuple, with the speculative token */
1154 table_tuple_insert_speculative(resultRelationDesc, slot,
1155 estate->es_output_cid,
1156 0,
1157 NULL,
1158 specToken);
1159
1160 /* insert index entries for tuple */
1161 recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1162 slot, estate, false, true,
1163 &specConflict,
1164 arbiterIndexes,
1165 false);
1166
1167 /* adjust the tuple's state accordingly */
1168 table_tuple_complete_speculative(resultRelationDesc, slot,
1169 specToken, !specConflict);
1170
1171 /*
1172 * Wake up anyone waiting for our decision. They will re-check
1173 * the tuple, see that it's no longer speculative, and wait on our
1174 * XID as if this was a regularly inserted tuple all along. Or if
1175 * we killed the tuple, they will see it's dead, and proceed as if
1176 * the tuple never existed.
1177 */
1179
1180 /*
1181 * If there was a conflict, start from the beginning. We'll do
1182 * the pre-check again, which will now find the conflicting tuple
1183 * (unless it aborts before we get there).
1184 */
1185 if (specConflict)
1186 {
1187 list_free(recheckIndexes);
1188 goto vlock;
1189 }
1190
1191 /* Since there was no insertion conflict, we're done */
1192 }
1193 else
1194 {
1195 /* insert the tuple normally */
1196 table_tuple_insert(resultRelationDesc, slot,
1197 estate->es_output_cid,
1198 0, NULL);
1199
1200 /* insert index entries for tuple */
1201 if (resultRelInfo->ri_NumIndices > 0)
1202 recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1203 slot, estate, false,
1204 false, NULL, NIL,
1205 false);
1206 }
1207 }
1208
1209 if (canSetTag)
1210 (estate->es_processed)++;
1211
1212 /*
1213 * If this insert is the result of a partition key update that moved the
1214 * tuple to a new partition, put this row into the transition NEW TABLE,
1215 * if there is one. We need to do this separately for DELETE and INSERT
1216 * because they happen on different tables.
1217 */
1218 ar_insert_trig_tcs = mtstate->mt_transition_capture;
1219 if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1221 {
1222 ExecARUpdateTriggers(estate, resultRelInfo,
1223 NULL, NULL,
1224 NULL,
1225 NULL,
1226 slot,
1227 NULL,
1228 mtstate->mt_transition_capture,
1229 false);
1230
1231 /*
1232 * We've already captured the NEW TABLE row, so make sure any AR
1233 * INSERT trigger fired below doesn't capture it again.
1234 */
1235 ar_insert_trig_tcs = NULL;
1236 }
1237
1238 /* AFTER ROW INSERT Triggers */
1239 ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1240 ar_insert_trig_tcs);
1241
1242 list_free(recheckIndexes);
1243
1244 /*
1245 * Check any WITH CHECK OPTION constraints from parent views. We are
1246 * required to do this after testing all constraints and uniqueness
1247 * violations per the SQL spec, so we do it after actually inserting the
1248 * record into the heap and all indexes.
1249 *
1250 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1251 * tuple will never be seen, if it violates the WITH CHECK OPTION.
1252 *
1253 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1254 * are looking for at this point.
1255 */
1256 if (resultRelInfo->ri_WithCheckOptions != NIL)
1257 ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1258
1259 /* Process RETURNING if present */
1260 if (resultRelInfo->ri_projectReturning)
1261 {
1262 TupleTableSlot *oldSlot = NULL;
1263
1264 /*
1265 * If this is part of a cross-partition UPDATE, and the RETURNING list
1266 * refers to any OLD columns, ExecDelete() will have saved the tuple
1267 * deleted from the original partition, which we must use here to
1268 * compute the OLD column values. Otherwise, all OLD column values
1269 * will be NULL.
1270 */
1271 if (context->cpDeletedSlot)
1272 {
1273 TupleConversionMap *tupconv_map;
1274
1275 /*
1276 * Convert the OLD tuple to the new partition's format/slot, if
		 * needed.  Note that ExecDelete() already converted it to the
1278 * root's partition's format/slot.
1279 */
1280 oldSlot = context->cpDeletedSlot;
1281 tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1282 if (tupconv_map != NULL)
1283 {
1284 oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1285 oldSlot,
1286 ExecGetReturningSlot(estate,
1287 resultRelInfo));
1288
1289 oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1290 ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1291 }
1292 }
1293
1294 result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1295 oldSlot, slot, planSlot);
1296
1297 /*
1298 * For a cross-partition UPDATE, release the old tuple, first making
1299 * sure that the result slot has a local copy of any pass-by-reference
1300 * values.
1301 */
1302 if (context->cpDeletedSlot)
1303 {
1304 ExecMaterializeSlot(result);
1305 ExecClearTuple(oldSlot);
1306 if (context->cpDeletedSlot != oldSlot)
1307 ExecClearTuple(context->cpDeletedSlot);
1308 context->cpDeletedSlot = NULL;
1309 }
1310 }
1311
1312 if (inserted_tuple)
1313 *inserted_tuple = slot;
1314 if (insert_destrel)
1315 *insert_destrel = resultRelInfo;
1316
1317 return result;
1318}
1319
1320/* ----------------------------------------------------------------
1321 * ExecBatchInsert
1322 *
1323 * Insert multiple tuples in an efficient way.
1324 * Currently, this handles inserting into a foreign table without
1325 * RETURNING clause.
1326 * ----------------------------------------------------------------
1327 */
1328static void
1330 ResultRelInfo *resultRelInfo,
1331 TupleTableSlot **slots,
1332 TupleTableSlot **planSlots,
1333 int numSlots,
1334 EState *estate,
1335 bool canSetTag)
1336{
1337 int i;
1338 int numInserted = numSlots;
1339 TupleTableSlot *slot = NULL;
1340 TupleTableSlot **rslots;
1341
1342 /*
1343 * insert into foreign table: let the FDW do it
1344 */
1345 rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1346 resultRelInfo,
1347 slots,
1348 planSlots,
1349 &numInserted);
1350
1351 for (i = 0; i < numInserted; i++)
1352 {
1353 slot = rslots[i];
1354
1355 /*
1356 * AFTER ROW Triggers might reference the tableoid column, so
1357 * (re-)initialize tts_tableOid before evaluating them.
1358 */
1359 slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1360
1361 /* AFTER ROW INSERT Triggers */
1362 ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1363 mtstate->mt_transition_capture);
1364
1365 /*
1366 * Check any WITH CHECK OPTION constraints from parent views. See the
1367 * comment in ExecInsert.
1368 */
1369 if (resultRelInfo->ri_WithCheckOptions != NIL)
1370 ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1371 }
1372
1373 if (canSetTag && numInserted > 0)
1374 estate->es_processed += numInserted;
1375
1376 /* Clean up all the slots, ready for the next batch */
1377 for (i = 0; i < numSlots; i++)
1378 {
1379 ExecClearTuple(slots[i]);
1380 ExecClearTuple(planSlots[i]);
1381 }
1382 resultRelInfo->ri_NumSlots = 0;
1383}
1384
1385/*
1386 * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1387 */
1388static void
1390{
1391 ListCell *l1,
1392 *l2;
1393
1396 {
1397 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1398 ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1399
1400 Assert(mtstate);
1401 ExecBatchInsert(mtstate, resultRelInfo,
1402 resultRelInfo->ri_Slots,
1403 resultRelInfo->ri_PlanSlots,
1404 resultRelInfo->ri_NumSlots,
1405 estate, mtstate->canSetTag);
1406 }
1407
1412}
1413
1414/*
1415 * ExecDeletePrologue -- subroutine for ExecDelete
1416 *
1417 * Prepare executor state for DELETE. Actually, the only thing we have to do
1418 * here is execute BEFORE ROW triggers. We return false if one of them makes
1419 * the delete a no-op; otherwise, return true.
1420 */
1421static bool
1423 ItemPointer tupleid, HeapTuple oldtuple,
1424 TupleTableSlot **epqreturnslot, TM_Result *result)
1425{
1426 if (result)
1427 *result = TM_Ok;
1428
1429 /* BEFORE ROW DELETE triggers */
1430 if (resultRelInfo->ri_TrigDesc &&
1431 resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1432 {
1433 /* Flush any pending inserts, so rows are visible to the triggers */
1435 ExecPendingInserts(context->estate);
1436
1437 return ExecBRDeleteTriggers(context->estate, context->epqstate,
1438 resultRelInfo, tupleid, oldtuple,
1439 epqreturnslot, result, &context->tmfd);
1440 }
1441
1442 return true;
1443}
1444
1445/*
1446 * ExecDeleteAct -- subroutine for ExecDelete
1447 *
1448 * Actually delete the tuple from a plain table.
1449 *
1450 * Caller is in charge of doing EvalPlanQual as necessary
1451 */
1452static TM_Result
1454 ItemPointer tupleid, bool changingPart)
1455{
1456 EState *estate = context->estate;
1457
1458 return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1459 estate->es_output_cid,
1460 estate->es_snapshot,
1461 estate->es_crosscheck_snapshot,
1462 true /* wait for commit */ ,
1463 &context->tmfd,
1464 changingPart);
1465}
1466
1467/*
1468 * ExecDeleteEpilogue -- subroutine for ExecDelete
1469 *
1470 * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1471 * including the UPDATE triggers if the deletion is being done as part of a
1472 * cross-partition tuple move.
1473 */
1474static void
1476 ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1477{
1478 ModifyTableState *mtstate = context->mtstate;
1479 EState *estate = context->estate;
1480 TransitionCaptureState *ar_delete_trig_tcs;
1481
1482 /*
1483 * If this delete is the result of a partition key update that moved the
1484 * tuple to a new partition, put this row into the transition OLD TABLE,
1485 * if there is one. We need to do this separately for DELETE and INSERT
1486 * because they happen on different tables.
1487 */
1488 ar_delete_trig_tcs = mtstate->mt_transition_capture;
1489 if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1491 {
1492 ExecARUpdateTriggers(estate, resultRelInfo,
1493 NULL, NULL,
1494 tupleid, oldtuple,
1495 NULL, NULL, mtstate->mt_transition_capture,
1496 false);
1497
1498 /*
1499 * We've already captured the OLD TABLE row, so make sure any AR
1500 * DELETE trigger fired below doesn't capture it again.
1501 */
1502 ar_delete_trig_tcs = NULL;
1503 }
1504
1505 /* AFTER ROW DELETE Triggers */
1506 ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1507 ar_delete_trig_tcs, changingPart);
1508}
1509
1510/* ----------------------------------------------------------------
1511 * ExecDelete
1512 *
1513 * DELETE is like UPDATE, except that we delete the tuple and no
1514 * index modifications are needed.
1515 *
1516 * When deleting from a table, tupleid identifies the tuple to delete and
1517 * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1518 * oldtuple is passed to the triggers and identifies what to delete, and
1519 * tupleid is invalid. When deleting from a foreign table, tupleid is
1520 * invalid; the FDW has to figure out which row to delete using data from
1521 * the planSlot. oldtuple is passed to foreign table triggers; it is
1522 * NULL when the foreign table has no relevant triggers. We use
1523 * tupleDeleted to indicate whether the tuple is actually deleted,
1524 * callers can use it to decide whether to continue the operation. When
1525 * this DELETE is a part of an UPDATE of partition-key, then the slot
1526 * returned by EvalPlanQual() is passed back using output parameter
1527 * epqreturnslot.
1528 *
1529 * Returns RETURNING result if any, otherwise NULL.
1530 * ----------------------------------------------------------------
1531 */
1532static TupleTableSlot *
1534 ResultRelInfo *resultRelInfo,
1535 ItemPointer tupleid,
1536 HeapTuple oldtuple,
1537 bool processReturning,
1538 bool changingPart,
1539 bool canSetTag,
1540 TM_Result *tmresult,
1541 bool *tupleDeleted,
1542 TupleTableSlot **epqreturnslot)
1543{
1544 EState *estate = context->estate;
1545 Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1546 TupleTableSlot *slot = NULL;
1547 TM_Result result;
1548 bool saveOld;
1549
1550 if (tupleDeleted)
1551 *tupleDeleted = false;
1552
1553 /*
1554 * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1555 * done if it says we are.
1556 */
1557 if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1558 epqreturnslot, tmresult))
1559 return NULL;
1560
1561 /* INSTEAD OF ROW DELETE Triggers */
1562 if (resultRelInfo->ri_TrigDesc &&
1563 resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1564 {
1565 bool dodelete;
1566
1567 Assert(oldtuple != NULL);
1568 dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1569
1570 if (!dodelete) /* "do nothing" */
1571 return NULL;
1572 }
1573 else if (resultRelInfo->ri_FdwRoutine)
1574 {
1575 /*
1576 * delete from foreign table: let the FDW do it
1577 *
1578 * We offer the returning slot as a place to store RETURNING data,
1579 * although the FDW can return some other slot if it wants.
1580 */
1581 slot = ExecGetReturningSlot(estate, resultRelInfo);
1582 slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1583 resultRelInfo,
1584 slot,
1585 context->planSlot);
1586
1587 if (slot == NULL) /* "do nothing" */
1588 return NULL;
1589
1590 /*
1591 * RETURNING expressions might reference the tableoid column, so
1592 * (re)initialize tts_tableOid before evaluating them.
1593 */
1594 if (TTS_EMPTY(slot))
1596
1597 slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1598 }
1599 else
1600 {
1601 /*
1602 * delete the tuple
1603 *
1604 * Note: if context->estate->es_crosscheck_snapshot isn't
1605 * InvalidSnapshot, we check that the row to be deleted is visible to
1606 * that snapshot, and throw a can't-serialize error if not. This is a
1607 * special-case behavior needed for referential integrity updates in
1608 * transaction-snapshot mode transactions.
1609 */
1610ldelete:
1611 result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1612
1613 if (tmresult)
1614 *tmresult = result;
1615
1616 switch (result)
1617 {
1618 case TM_SelfModified:
1619
1620 /*
1621 * The target tuple was already updated or deleted by the
1622 * current command, or by a later command in the current
1623 * transaction. The former case is possible in a join DELETE
1624 * where multiple tuples join to the same target tuple. This
1625 * is somewhat questionable, but Postgres has always allowed
1626 * it: we just ignore additional deletion attempts.
1627 *
1628 * The latter case arises if the tuple is modified by a
1629 * command in a BEFORE trigger, or perhaps by a command in a
1630 * volatile function used in the query. In such situations we
1631 * should not ignore the deletion, but it is equally unsafe to
1632 * proceed. We don't want to discard the original DELETE
1633 * while keeping the triggered actions based on its deletion;
1634 * and it would be no better to allow the original DELETE
1635 * while discarding updates that it triggered. The row update
1636 * carries some information that might be important according
1637 * to business rules; so throwing an error is the only safe
1638 * course.
1639 *
1640 * If a trigger actually intends this type of interaction, it
1641 * can re-execute the DELETE and then return NULL to cancel
1642 * the outer delete.
1643 */
1644 if (context->tmfd.cmax != estate->es_output_cid)
1645 ereport(ERROR,
1646 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1647 errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1648 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1649
1650 /* Else, already deleted by self; nothing to do */
1651 return NULL;
1652
1653 case TM_Ok:
1654 break;
1655
1656 case TM_Updated:
1657 {
1658 TupleTableSlot *inputslot;
1659 TupleTableSlot *epqslot;
1660
1662 ereport(ERROR,
1664 errmsg("could not serialize access due to concurrent update")));
1665
1666 /*
1667 * Already know that we're going to need to do EPQ, so
1668 * fetch tuple directly into the right slot.
1669 */
1670 EvalPlanQualBegin(context->epqstate);
1671 inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1672 resultRelInfo->ri_RangeTableIndex);
1673
1674 result = table_tuple_lock(resultRelationDesc, tupleid,
1675 estate->es_snapshot,
1676 inputslot, estate->es_output_cid,
1679 &context->tmfd);
1680
1681 switch (result)
1682 {
1683 case TM_Ok:
1684 Assert(context->tmfd.traversed);
1685 epqslot = EvalPlanQual(context->epqstate,
1686 resultRelationDesc,
1687 resultRelInfo->ri_RangeTableIndex,
1688 inputslot);
1689 if (TupIsNull(epqslot))
1690 /* Tuple not passing quals anymore, exiting... */
1691 return NULL;
1692
1693 /*
1694 * If requested, skip delete and pass back the
1695 * updated row.
1696 */
1697 if (epqreturnslot)
1698 {
1699 *epqreturnslot = epqslot;
1700 return NULL;
1701 }
1702 else
1703 goto ldelete;
1704
1705 case TM_SelfModified:
1706
1707 /*
1708 * This can be reached when following an update
1709 * chain from a tuple updated by another session,
1710 * reaching a tuple that was already updated in
1711 * this transaction. If previously updated by this
1712 * command, ignore the delete, otherwise error
1713 * out.
1714 *
1715 * See also TM_SelfModified response to
1716 * table_tuple_delete() above.
1717 */
1718 if (context->tmfd.cmax != estate->es_output_cid)
1719 ereport(ERROR,
1720 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1721 errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1722 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1723 return NULL;
1724
1725 case TM_Deleted:
1726 /* tuple already deleted; nothing to do */
1727 return NULL;
1728
1729 default:
1730
1731 /*
1732 * TM_Invisible should be impossible because we're
1733 * waiting for updated row versions, and would
1734 * already have errored out if the first version
1735 * is invisible.
1736 *
1737 * TM_Updated should be impossible, because we're
1738 * locking the latest version via
1739 * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1740 */
1741 elog(ERROR, "unexpected table_tuple_lock status: %u",
1742 result);
1743 return NULL;
1744 }
1745
1746 Assert(false);
1747 break;
1748 }
1749
1750 case TM_Deleted:
1752 ereport(ERROR,
1754 errmsg("could not serialize access due to concurrent delete")));
1755 /* tuple already deleted; nothing to do */
1756 return NULL;
1757
1758 default:
1759 elog(ERROR, "unrecognized table_tuple_delete status: %u",
1760 result);
1761 return NULL;
1762 }
1763
1764 /*
1765 * Note: Normally one would think that we have to delete index tuples
1766 * associated with the heap tuple now...
1767 *
1768 * ... but in POSTGRES, we have no need to do this because VACUUM will
1769 * take care of it later. We can't delete index tuples immediately
1770 * anyway, since the tuple is still visible to other transactions.
1771 */
1772 }
1773
1774 if (canSetTag)
1775 (estate->es_processed)++;
1776
1777 /* Tell caller that the delete actually happened. */
1778 if (tupleDeleted)
1779 *tupleDeleted = true;
1780
1781 ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1782
1783 /*
1784 * Process RETURNING if present and if requested.
1785 *
1786 * If this is part of a cross-partition UPDATE, and the RETURNING list
1787 * refers to any OLD column values, save the old tuple here for later
1788 * processing of the RETURNING list by ExecInsert().
1789 */
1790 saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1792
1793 if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
1794 {
1795 /*
1796 * We have to put the target tuple into a slot, which means first we
1797 * gotta fetch it. We can use the trigger tuple slot.
1798 */
1799 TupleTableSlot *rslot;
1800
1801 if (resultRelInfo->ri_FdwRoutine)
1802 {
1803 /* FDW must have provided a slot containing the deleted row */
1804 Assert(!TupIsNull(slot));
1805 }
1806 else
1807 {
1808 slot = ExecGetReturningSlot(estate, resultRelInfo);
1809 if (oldtuple != NULL)
1810 {
1811 ExecForceStoreHeapTuple(oldtuple, slot, false);
1812 }
1813 else
1814 {
1815 if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1816 SnapshotAny, slot))
1817 elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1818 }
1819 }
1820
1821 /*
1822 * If required, save the old tuple for later processing of the
1823 * RETURNING list by ExecInsert().
1824 */
1825 if (saveOld)
1826 {
1827 TupleConversionMap *tupconv_map;
1828
1829 /*
1830 * Convert the tuple into the root partition's format/slot, if
1831 * needed. ExecInsert() will then convert it to the new
1832 * partition's format/slot, if necessary.
1833 */
1834 tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1835 if (tupconv_map != NULL)
1836 {
1837 ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1838 TupleTableSlot *oldSlot = slot;
1839
1840 slot = execute_attr_map_slot(tupconv_map->attrMap,
1841 slot,
1842 ExecGetReturningSlot(estate,
1843 rootRelInfo));
1844
1845 slot->tts_tableOid = oldSlot->tts_tableOid;
1846 ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1847 }
1848
1849 context->cpDeletedSlot = slot;
1850
1851 return NULL;
1852 }
1853
1854 rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1855 slot, NULL, context->planSlot);
1856
1857 /*
1858 * Before releasing the target tuple again, make sure rslot has a
1859 * local copy of any pass-by-reference values.
1860 */
1861 ExecMaterializeSlot(rslot);
1862
1863 ExecClearTuple(slot);
1864
1865 return rslot;
1866 }
1867
1868 return NULL;
1869}
1870
1871/*
1872 * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1873 *
1874 * This works by first deleting the old tuple from the current partition,
1875 * followed by inserting the new tuple into the root parent table, that is,
1876 * mtstate->rootResultRelInfo. It will be re-routed from there to the
1877 * correct partition.
1878 *
1879 * Returns true if the tuple has been successfully moved, or if it's found
1880 * that the tuple was concurrently deleted so there's nothing more to do
1881 * for the caller.
1882 *
1883 * False is returned if the tuple we're trying to move is found to have been
1884 * concurrently updated. In that case, the caller must check if the updated
1885 * tuple that's returned in *retry_slot still needs to be re-routed, and call
1886 * this function again or perform a regular update accordingly. For MERGE,
1887 * the updated tuple is not returned in *retry_slot; it has its own retry
1888 * logic.
1889 */
1890static bool
1892 ResultRelInfo *resultRelInfo,
1893 ItemPointer tupleid, HeapTuple oldtuple,
1894 TupleTableSlot *slot,
1895 bool canSetTag,
1896 UpdateContext *updateCxt,
1897 TM_Result *tmresult,
1898 TupleTableSlot **retry_slot,
1899 TupleTableSlot **inserted_tuple,
1900 ResultRelInfo **insert_destrel)
1901{
1902 ModifyTableState *mtstate = context->mtstate;
1903 EState *estate = mtstate->ps.state;
1904 TupleConversionMap *tupconv_map;
1905 bool tuple_deleted;
1906 TupleTableSlot *epqslot = NULL;
1907
1908 context->cpDeletedSlot = NULL;
1909 context->cpUpdateReturningSlot = NULL;
1910 *retry_slot = NULL;
1911
1912 /*
1913 * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1914 * to migrate to a different partition. Maybe this can be implemented
1915 * some day, but it seems a fringe feature with little redeeming value.
1916 */
1917 if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1918 ereport(ERROR,
1919 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1920 errmsg("invalid ON UPDATE specification"),
1921 errdetail("The result tuple would appear in a different partition than the original tuple.")));
1922
1923 /*
1924 * When an UPDATE is run directly on a leaf partition, simply fail with a
1925 * partition constraint violation error.
1926 */
1927 if (resultRelInfo == mtstate->rootResultRelInfo)
1928 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1929
1930 /* Initialize tuple routing info if not already done. */
1931 if (mtstate->mt_partition_tuple_routing == NULL)
1932 {
1933 Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1934 MemoryContext oldcxt;
1935
1936 /* Things built here have to last for the query duration. */
1937 oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1938
1940 ExecSetupPartitionTupleRouting(estate, rootRel);
1941
1942 /*
1943 * Before a partition's tuple can be re-routed, it must first be
1944 * converted to the root's format, so we'll need a slot for storing
1945 * such tuples.
1946 */
1947 Assert(mtstate->mt_root_tuple_slot == NULL);
1948 mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1949
1950 MemoryContextSwitchTo(oldcxt);
1951 }
1952
1953 /*
1954 * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1955 * We want to return rows from INSERT.
1956 */
1957 ExecDelete(context, resultRelInfo,
1958 tupleid, oldtuple,
1959 false, /* processReturning */
1960 true, /* changingPart */
1961 false, /* canSetTag */
1962 tmresult, &tuple_deleted, &epqslot);
1963
1964 /*
1965 * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
1966 * it was already deleted by self, or it was concurrently deleted by
1967 * another transaction), then we should skip the insert as well;
1968 * otherwise, an UPDATE could cause an increase in the total number of
1969 * rows across all partitions, which is clearly wrong.
1970 *
1971 * For a normal UPDATE, the case where the tuple has been the subject of a
1972 * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1973 * machinery, but for an UPDATE that we've translated into a DELETE from
1974 * this partition and an INSERT into some other partition, that's not
1975 * available, because CTID chains can't span relation boundaries. We
1976 * mimic the semantics to a limited extent by skipping the INSERT if the
1977 * DELETE fails to find a tuple. This ensures that two concurrent
1978 * attempts to UPDATE the same tuple at the same time can't turn one tuple
1979 * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1980 * it.
1981 */
1982 if (!tuple_deleted)
1983 {
1984 /*
1985 * epqslot will be typically NULL. But when ExecDelete() finds that
1986 * another transaction has concurrently updated the same row, it
1987 * re-fetches the row, skips the delete, and epqslot is set to the
1988 * re-fetched tuple slot. In that case, we need to do all the checks
1989 * again. For MERGE, we leave everything to the caller (it must do
1990 * additional rechecking, and might end up executing a different
1991 * action entirely).
1992 */
1993 if (mtstate->operation == CMD_MERGE)
1994 return *tmresult == TM_Ok;
1995 else if (TupIsNull(epqslot))
1996 return true;
1997 else
1998 {
1999 /* Fetch the most recent version of old tuple. */
2000 TupleTableSlot *oldSlot;
2001
2002 /* ... but first, make sure ri_oldTupleSlot is initialized. */
2003 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2004 ExecInitUpdateProjection(mtstate, resultRelInfo);
2005 oldSlot = resultRelInfo->ri_oldTupleSlot;
2007 tupleid,
2009 oldSlot))
2010 elog(ERROR, "failed to fetch tuple being updated");
2011 /* and project the new tuple to retry the UPDATE with */
2012 *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2013 oldSlot);
2014 return false;
2015 }
2016 }
2017
2018 /*
2019 * resultRelInfo is one of the per-relation resultRelInfos. So we should
2020 * convert the tuple into root's tuple descriptor if needed, since
2021 * ExecInsert() starts the search from root.
2022 */
2023 tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2024 if (tupconv_map != NULL)
2025 slot = execute_attr_map_slot(tupconv_map->attrMap,
2026 slot,
2027 mtstate->mt_root_tuple_slot);
2028
2029 /* Tuple routing starts from the root table. */
2030 context->cpUpdateReturningSlot =
2031 ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2032 inserted_tuple, insert_destrel);
2033
2034 /*
2035 * Reset the transition state that may possibly have been written by
2036 * INSERT.
2037 */
2038 if (mtstate->mt_transition_capture)
2040
2041 /* We're done moving. */
2042 return true;
2043}
2044
2045/*
2046 * ExecUpdatePrologue -- subroutine for ExecUpdate
2047 *
2048 * Prepare executor state for UPDATE. This includes running BEFORE ROW
2049 * triggers. We return false if one of them makes the update a no-op;
2050 * otherwise, return true.
2051 */
2052static bool
2054 ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2055 TM_Result *result)
2056{
2057 Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2058
2059 if (result)
2060 *result = TM_Ok;
2061
2062 ExecMaterializeSlot(slot);
2063
2064 /*
2065 * Open the table's indexes, if we have not done so already, so that we
2066 * can add new index entries for the updated tuple.
2067 */
2068 if (resultRelationDesc->rd_rel->relhasindex &&
2069 resultRelInfo->ri_IndexRelationDescs == NULL)
2070 ExecOpenIndices(resultRelInfo, false);
2071
2072 /* BEFORE ROW UPDATE triggers */
2073 if (resultRelInfo->ri_TrigDesc &&
2074 resultRelInfo->ri_TrigDesc->trig_update_before_row)
2075 {
2076 /* Flush any pending inserts, so rows are visible to the triggers */
2078 ExecPendingInserts(context->estate);
2079
2080 return ExecBRUpdateTriggers(context->estate, context->epqstate,
2081 resultRelInfo, tupleid, oldtuple, slot,
2082 result, &context->tmfd);
2083 }
2084
2085 return true;
2086}
2087
2088/*
2089 * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2090 *
2091 * Apply the final modifications to the tuple slot before the update.
2092 * (This is split out because we also need it in the foreign-table code path.)
2093 */
2094static void
2096 TupleTableSlot *slot,
2097 EState *estate)
2098{
2099 Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2100
2101 /*
2102 * Constraints and GENERATED expressions might reference the tableoid
2103 * column, so (re-)initialize tts_tableOid before evaluating them.
2104 */
2105 slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2106
2107 /*
2108 * Compute stored generated columns
2109 */
2110 if (resultRelationDesc->rd_att->constr &&
2111 resultRelationDesc->rd_att->constr->has_generated_stored)
2112 ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2113 CMD_UPDATE);
2114}
2115
2116/*
2117 * ExecUpdateAct -- subroutine for ExecUpdate
2118 *
2119 * Actually update the tuple, when operating on a plain table. If the
2120 * table is a partition, and the command was called referencing an ancestor
2121 * partitioned table, this routine migrates the resulting tuple to another
2122 * partition.
2123 *
2124 * The caller is in charge of keeping indexes current as necessary. The
2125 * caller is also in charge of doing EvalPlanQual if the tuple is found to
2126 * be concurrently updated. However, in case of a cross-partition update,
2127 * this routine does it.
2128 */
2129static TM_Result
2131 ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2132 bool canSetTag, UpdateContext *updateCxt)
2133{
2134 EState *estate = context->estate;
2135 Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2136 bool partition_constraint_failed;
2137 TM_Result result;
2138
2139 updateCxt->crossPartUpdate = false;
2140
2141 /*
2142 * If we move the tuple to a new partition, we loop back here to recompute
2143 * GENERATED values (which are allowed to be different across partitions)
2144 * and recheck any RLS policies and constraints. We do not fire any
2145 * BEFORE triggers of the new partition, however.
2146 */
2147lreplace:
2148 /* Fill in GENERATEd columns */
2149 ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2150
2151 /* ensure slot is independent, consider e.g. EPQ */
2152 ExecMaterializeSlot(slot);
2153
2154 /*
2155 * If partition constraint fails, this row might get moved to another
2156 * partition, in which case we should check the RLS CHECK policy just
2157 * before inserting into the new partition, rather than doing it here.
2158 * This is because a trigger on that partition might again change the row.
2159 * So skip the WCO checks if the partition constraint fails.
2160 */
2161 partition_constraint_failed =
2162 resultRelationDesc->rd_rel->relispartition &&
2163 !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2164
2165 /* Check any RLS UPDATE WITH CHECK policies */
2166 if (!partition_constraint_failed &&
2167 resultRelInfo->ri_WithCheckOptions != NIL)
2168 {
2169 /*
2170 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2171 * we are looking for at this point.
2172 */
2174 resultRelInfo, slot, estate);
2175 }
2176
2177 /*
2178 * If a partition check failed, try to move the row into the right
2179 * partition.
2180 */
2181 if (partition_constraint_failed)
2182 {
2183 TupleTableSlot *inserted_tuple,
2184 *retry_slot;
2185 ResultRelInfo *insert_destrel = NULL;
2186
2187 /*
2188 * ExecCrossPartitionUpdate will first DELETE the row from the
2189 * partition it's currently in and then insert it back into the root
2190 * table, which will re-route it to the correct partition. However,
2191 * if the tuple has been concurrently updated, a retry is needed.
2192 */
2193 if (ExecCrossPartitionUpdate(context, resultRelInfo,
2194 tupleid, oldtuple, slot,
2195 canSetTag, updateCxt,
2196 &result,
2197 &retry_slot,
2198 &inserted_tuple,
2199 &insert_destrel))
2200 {
2201 /* success! */
2202 updateCxt->crossPartUpdate = true;
2203
2204 /*
2205 * If the partitioned table being updated is referenced in foreign
2206 * keys, queue up trigger events to check that none of them were
2207 * violated. No special treatment is needed in
2208 * non-cross-partition update situations, because the leaf
2209 * partition's AR update triggers will take care of that. During
2210 * cross-partition updates implemented as delete on the source
2211 * partition followed by insert on the destination partition,
2212 * AR-UPDATE triggers of the root table (that is, the table
2213 * mentioned in the query) must be fired.
2214 *
2215 * NULL insert_destrel means that the move failed to occur, that
2216 * is, the update failed, so no need to anything in that case.
2217 */
2218 if (insert_destrel &&
2219 resultRelInfo->ri_TrigDesc &&
2220 resultRelInfo->ri_TrigDesc->trig_update_after_row)
2222 resultRelInfo,
2223 insert_destrel,
2224 tupleid, slot,
2225 inserted_tuple);
2226
2227 return TM_Ok;
2228 }
2229
2230 /*
2231 * No luck, a retry is needed. If running MERGE, we do not do so
2232 * here; instead let it handle that on its own rules.
2233 */
2234 if (context->mtstate->operation == CMD_MERGE)
2235 return result;
2236
2237 /*
2238 * ExecCrossPartitionUpdate installed an updated version of the new
2239 * tuple in the retry slot; start over.
2240 */
2241 slot = retry_slot;
2242 goto lreplace;
2243 }
2244
2245 /*
2246 * Check the constraints of the tuple. We've already checked the
2247 * partition constraint above; however, we must still ensure the tuple
2248 * passes all other constraints, so we will call ExecConstraints() and
2249 * have it validate all remaining checks.
2250 */
2251 if (resultRelationDesc->rd_att->constr)
2252 ExecConstraints(resultRelInfo, slot, estate);
2253
2254 /*
2255 * replace the heap tuple
2256 *
2257 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2258 * the row to be updated is visible to that snapshot, and throw a
2259 * can't-serialize error if not. This is a special-case behavior needed
2260 * for referential integrity updates in transaction-snapshot mode
2261 * transactions.
2262 */
2263 result = table_tuple_update(resultRelationDesc, tupleid, slot,
2264 estate->es_output_cid,
2265 estate->es_snapshot,
2266 estate->es_crosscheck_snapshot,
2267 true /* wait for commit */ ,
2268 &context->tmfd, &updateCxt->lockmode,
2269 &updateCxt->updateIndexes);
2270
2271 return result;
2272}
2273
2274/*
2275 * ExecUpdateEpilogue -- subroutine for ExecUpdate
2276 *
2277 * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2278 * returns indicating that the tuple was updated.
2279 */
2280static void
2282 ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2283 HeapTuple oldtuple, TupleTableSlot *slot)
2284{
2285 ModifyTableState *mtstate = context->mtstate;
2286 List *recheckIndexes = NIL;
2287
2288 /* insert index entries for tuple if necessary */
2289 if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2290 recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2291 slot, context->estate,
2292 true, false,
2293 NULL, NIL,
2294 (updateCxt->updateIndexes == TU_Summarizing));
2295
2296 /* AFTER ROW UPDATE Triggers */
2297 ExecARUpdateTriggers(context->estate, resultRelInfo,
2298 NULL, NULL,
2299 tupleid, oldtuple, slot,
2300 recheckIndexes,
2301 mtstate->operation == CMD_INSERT ?
2302 mtstate->mt_oc_transition_capture :
2303 mtstate->mt_transition_capture,
2304 false);
2305
2306 list_free(recheckIndexes);
2307
2308 /*
2309 * Check any WITH CHECK OPTION constraints from parent views. We are
2310 * required to do this after testing all constraints and uniqueness
2311 * violations per the SQL spec, so we do it after actually updating the
2312 * record in the heap and all indexes.
2313 *
2314 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2315 * are looking for at this point.
2316 */
2317 if (resultRelInfo->ri_WithCheckOptions != NIL)
2318 ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2319 slot, context->estate);
2320}
2321
2322/*
2323 * Queues up an update event using the target root partitioned table's
2324 * trigger to check that a cross-partition update hasn't broken any foreign
2325 * keys pointing into it.
2326 */
2327static void
2329 ResultRelInfo *sourcePartInfo,
2330 ResultRelInfo *destPartInfo,
2331 ItemPointer tupleid,
2332 TupleTableSlot *oldslot,
2333 TupleTableSlot *newslot)
2334{
2335 ListCell *lc;
2336 ResultRelInfo *rootRelInfo;
2337 List *ancestorRels;
2338
2339 rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2340 ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2341
2342 /*
2343 * For any foreign keys that point directly into a non-root ancestors of
2344 * the source partition, we can in theory fire an update event to enforce
2345 * those constraints using their triggers, if we could tell that both the
2346 * source and the destination partitions are under the same ancestor. But
2347 * for now, we simply report an error that those cannot be enforced.
2348 */
2349 foreach(lc, ancestorRels)
2350 {
2351 ResultRelInfo *rInfo = lfirst(lc);
2352 TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2353 bool has_noncloned_fkey = false;
2354
2355 /* Root ancestor's triggers will be processed. */
2356 if (rInfo == rootRelInfo)
2357 continue;
2358
2359 if (trigdesc && trigdesc->trig_update_after_row)
2360 {
2361 for (int i = 0; i < trigdesc->numtriggers; i++)
2362 {
2363 Trigger *trig = &trigdesc->triggers[i];
2364
2365 if (!trig->tgisclone &&
2367 {
2368 has_noncloned_fkey = true;
2369 break;
2370 }
2371 }
2372 }
2373
2374 if (has_noncloned_fkey)
2375 ereport(ERROR,
2376 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2377 errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2378 errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2381 errhint("Consider defining the foreign key on table \"%s\".",
2383 }
2384
2385 /* Perform the root table's triggers. */
2387 rootRelInfo, sourcePartInfo, destPartInfo,
2388 tupleid, NULL, newslot, NIL, NULL, true);
2389}
2390
2391/* ----------------------------------------------------------------
2392 * ExecUpdate
2393 *
2394 * note: we can't run UPDATE queries with transactions
2395 * off because UPDATEs are actually INSERTs and our
2396 * scan will mistakenly loop forever, updating the tuple
2397 * it just inserted.. This should be fixed but until it
2398 * is, we don't want to get stuck in an infinite loop
2399 * which corrupts your database..
2400 *
2401 * When updating a table, tupleid identifies the tuple to update and
2402 * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2403 * oldtuple is passed to the triggers and identifies what to update, and
2404 * tupleid is invalid. When updating a foreign table, tupleid is
2405 * invalid; the FDW has to figure out which row to update using data from
2406 * the planSlot. oldtuple is passed to foreign table triggers; it is
2407 * NULL when the foreign table has no relevant triggers.
2408 *
2409 * oldSlot contains the old tuple value.
2410 * slot contains the new tuple value to be stored.
2411 * planSlot is the output of the ModifyTable's subplan; we use it
2412 * to access values from other input tables (for RETURNING),
2413 * row-ID junk columns, etc.
2414 *
2415 * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2416 * had identified the tuple to update, it will identify the tuple
2417 * actually updated after EvalPlanQual.
2418 * ----------------------------------------------------------------
2419 */
2420static TupleTableSlot *
2422 ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2423 TupleTableSlot *slot, bool canSetTag)
2424{
2425 EState *estate = context->estate;
2426 Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2427 UpdateContext updateCxt = {0};
2428 TM_Result result;
2429
2430 /*
2431 * abort the operation if not running transactions
2432 */
2434 elog(ERROR, "cannot UPDATE during bootstrap");
2435
2436 /*
2437 * Prepare for the update. This includes BEFORE ROW triggers, so we're
2438 * done if it says we are.
2439 */
2440 if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2441 return NULL;
2442
2443 /* INSTEAD OF ROW UPDATE Triggers */
2444 if (resultRelInfo->ri_TrigDesc &&
2445 resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2446 {
2447 if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2448 oldtuple, slot))
2449 return NULL; /* "do nothing" */
2450 }
2451 else if (resultRelInfo->ri_FdwRoutine)
2452 {
2453 /* Fill in GENERATEd columns */
2454 ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2455
2456 /*
2457 * update in foreign table: let the FDW do it
2458 */
2459 slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2460 resultRelInfo,
2461 slot,
2462 context->planSlot);
2463
2464 if (slot == NULL) /* "do nothing" */
2465 return NULL;
2466
2467 /*
2468 * AFTER ROW Triggers or RETURNING expressions might reference the
2469 * tableoid column, so (re-)initialize tts_tableOid before evaluating
2470 * them. (This covers the case where the FDW replaced the slot.)
2471 */
2472 slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2473 }
2474 else
2475 {
2476 ItemPointerData lockedtid;
2477
2478 /*
2479 * If we generate a new candidate tuple after EvalPlanQual testing, we
2480 * must loop back here to try again. (We don't need to redo triggers,
2481 * however. If there are any BEFORE triggers then trigger.c will have
2482 * done table_tuple_lock to lock the correct tuple, so there's no need
2483 * to do them again.)
2484 */
2485redo_act:
2486 lockedtid = *tupleid;
2487 result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2488 canSetTag, &updateCxt);
2489
2490 /*
2491 * If ExecUpdateAct reports that a cross-partition update was done,
2492 * then the RETURNING tuple (if any) has been projected and there's
2493 * nothing else for us to do.
2494 */
2495 if (updateCxt.crossPartUpdate)
2496 return context->cpUpdateReturningSlot;
2497
2498 switch (result)
2499 {
2500 case TM_SelfModified:
2501
2502 /*
2503 * The target tuple was already updated or deleted by the
2504 * current command, or by a later command in the current
2505 * transaction. The former case is possible in a join UPDATE
2506 * where multiple tuples join to the same target tuple. This
2507 * is pretty questionable, but Postgres has always allowed it:
2508 * we just execute the first update action and ignore
2509 * additional update attempts.
2510 *
2511 * The latter case arises if the tuple is modified by a
2512 * command in a BEFORE trigger, or perhaps by a command in a
2513 * volatile function used in the query. In such situations we
2514 * should not ignore the update, but it is equally unsafe to
2515 * proceed. We don't want to discard the original UPDATE
2516 * while keeping the triggered actions based on it; and we
2517 * have no principled way to merge this update with the
2518 * previous ones. So throwing an error is the only safe
2519 * course.
2520 *
2521 * If a trigger actually intends this type of interaction, it
2522 * can re-execute the UPDATE (assuming it can figure out how)
2523 * and then return NULL to cancel the outer update.
2524 */
2525 if (context->tmfd.cmax != estate->es_output_cid)
2526 ereport(ERROR,
2527 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2528 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2529 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2530
2531 /* Else, already updated by self; nothing to do */
2532 return NULL;
2533
2534 case TM_Ok:
2535 break;
2536
2537 case TM_Updated:
2538 {
2539 TupleTableSlot *inputslot;
2540 TupleTableSlot *epqslot;
2541
2543 ereport(ERROR,
2545 errmsg("could not serialize access due to concurrent update")));
2546
2547 /*
2548 * Already know that we're going to need to do EPQ, so
2549 * fetch tuple directly into the right slot.
2550 */
2551 inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2552 resultRelInfo->ri_RangeTableIndex);
2553
2554 result = table_tuple_lock(resultRelationDesc, tupleid,
2555 estate->es_snapshot,
2556 inputslot, estate->es_output_cid,
2557 updateCxt.lockmode, LockWaitBlock,
2559 &context->tmfd);
2560
2561 switch (result)
2562 {
2563 case TM_Ok:
2564 Assert(context->tmfd.traversed);
2565
2566 epqslot = EvalPlanQual(context->epqstate,
2567 resultRelationDesc,
2568 resultRelInfo->ri_RangeTableIndex,
2569 inputslot);
2570 if (TupIsNull(epqslot))
2571 /* Tuple not passing quals anymore, exiting... */
2572 return NULL;
2573
2574 /* Make sure ri_oldTupleSlot is initialized. */
2575 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2577 resultRelInfo);
2578
2579 if (resultRelInfo->ri_needLockTagTuple)
2580 {
2581 UnlockTuple(resultRelationDesc,
2582 &lockedtid, InplaceUpdateTupleLock);
2583 LockTuple(resultRelationDesc,
2584 tupleid, InplaceUpdateTupleLock);
2585 }
2586
2587 /* Fetch the most recent version of old tuple. */
2588 oldSlot = resultRelInfo->ri_oldTupleSlot;
2589 if (!table_tuple_fetch_row_version(resultRelationDesc,
2590 tupleid,
2592 oldSlot))
2593 elog(ERROR, "failed to fetch tuple being updated");
2594 slot = ExecGetUpdateNewTuple(resultRelInfo,
2595 epqslot, oldSlot);
2596 goto redo_act;
2597
2598 case TM_Deleted:
2599 /* tuple already deleted; nothing to do */
2600 return NULL;
2601
2602 case TM_SelfModified:
2603
2604 /*
2605 * This can be reached when following an update
2606 * chain from a tuple updated by another session,
2607 * reaching a tuple that was already updated in
2608 * this transaction. If previously modified by
2609 * this command, ignore the redundant update,
2610 * otherwise error out.
2611 *
2612 * See also TM_SelfModified response to
2613 * table_tuple_update() above.
2614 */
2615 if (context->tmfd.cmax != estate->es_output_cid)
2616 ereport(ERROR,
2617 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2618 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2619 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2620 return NULL;
2621
2622 default:
2623 /* see table_tuple_lock call in ExecDelete() */
2624 elog(ERROR, "unexpected table_tuple_lock status: %u",
2625 result);
2626 return NULL;
2627 }
2628 }
2629
2630 break;
2631
2632 case TM_Deleted:
2634 ereport(ERROR,
2636 errmsg("could not serialize access due to concurrent delete")));
2637 /* tuple already deleted; nothing to do */
2638 return NULL;
2639
2640 default:
2641 elog(ERROR, "unrecognized table_tuple_update status: %u",
2642 result);
2643 return NULL;
2644 }
2645 }
2646
2647 if (canSetTag)
2648 (estate->es_processed)++;
2649
2650 ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2651 slot);
2652
2653 /* Process RETURNING if present */
2654 if (resultRelInfo->ri_projectReturning)
2655 return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2656 oldSlot, slot, context->planSlot);
2657
2658 return NULL;
2659}
2660
2661/*
2662 * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2663 *
2664 * Try to lock tuple for update as part of speculative insertion. If
2665 * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2666 * (but still lock row, even though it may not satisfy estate's
2667 * snapshot).
2668 *
2669 * Returns true if we're done (with or without an update), or false if
2670 * the caller must retry the INSERT from scratch.
2671 */
2672static bool
2674 ResultRelInfo *resultRelInfo,
2675 ItemPointer conflictTid,
2676 TupleTableSlot *excludedSlot,
2677 bool canSetTag,
2678 TupleTableSlot **returning)
2679{
2680 ModifyTableState *mtstate = context->mtstate;
2681 ExprContext *econtext = mtstate->ps.ps_ExprContext;
2682 Relation relation = resultRelInfo->ri_RelationDesc;
2683 ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2684 TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2685 TM_FailureData tmfd;
2686 LockTupleMode lockmode;
2688 Datum xminDatum;
2689 TransactionId xmin;
2690 bool isnull;
2691
2692 /*
2693 * Parse analysis should have blocked ON CONFLICT for all system
2694 * relations, which includes these. There's no fundamental obstacle to
2695 * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2696 * ExecUpdate() caller.
2697 */
2698 Assert(!resultRelInfo->ri_needLockTagTuple);
2699
2700 /* Determine lock mode to use */
2701 lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2702
2703 /*
2704 * Lock tuple for update. Don't follow updates when tuple cannot be
2705 * locked without doing so. A row locking conflict here means our
2706 * previous conclusion that the tuple is conclusively committed is not
2707 * true anymore.
2708 */
2709 test = table_tuple_lock(relation, conflictTid,
2710 context->estate->es_snapshot,
2711 existing, context->estate->es_output_cid,
2712 lockmode, LockWaitBlock, 0,
2713 &tmfd);
2714 switch (test)
2715 {
2716 case TM_Ok:
2717 /* success! */
2718 break;
2719
2720 case TM_Invisible:
2721
2722 /*
2723 * This can occur when a just inserted tuple is updated again in
2724 * the same command. E.g. because multiple rows with the same
2725 * conflicting key values are inserted.
2726 *
2727 * This is somewhat similar to the ExecUpdate() TM_SelfModified
2728 * case. We do not want to proceed because it would lead to the
2729 * same row being updated a second time in some unspecified order,
2730 * and in contrast to plain UPDATEs there's no historical behavior
2731 * to break.
2732 *
2733 * It is the user's responsibility to prevent this situation from
2734 * occurring. These problems are why the SQL standard similarly
2735 * specifies that for SQL MERGE, an exception must be raised in
2736 * the event of an attempt to update the same row twice.
2737 */
2738 xminDatum = slot_getsysattr(existing,
2740 &isnull);
2741 Assert(!isnull);
2742 xmin = DatumGetTransactionId(xminDatum);
2743
2745 ereport(ERROR,
2746 (errcode(ERRCODE_CARDINALITY_VIOLATION),
2747 /* translator: %s is a SQL command name */
2748 errmsg("%s command cannot affect row a second time",
2749 "ON CONFLICT DO UPDATE"),
2750 errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2751
2752 /* This shouldn't happen */
2753 elog(ERROR, "attempted to lock invisible tuple");
2754 break;
2755
2756 case TM_SelfModified:
2757
2758 /*
2759 * This state should never be reached. As a dirty snapshot is used
2760 * to find conflicting tuples, speculative insertion wouldn't have
2761 * seen this row to conflict with.
2762 */
2763 elog(ERROR, "unexpected self-updated tuple");
2764 break;
2765
2766 case TM_Updated:
2768 ereport(ERROR,
2770 errmsg("could not serialize access due to concurrent update")));
2771
2772 /*
2773 * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2774 * a partitioned table we shouldn't reach to a case where tuple to
2775 * be lock is moved to another partition due to concurrent update
2776 * of the partition key.
2777 */
2779
2780 /*
2781 * Tell caller to try again from the very start.
2782 *
2783 * It does not make sense to use the usual EvalPlanQual() style
2784 * loop here, as the new version of the row might not conflict
2785 * anymore, or the conflicting tuple has actually been deleted.
2786 */
2787 ExecClearTuple(existing);
2788 return false;
2789
2790 case TM_Deleted:
2792 ereport(ERROR,
2794 errmsg("could not serialize access due to concurrent delete")));
2795
2796 /* see TM_Updated case */
2798 ExecClearTuple(existing);
2799 return false;
2800
2801 default:
2802 elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2803 }
2804
2805 /* Success, the tuple is locked. */
2806
2807 /*
2808 * Verify that the tuple is visible to our MVCC snapshot if the current
2809 * isolation level mandates that.
2810 *
2811 * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2812 * CONFLICT ... WHERE clause may prevent us from reaching that.
2813 *
2814 * This means we only ever continue when a new command in the current
2815 * transaction could see the row, even though in READ COMMITTED mode the
2816 * tuple will not be visible according to the current statement's
2817 * snapshot. This is in line with the way UPDATE deals with newer tuple
2818 * versions.
2819 */
2820 ExecCheckTupleVisible(context->estate, relation, existing);
2821
2822 /*
2823 * Make tuple and any needed join variables available to ExecQual and
2824 * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2825 * the target's existing tuple is installed in the scantuple. EXCLUDED
2826 * has been made to reference INNER_VAR in setrefs.c, but there is no
2827 * other redirection.
2828 */
2829 econtext->ecxt_scantuple = existing;
2830 econtext->ecxt_innertuple = excludedSlot;
2831 econtext->ecxt_outertuple = NULL;
2832
2833 if (!ExecQual(onConflictSetWhere, econtext))
2834 {
2835 ExecClearTuple(existing); /* see return below */
2836 InstrCountFiltered1(&mtstate->ps, 1);
2837 return true; /* done with the tuple */
2838 }
2839
2840 if (resultRelInfo->ri_WithCheckOptions != NIL)
2841 {
2842 /*
2843 * Check target's existing tuple against UPDATE-applicable USING
2844 * security barrier quals (if any), enforced here as RLS checks/WCOs.
2845 *
2846 * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2847 * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2848 * but that's almost the extent of its special handling for ON
2849 * CONFLICT DO UPDATE.
2850 *
2851 * The rewriter will also have associated UPDATE applicable straight
2852 * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2853 * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2854 * kinds, so there is no danger of spurious over-enforcement in the
2855 * INSERT or UPDATE path.
2856 */
2858 existing,
2859 mtstate->ps.state);
2860 }
2861
2862 /* Project the new tuple version */
2863 ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2864
2865 /*
2866 * Note that it is possible that the target tuple has been modified in
2867 * this session, after the above table_tuple_lock. We choose to not error
2868 * out in that case, in line with ExecUpdate's treatment of similar cases.
2869 * This can happen if an UPDATE is triggered from within ExecQual(),
2870 * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2871 * wCTE in the ON CONFLICT's SET.
2872 */
2873
2874 /* Execute UPDATE with projection */
2875 *returning = ExecUpdate(context, resultRelInfo,
2876 conflictTid, NULL, existing,
2877 resultRelInfo->ri_onConflict->oc_ProjSlot,
2878 canSetTag);
2879
2880 /*
2881 * Clear out existing tuple, as there might not be another conflict among
2882 * the next input rows. Don't want to hold resources till the end of the
2883 * query. First though, make sure that the returning slot, if any, has a
2884 * local copy of any OLD pass-by-reference values, if it refers to any OLD
2885 * columns.
2886 */
2887 if (*returning != NULL &&
2889 ExecMaterializeSlot(*returning);
2890
2891 ExecClearTuple(existing);
2892
2893 return true;
2894}
2895
2896/*
2897 * Perform MERGE.
2898 */
2899static TupleTableSlot *
2901 ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2902{
2903 TupleTableSlot *rslot = NULL;
2904 bool matched;
2905
2906 /*-----
2907 * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2908 * valid, depending on whether the result relation is a table or a view.
2909 * We execute the first action for which the additional WHEN MATCHED AND
2910 * quals pass. If an action without quals is found, that action is
2911 * executed.
2912 *
2913 * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2914 * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2915 * in sequence until one passes. This is almost identical to the WHEN
2916 * MATCHED case, and both cases are handled by ExecMergeMatched().
2917 *
2918 * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2919 * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2920 * TARGET] actions in sequence until one passes.
2921 *
2922 * Things get interesting in case of concurrent update/delete of the
2923 * target tuple. Such concurrent update/delete is detected while we are
2924 * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2925 *
2926 * A concurrent update can:
2927 *
2928 * 1. modify the target tuple so that the results from checking any
2929 * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2930 * SOURCE actions potentially change, but the result from the join
2931 * quals does not change.
2932 *
2933 * In this case, we are still dealing with the same kind of match
2934 * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2935 * actions from the start and choose the first one that satisfies the
2936 * new target tuple.
2937 *
2938 * 2. modify the target tuple in the WHEN MATCHED case so that the join
2939 * quals no longer pass and hence the source and target tuples no
2940 * longer match.
2941 *
2942 * In this case, we are now dealing with a NOT MATCHED case, and we
2943 * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2944 * TARGET] actions. First ExecMergeMatched() processes the list of
2945 * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2946 * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2947 * TARGET] actions in sequence until one passes. Thus we may execute
2948 * two actions; one of each kind.
2949 *
2950 * Thus we support concurrent updates that turn MATCHED candidate rows
2951 * into NOT MATCHED rows. However, we do not attempt to support cases
2952 * that would turn NOT MATCHED rows into MATCHED rows, or which would
2953 * cause a target row to match a different source row.
2954 *
2955 * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2956 * [BY TARGET].
2957 *
2958 * ExecMergeMatched() takes care of following the update chain and
2959 * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2960 * action, as long as the target tuple still exists. If the target tuple
2961 * gets deleted or a concurrent update causes the join quals to fail, it
2962 * returns a matched status of false and we call ExecMergeNotMatched().
2963 * Given that ExecMergeMatched() always makes progress by following the
2964 * update chain and we never switch from ExecMergeNotMatched() to
2965 * ExecMergeMatched(), there is no risk of a livelock.
2966 */
2967 matched = tupleid != NULL || oldtuple != NULL;
2968 if (matched)
2969 rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
2970 canSetTag, &matched);
2971
2972 /*
2973 * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
2974 * join, or a previously MATCHED tuple for which ExecMergeMatched() set
2975 * "matched" to false, indicating that it no longer matches).
2976 */
2977 if (!matched)
2978 {
2979 /*
2980 * If a concurrent update turned a MATCHED case into a NOT MATCHED
2981 * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
2982 * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
2983 * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
2984 * SOURCE action, and computed the row to return. If so, we cannot
2985 * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
2986 * pending (to be processed on the next call to ExecModifyTable()).
2987 * Otherwise, just process the action now.
2988 */
2989 if (rslot == NULL)
2990 rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2991 else
2992 context->mtstate->mt_merge_pending_not_matched = context->planSlot;
2993 }
2994
2995 return rslot;
2996}
2997
2998/*
2999 * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3000 * action, depending on whether the join quals are satisfied. If the target
3001 * relation is a table, the current target tuple is identified by tupleid.
3002 * Otherwise, if the target relation is a view, oldtuple is the current target
3003 * tuple from the view.
3004 *
3005 * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3006 * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3007 * action do not pass, we check the second, then the third and so on. If we
3008 * reach the end without finding a qualifying action, we return NULL.
3009 * Otherwise, we execute the qualifying action and return its RETURNING
3010 * result, if any, or NULL.
3011 *
3012 * On entry, "*matched" is assumed to be true. If a concurrent update or
3013 * delete is detected that causes the join quals to no longer pass, we set it
3014 * to false, indicating that the caller should process any NOT MATCHED [BY
3015 * TARGET] actions.
3016 *
3017 * After a concurrent update, we restart from the first action to look for a
3018 * new qualifying action to execute. If the join quals originally passed, and
3019 * the concurrent update caused them to no longer pass, then we switch from
3020 * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3021 * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3022 * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3023 * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3024 */
3025static TupleTableSlot *
 3027 ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
 3028 bool *matched)
 3029{
 3030 ModifyTableState *mtstate = context->mtstate;
 3031 List **mergeActions = resultRelInfo->ri_MergeActions;
 3032 ItemPointerData lockedtid;
 3033 List *actionStates;
 3034 TupleTableSlot *newslot = NULL;
 3035 TupleTableSlot *rslot = NULL;
 3036 EState *estate = context->estate;
 3037 ExprContext *econtext = mtstate->ps.ps_ExprContext;
 3038 bool isNull;
 3039 EPQState *epqstate = &mtstate->mt_epqstate;
 3040 ListCell *l;
 3041
 /*
  * NOTE(review): this rendering has dropped several source lines -- the
  * function-name/parameter line (orig. 3026), LockTuple's lock-mode
  * argument (orig. 3086), and parts of the table_tuple_fetch_row_version
  * and table_tuple_lock argument lists (orig. 3089, 3091, 3350, etc.).
  * Verify any edit against the canonical nodeModifyTable.c before
  * relying on this listing.
  */
 3042 /* Expect matched to be true on entry */
 3043 Assert(*matched);
 3044
 3045 /*
 3046 * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
 3047 * are done.
 3048 */
 3049 if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
 3050 mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
 3051 return NULL;
 3052
 3053 /*
 3054 * Make tuple and any needed join variables available to ExecQual and
 3055 * ExecProject. The target's existing tuple is installed in the scantuple.
 3056 * This target relation's slot is required only in the case of a MATCHED
 3057 * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
 3058 */
 3059 econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
 3060 econtext->ecxt_innertuple = context->planSlot;
 3061 econtext->ecxt_outertuple = NULL;
 3062
 3063 /*
 3064 * This routine is only invoked for matched target rows, so we should
 3065 * either have the tupleid of the target row, or an old tuple from the
 3066 * target wholerow junk attr.
 3067 */
 3068 Assert(tupleid != NULL || oldtuple != NULL);
 3069 ItemPointerSetInvalid(&lockedtid);
 3070 if (oldtuple != NULL)
 3071 {
 3072 Assert(!resultRelInfo->ri_needLockTagTuple);
 3073 ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
 3074 false);
 3075 }
 3076 else
 3077 {
 3078 if (resultRelInfo->ri_needLockTagTuple)
 3079 {
 3080 /*
 3081 * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
 3082 * that don't match mas_whenqual. MERGE on system catalogs is a
 3083 * minor use case, so don't bother optimizing those.
 3084 */
 3085 LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
 3087 lockedtid = *tupleid;
 3088 }
 3090 tupleid,
 3092 resultRelInfo->ri_oldTupleSlot))
 3093 elog(ERROR, "failed to fetch the target tuple");
 3094 }
 3095
 3096 /*
 3097 * Test the join condition. If it's satisfied, perform a MATCHED action.
 3098 * Otherwise, perform a NOT MATCHED BY SOURCE action.
 3099 *
 3100 * Note that this join condition will be NULL if there are no NOT MATCHED
 3101 * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
 3102 * need only consider MATCHED actions here.
 3103 */
 3104 if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
 3105 actionStates = mergeActions[MERGE_WHEN_MATCHED];
 3106 else
 3107 actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
 3108
 /*
  * Restart point: after a concurrent update is handled below (TM_Updated),
  * we jump back here and re-scan actionStates from the start against the
  * re-fetched target tuple.
  */
 3109lmerge_matched:
 3110
 3111 foreach(l, actionStates)
 3112 {
 3113 MergeActionState *relaction = (MergeActionState *) lfirst(l);
 3114 CmdType commandType = relaction->mas_action->commandType;
 3115 TM_Result result;
 3116 UpdateContext updateCxt = {0};
 3117
 3118 /*
 3119 * Test condition, if any.
 3120 *
 3121 * In the absence of any condition, we perform the action
 3122 * unconditionally (no need to check separately since ExecQual() will
 3123 * return true if there are no conditions to evaluate).
 3124 */
 3125 if (!ExecQual(relaction->mas_whenqual, econtext))
 3126 continue;
 3127
 3128 /*
 3129 * Check if the existing target tuple meets the USING checks of
 3130 * UPDATE/DELETE RLS policies. If those checks fail, we throw an
 3131 * error.
 3132 *
 3133 * The WITH CHECK quals for UPDATE RLS policies are applied in
 3134 * ExecUpdateAct() and hence we need not do anything special to handle
 3135 * them.
 3136 *
 3137 * NOTE: We must do this after WHEN quals are evaluated, so that we
 3138 * check policies only when they matter.
 3139 */
 3140 if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
 3141 {
 3142 ExecWithCheckOptions(commandType == CMD_UPDATE ?
 3144 resultRelInfo,
 3145 resultRelInfo->ri_oldTupleSlot,
 3146 context->mtstate->ps.state);
 3147 }
 3148
 3149 /* Perform stated action */
 3150 switch (commandType)
 3151 {
 3152 case CMD_UPDATE:
 3153
 3154 /*
 3155 * Project the output tuple, and use that to update the table.
 3156 * We don't need to filter out junk attributes, because the
 3157 * UPDATE action's targetlist doesn't have any.
 3158 */
 3159 newslot = ExecProject(relaction->mas_proj);
 3160
 3161 mtstate->mt_merge_action = relaction;
 3162 if (!ExecUpdatePrologue(context, resultRelInfo,
 3163 tupleid, NULL, newslot, &result))
 3164 {
 3165 if (result == TM_Ok)
 3166 goto out; /* "do nothing" */
 3167
 3168 break; /* concurrent update/delete */
 3169 }
 3170
 3171 /* INSTEAD OF ROW UPDATE Triggers */
 3172 if (resultRelInfo->ri_TrigDesc &&
 3173 resultRelInfo->ri_TrigDesc->trig_update_instead_row)
 3174 {
 3175 if (!ExecIRUpdateTriggers(estate, resultRelInfo,
 3176 oldtuple, newslot))
 3177 goto out; /* "do nothing" */
 3178 }
 3179 else
 3180 {
 3181 /* checked ri_needLockTagTuple above */
 3182 Assert(oldtuple == NULL);
 3183
 3184 result = ExecUpdateAct(context, resultRelInfo, tupleid,
 3185 NULL, newslot, canSetTag,
 3186 &updateCxt);
 3187
 3188 /*
 3189 * As in ExecUpdate(), if ExecUpdateAct() reports that a
 3190 * cross-partition update was done, then there's nothing
 3191 * else for us to do --- the UPDATE has been turned into a
 3192 * DELETE and an INSERT, and we must not perform any of
 3193 * the usual post-update tasks. Also, the RETURNING tuple
 3194 * (if any) has been projected, so we can just return
 3195 * that.
 3196 */
 3197 if (updateCxt.crossPartUpdate)
 3198 {
 3199 mtstate->mt_merge_updated += 1;
 3200 rslot = context->cpUpdateReturningSlot;
 3201 goto out;
 3202 }
 3203 }
 3204
 3205 if (result == TM_Ok)
 3206 {
 3207 ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
 3208 tupleid, NULL, newslot);
 3209 mtstate->mt_merge_updated += 1;
 3210 }
 3211 break;
 3212
 3213 case CMD_DELETE:
 3214 mtstate->mt_merge_action = relaction;
 3215 if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
 3216 NULL, NULL, &result))
 3217 {
 3218 if (result == TM_Ok)
 3219 goto out; /* "do nothing" */
 3220
 3221 break; /* concurrent update/delete */
 3222 }
 3223
 3224 /* INSTEAD OF ROW DELETE Triggers */
 3225 if (resultRelInfo->ri_TrigDesc &&
 3226 resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
 3227 {
 3228 if (!ExecIRDeleteTriggers(estate, resultRelInfo,
 3229 oldtuple))
 3230 goto out; /* "do nothing" */
 3231 }
 3232 else
 3233 {
 3234 /* checked ri_needLockTagTuple above */
 3235 Assert(oldtuple == NULL);
 3236
 3237 result = ExecDeleteAct(context, resultRelInfo, tupleid,
 3238 false);
 3239 }
 3240
 3241 if (result == TM_Ok)
 3242 {
 3243 ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
 3244 false);
 3245 mtstate->mt_merge_deleted += 1;
 3246 }
 3247 break;
 3248
 3249 case CMD_NOTHING:
 3250 /* Doing nothing is always OK */
 3251 result = TM_Ok;
 3252 break;
 3253
 3254 default:
 3255 elog(ERROR, "unknown action in MERGE WHEN clause");
 3256 }
 3257
 /*
  * Decide follow-up handling based on the table AM's reported result
  * for the action executed above.
  */
 3258 switch (result)
 3259 {
 3260 case TM_Ok:
 3261 /* all good; perform final actions */
 3262 if (canSetTag && commandType != CMD_NOTHING)
 3263 (estate->es_processed)++;
 3264
 3265 break;
 3266
 3267 case TM_SelfModified:
 3268
 3269 /*
 3270 * The target tuple was already updated or deleted by the
 3271 * current command, or by a later command in the current
 3272 * transaction. The former case is explicitly disallowed by
 3273 * the SQL standard for MERGE, which insists that the MERGE
 3274 * join condition should not join a target row to more than
 3275 * one source row.
 3276 *
 3277 * The latter case arises if the tuple is modified by a
 3278 * command in a BEFORE trigger, or perhaps by a command in a
 3279 * volatile function used in the query. In such situations we
 3280 * should not ignore the MERGE action, but it is equally
 3281 * unsafe to proceed. We don't want to discard the original
 3282 * MERGE action while keeping the triggered actions based on
 3283 * it; and it would be no better to allow the original MERGE
 3284 * action while discarding the updates that it triggered. So
 3285 * throwing an error is the only safe course.
 3286 */
 3287 if (context->tmfd.cmax != estate->es_output_cid)
 3288 ereport(ERROR,
 3289 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
 3290 errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
 3291 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
 3292
 /*
  * NOTE(review): an "if" guard line (orig. 3293, presumably
  * testing TransactionIdIsCurrentTransactionId) appears to have
  * been dropped here -- confirm against the canonical source.
  */
 3294 ereport(ERROR,
 3295 (errcode(ERRCODE_CARDINALITY_VIOLATION),
 3296 /* translator: %s is a SQL command name */
 3297 errmsg("%s command cannot affect row a second time",
 3298 "MERGE"),
 3299 errhint("Ensure that not more than one source row matches any one target row.")));
 3300
 3301 /* This shouldn't happen */
 3302 elog(ERROR, "attempted to update or delete invisible tuple");
 3303 break;
 3304
 3305 case TM_Deleted:
 3307 ereport(ERROR,
 3309 errmsg("could not serialize access due to concurrent delete")));
 3310
 3311 /*
 3312 * If the tuple was already deleted, set matched to false to
 3313 * let caller handle it under NOT MATCHED [BY TARGET] clauses.
 3314 */
 3315 *matched = false;
 3316 goto out;
 3317
 3318 case TM_Updated:
 3319 {
 3320 bool was_matched;
 3321 Relation resultRelationDesc;
 3322 TupleTableSlot *epqslot,
 3323 *inputslot;
 3324 LockTupleMode lockmode;
 3325
 3326 /*
 3327 * The target tuple was concurrently updated by some other
 3328 * transaction. If we are currently processing a MATCHED
 3329 * action, use EvalPlanQual() with the new version of the
 3330 * tuple and recheck the join qual, to detect a change
 3331 * from the MATCHED to the NOT MATCHED cases. If we are
 3332 * already processing a NOT MATCHED BY SOURCE action, we
 3333 * skip this (cannot switch from NOT MATCHED BY SOURCE to
 3334 * MATCHED).
 3335 */
 3336 was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
 3337 resultRelationDesc = resultRelInfo->ri_RelationDesc;
 3338 lockmode = ExecUpdateLockMode(estate, resultRelInfo);
 3339
 3340 if (was_matched)
 3341 inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
 3342 resultRelInfo->ri_RangeTableIndex);
 3343 else
 3344 inputslot = resultRelInfo->ri_oldTupleSlot;
 3345
 3346 result = table_tuple_lock(resultRelationDesc, tupleid,
 3347 estate->es_snapshot,
 3348 inputslot, estate->es_output_cid,
 3349 lockmode, LockWaitBlock,
 3351 &context->tmfd);
 3352 switch (result)
 3353 {
 3354 case TM_Ok:
 3355
 3356 /*
 3357 * If the tuple was updated and migrated to
 3358 * another partition concurrently, the current
 3359 * MERGE implementation can't follow. There's
 3360 * probably a better way to handle this case, but
 3361 * it'd require recognizing the relation to which
 3362 * the tuple moved, and setting our current
 3363 * resultRelInfo to that.
 3364 */
 3366 ereport(ERROR,
 3368 errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
 3369
 3370 /*
 3371 * If this was a MATCHED case, use EvalPlanQual()
 3372 * to recheck the join condition.
 3373 */
 3374 if (was_matched)
 3375 {
 3376 epqslot = EvalPlanQual(epqstate,
 3377 resultRelationDesc,
 3378 resultRelInfo->ri_RangeTableIndex,
 3379 inputslot);
 3380
 3381 /*
 3382 * If the subplan didn't return a tuple, then
 3383 * we must be dealing with an inner join for
 3384 * which the join condition no longer matches.
 3385 * This can only happen if there are no NOT
 3386 * MATCHED actions, and so there is nothing
 3387 * more to do.
 3388 */
 3389 if (TupIsNull(epqslot))
 3390 goto out;
 3391
 3392 /*
 3393 * If we got a NULL ctid from the subplan, the
 3394 * join quals no longer pass and we switch to
 3395 * the NOT MATCHED BY SOURCE case.
 3396 */
 3397 (void) ExecGetJunkAttribute(epqslot,
 3398 resultRelInfo->ri_RowIdAttNo,
 3399 &isNull);
 3400 if (isNull)
 3401 *matched = false;
 3402
 3403 /*
 3404 * Otherwise, recheck the join quals to see if
 3405 * we need to switch to the NOT MATCHED BY
 3406 * SOURCE case.
 3407 */
 3408 if (resultRelInfo->ri_needLockTagTuple)
 3409 {
 3410 if (ItemPointerIsValid(&lockedtid))
 3411 UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
 3413 LockTuple(resultRelInfo->ri_RelationDesc, &context->tmfd.ctid,
 3415 lockedtid = context->tmfd.ctid;
 3416 }
 3417 if (!table_tuple_fetch_row_version(resultRelationDesc,
 3418 &context->tmfd.ctid,
 3420 resultRelInfo->ri_oldTupleSlot))
 3421 elog(ERROR, "failed to fetch the target tuple");
 3422
 3423 if (*matched)
 3424 *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
 3425 econtext);
 3426
 3427 /* Switch lists, if necessary */
 3428 if (!*matched)
 3429 actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
 3430 }
 3431
 3432 /*
 3433 * Loop back and process the MATCHED or NOT
 3434 * MATCHED BY SOURCE actions from the start.
 3435 */
 3436 goto lmerge_matched;
 3437
 3438 case TM_Deleted:
 3439
 3440 /*
 3441 * tuple already deleted; tell caller to run NOT
 3442 * MATCHED [BY TARGET] actions
 3443 */
 3444 *matched = false;
 3445 goto out;
 3446
 3447 case TM_SelfModified:
 3448
 3449 /*
 3450 * This can be reached when following an update
 3451 * chain from a tuple updated by another session,
 3452 * reaching a tuple that was already updated or
 3453 * deleted by the current command, or by a later
 3454 * command in the current transaction. As above,
 3455 * this should always be treated as an error.
 3456 */
 3457 if (context->tmfd.cmax != estate->es_output_cid)
 3458 ereport(ERROR,
 3459 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
 3460 errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
 3461 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
 3462
 3464 ereport(ERROR,
 3465 (errcode(ERRCODE_CARDINALITY_VIOLATION),
 3466 /* translator: %s is a SQL command name */
 3467 errmsg("%s command cannot affect row a second time",
 3468 "MERGE"),
 3469 errhint("Ensure that not more than one source row matches any one target row.")));
 3470
 3471 /* This shouldn't happen */
 3472 elog(ERROR, "attempted to update or delete invisible tuple");
 3473 goto out;
 3474
 3475 default:
 3476 /* see table_tuple_lock call in ExecDelete() */
 3477 elog(ERROR, "unexpected table_tuple_lock status: %u",
 3478 result);
 3479 goto out;
 3480 }
 3481 }
 3482
 3483 case TM_Invisible:
 3484 case TM_WouldBlock:
 3485 case TM_BeingModified:
 3486 /* these should not occur */
 3487 elog(ERROR, "unexpected tuple operation result: %d", result);
 3488 break;
 3489 }
 3490
 3491 /* Process RETURNING if present */
 3492 if (resultRelInfo->ri_projectReturning)
 3493 {
 3494 switch (commandType)
 3495 {
 3496 case CMD_UPDATE:
 3497 rslot = ExecProcessReturning(context,
 3498 resultRelInfo,
 3499 CMD_UPDATE,
 3500 resultRelInfo->ri_oldTupleSlot,
 3501 newslot,
 3502 context->planSlot);
 3503 break;
 3504
 3505 case CMD_DELETE:
 3506 rslot = ExecProcessReturning(context,
 3507 resultRelInfo,
 3508 CMD_DELETE,
 3509 resultRelInfo->ri_oldTupleSlot,
 3510 NULL,
 3511 context->planSlot);
 3512 break;
 3513
 3514 case CMD_NOTHING:
 3515 break;
 3516
 3517 default:
 3518 elog(ERROR, "unrecognized commandType: %d",
 3519 (int) commandType);
 3520 }
 3521 }
 3522
 3523 /*
 3524 * We've activated one of the WHEN clauses, so we don't search
 3525 * further. This is required behaviour, not an optimization.
 3526 */
 3527 break;
 3528 }
 3529
 3530 /*
 3531 * Successfully executed an action or no qualifying action was found.
 3532 */
 3533out:
 3534 if (ItemPointerIsValid(&lockedtid))
 3535 UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
 3537 return rslot;
 3538}
3539
3540/*
3541 * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3542 */
3543static TupleTableSlot *
 3545 bool canSetTag)
 3546{
 3547 ModifyTableState *mtstate = context->mtstate;
 3548 ExprContext *econtext = mtstate->ps.ps_ExprContext;
 3549 List *actionStates;
 3550 TupleTableSlot *rslot = NULL;
 3551 ListCell *l;
 3552
 /*
  * NOTE(review): this rendering dropped the function-name/parameter line
  * (orig. 3544) and the loop-variable declaration inside the foreach
  * (orig. 3575, presumably "MergeActionState *action = ... lfirst(l)") --
  * verify against the canonical source before editing.
  */
 3553 /*
 3554 * For INSERT actions, the root relation's merge action is OK since the
 3555 * INSERT's targetlist and the WHEN conditions can only refer to the
 3556 * source relation and hence it does not matter which result relation we
 3557 * work with.
 3558 *
 3559 * XXX does this mean that we can avoid creating copies of actionStates on
 3560 * partitioned tables, for not-matched actions?
 3561 */
 3562 actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
 3563
 3564 /*
 3565 * Make source tuple available to ExecQual and ExecProject. We don't need
 3566 * the target tuple, since the WHEN quals and targetlist can't refer to
 3567 * the target columns.
 3568 */
 3569 econtext->ecxt_scantuple = NULL;
 3570 econtext->ecxt_innertuple = context->planSlot;
 3571 econtext->ecxt_outertuple = NULL;
 3572
 3573 foreach(l, actionStates)
 3574 {
 3576 CmdType commandType = action->mas_action->commandType;
 3577 TupleTableSlot *newslot;
 3578
 3579 /*
 3580 * Test condition, if any.
 3581 *
 3582 * In the absence of any condition, we perform the action
 3583 * unconditionally (no need to check separately since ExecQual() will
 3584 * return true if there are no conditions to evaluate).
 3585 */
 3586 if (!ExecQual(action->mas_whenqual, econtext))
 3587 continue;
 3588
 3589 /* Perform stated action */
 3590 switch (commandType)
 3591 {
 3592 case CMD_INSERT:
 3593
 3594 /*
 3595 * Project the tuple. In case of a partitioned table, the
 3596 * projection was already built to use the root's descriptor,
 3597 * so we don't need to map the tuple here.
 3598 */
 3599 newslot = ExecProject(action->mas_proj);
 3600 mtstate->mt_merge_action = action;
 3601
 3602 rslot = ExecInsert(context, mtstate->rootResultRelInfo,
 3603 newslot, canSetTag, NULL, NULL);
 3604 mtstate->mt_merge_inserted += 1;
 3605 break;
 3606 case CMD_NOTHING:
 3607 /* Do nothing */
 3608 break;
 3609 default:
 3610 elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
 3611 }
 3612
 3613 /*
 3614 * We've activated one of the WHEN clauses, so we don't search
 3615 * further. This is required behaviour, not an optimization.
 3616 */
 3617 break;
 3618 }
 3619
 3620 return rslot;
 3621}
3622
3623/*
3624 * Initialize state for execution of MERGE.
3625 */
3626void
 3628{
 3629 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
 3630 ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
 3631 ResultRelInfo *resultRelInfo;
 3632 ExprContext *econtext;
 3633 ListCell *lc;
 3634 int i;
 3635
 /*
  * NOTE(review): this rendering dropped several source lines -- the
  * function-name line (orig. 3627), the inner loop-variable declaration
  * (orig. 3675), the partition-routing setup call (orig. 3730-3731), the
  * CMD_UPDATE projection-builder call line (orig. 3754), and the
  * mt_merge_subcommands |= MERGE_* statements (orig. 3750, 3761, 3764).
  * Verify against the canonical source before editing.
  */
 3636 if (node->mergeActionLists == NIL)
 3637 return;
 3638
 3639 mtstate->mt_merge_subcommands = 0;
 3640
 3641 if (mtstate->ps.ps_ExprContext == NULL)
 3642 ExecAssignExprContext(estate, &mtstate->ps);
 3643 econtext = mtstate->ps.ps_ExprContext;
 3644
 3645 /*
 3646 * Create a MergeActionState for each action on the mergeActionList and
 3647 * add it to either a list of matched actions or not-matched actions.
 3648 *
 3649 * Similar logic appears in ExecInitPartitionInfo(), so if changing
 3650 * anything here, do so there too.
 3651 */
 3652 i = 0;
 3653 foreach(lc, node->mergeActionLists)
 3654 {
 3655 List *mergeActionList = lfirst(lc);
 3656 Node *joinCondition;
 3657 TupleDesc relationDesc;
 3658 ListCell *l;
 3659
 3660 joinCondition = (Node *) list_nth(node->mergeJoinConditions, i);
 3661 resultRelInfo = mtstate->resultRelInfo + i;
 3662 i++;
 3663 relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
 3664
 3665 /* initialize slots for MERGE fetches from this rel */
 3666 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
 3667 ExecInitMergeTupleSlots(mtstate, resultRelInfo);
 3668
 3669 /* initialize state for join condition checking */
 3670 resultRelInfo->ri_MergeJoinCondition =
 3671 ExecInitQual((List *) joinCondition, &mtstate->ps);
 3672
 3673 foreach(l, mergeActionList)
 3674 {
 3676 MergeActionState *action_state;
 3677 TupleTableSlot *tgtslot;
 3678 TupleDesc tgtdesc;
 3679
 3680 /*
 3681 * Build action merge state for this rel. (For partitions,
 3682 * equivalent code exists in ExecInitPartitionInfo.)
 3683 */
 3684 action_state = makeNode(MergeActionState);
 3685 action_state->mas_action = action;
 3686 action_state->mas_whenqual = ExecInitQual((List *) action->qual,
 3687 &mtstate->ps);
 3688
 3689 /*
 3690 * We create three lists - one for each MergeMatchKind - and stick
 3691 * the MergeActionState into the appropriate list.
 3692 */
 3693 resultRelInfo->ri_MergeActions[action->matchKind] =
 3694 lappend(resultRelInfo->ri_MergeActions[action->matchKind],
 3695 action_state);
 3696
 3697 switch (action->commandType)
 3698 {
 3699 case CMD_INSERT:
 3700 ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
 3701 action->targetList);
 3702
 3703 /*
 3704 * If the MERGE targets a partitioned table, any INSERT
 3705 * actions must be routed through it, not the child
 3706 * relations. Initialize the routing struct and the root
 3707 * table's "new" tuple slot for that, if not already done.
 3708 * The projection we prepare, for all relations, uses the
 3709 * root relation descriptor, and targets the plan's root
 3710 * slot. (This is consistent with the fact that we
 3711 * checked the plan output to match the root relation,
 3712 * above.)
 3713 */
 3714 if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
 3715 RELKIND_PARTITIONED_TABLE)
 3716 {
 3717 if (mtstate->mt_partition_tuple_routing == NULL)
 3718 {
 3719 /*
 3720 * Initialize planstate for routing if not already
 3721 * done.
 3722 *
 3723 * Note that the slot is managed as a standalone
 3724 * slot belonging to ModifyTableState, so we pass
 3725 * NULL for the 2nd argument.
 3726 */
 3727 mtstate->mt_root_tuple_slot =
 3728 table_slot_create(rootRelInfo->ri_RelationDesc,
 3729 NULL);
 3732 rootRelInfo->ri_RelationDesc);
 3733 }
 3734 tgtslot = mtstate->mt_root_tuple_slot;
 3735 tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
 3736 }
 3737 else
 3738 {
 3739 /* not partitioned? use the stock relation and slot */
 3740 tgtslot = resultRelInfo->ri_newTupleSlot;
 3741 tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
 3742 }
 3743
 3744 action_state->mas_proj =
 3745 ExecBuildProjectionInfo(action->targetList, econtext,
 3746 tgtslot,
 3747 &mtstate->ps,
 3748 tgtdesc);
 3749
 3751 break;
 3752 case CMD_UPDATE:
 3753 action_state->mas_proj =
 3755 true,
 3756 action->updateColnos,
 3757 relationDesc,
 3758 econtext,
 3759 resultRelInfo->ri_newTupleSlot,
 3760 &mtstate->ps);
 3762 break;
 3763 case CMD_DELETE:
 3765 break;
 3766 case CMD_NOTHING:
 3767 break;
 3768 default:
 3769 elog(ERROR, "unknown operation");
 3770 break;
 3771 }
 3772 }
 3773 }
 3774}
3775
3776/*
3777 * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3778 *
3779 * We mark 'projectNewInfoValid' even though the projections themselves
3780 * are not initialized here.
3781 */
3782void
 3784 ResultRelInfo *resultRelInfo)
 3785{
 3786 EState *estate = mtstate->ps.state;
 3787
 /*
  * NOTE(review): the function-name/first-parameter line (orig. 3783) is
  * missing from this rendering.
  */
 3788 Assert(!resultRelInfo->ri_projectNewInfoValid);
 3789
 /*
  * Both slots are registered in estate->es_tupleTable (passed to
  * table_slot_create), so they are released with the rest of the
  * executor's tuple table at shutdown.
  */
 3790 resultRelInfo->ri_oldTupleSlot =
 3791 table_slot_create(resultRelInfo->ri_RelationDesc,
 3792 &estate->es_tupleTable);
 3793 resultRelInfo->ri_newTupleSlot =
 3794 table_slot_create(resultRelInfo->ri_RelationDesc,
 3795 &estate->es_tupleTable);
 3796 resultRelInfo->ri_projectNewInfoValid = true;
 3797}
3798
3799/*
3800 * Process BEFORE EACH STATEMENT triggers
3801 */
3802static void
 3804{
 3805 ModifyTable *plan = (ModifyTable *) node->ps.plan;
 3806 ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
 3807
 /*
  * NOTE(review): this rendering dropped the function-name line
  * (orig. 3803), the ON CONFLICT DO UPDATE trigger call line
  * (orig. 3813), and the guard lines for the CMD_MERGE case
  * (orig. 3823, 3825, 3827 -- presumably tests of
  * node->mt_merge_subcommands against MERGE_INSERT/UPDATE/DELETE).
  * Verify against the canonical source before editing.
  */
 3808 switch (node->operation)
 3809 {
 3810 case CMD_INSERT:
 3811 ExecBSInsertTriggers(node->ps.state, resultRelInfo);
 3812 if (plan->onConflictAction == ONCONFLICT_UPDATE)
 3814 resultRelInfo);
 3815 break;
 3816 case CMD_UPDATE:
 3817 ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
 3818 break;
 3819 case CMD_DELETE:
 3820 ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
 3821 break;
 3822 case CMD_MERGE:
 3824 ExecBSInsertTriggers(node->ps.state, resultRelInfo);
 3826 ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
 3828 ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
 3829 break;
 3830 default:
 3831 elog(ERROR, "unknown operation");
 3832 break;
 3833 }
 3834}
3835
3836/*
3837 * Process AFTER EACH STATEMENT triggers
3838 */
3839static void
 3841{
 3842 ModifyTable *plan = (ModifyTable *) node->ps.plan;
 3843 ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
 3844
 /*
  * NOTE(review): this rendering dropped the function-name line
  * (orig. 3840), part of the ON CONFLICT argument list (orig. 3851),
  * and the guard lines for the CMD_MERGE case (orig. 3864, 3867, 3870
  * -- presumably tests of node->mt_merge_subcommands). Note the MERGE
  * AFTER triggers fire in reverse order (DELETE, UPDATE, INSERT)
  * relative to fireBSTriggers().
  */
 3845 switch (node->operation)
 3846 {
 3847 case CMD_INSERT:
 3848 if (plan->onConflictAction == ONCONFLICT_UPDATE)
 3850 resultRelInfo,
 3852 ExecASInsertTriggers(node->ps.state, resultRelInfo,
 3853 node->mt_transition_capture);
 3854 break;
 3855 case CMD_UPDATE:
 3856 ExecASUpdateTriggers(node->ps.state, resultRelInfo,
 3857 node->mt_transition_capture);
 3858 break;
 3859 case CMD_DELETE:
 3860 ExecASDeleteTriggers(node->ps.state, resultRelInfo,
 3861 node->mt_transition_capture);
 3862 break;
 3863 case CMD_MERGE:
 3865 ExecASDeleteTriggers(node->ps.state, resultRelInfo,
 3866 node->mt_transition_capture);
 3868 ExecASUpdateTriggers(node->ps.state, resultRelInfo,
 3869 node->mt_transition_capture);
 3871 ExecASInsertTriggers(node->ps.state, resultRelInfo,
 3872 node->mt_transition_capture);
 3873 break;
 3874 default:
 3875 elog(ERROR, "unknown operation");
 3876 break;
 3877 }
 3878}
3879
3880/*
3881 * Set up the state needed for collecting transition tuples for AFTER
3882 * triggers.
3883 */
3884static void
 3886{
 3887 ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
 3888 ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
 3889
 /*
  * NOTE(review): the function-name/parameter line (orig. 3885) is missing
  * from this rendering.
  */
 3890 /* Check for transition tables on the directly targeted relation. */
 3891 mtstate->mt_transition_capture =
 3892 MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
 3893 RelationGetRelid(targetRelInfo->ri_RelationDesc),
 3894 mtstate->operation);
 /*
  * ON CONFLICT DO UPDATE needs a second capture state keyed to
  * CMD_UPDATE, since the statement's primary operation is CMD_INSERT.
  */
 3895 if (plan->operation == CMD_INSERT &&
 3896 plan->onConflictAction == ONCONFLICT_UPDATE)
 3897 mtstate->mt_oc_transition_capture =
 3898 MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
 3899 RelationGetRelid(targetRelInfo->ri_RelationDesc),
 3900 CMD_UPDATE);
 3901}
3902
3903/*
3904 * ExecPrepareTupleRouting --- prepare for routing one tuple
3905 *
3906 * Determine the partition in which the tuple in slot is to be inserted,
3907 * and return its ResultRelInfo in *partRelInfo. The return value is
3908 * a slot holding the tuple of the partition rowtype.
3909 *
3910 * This also sets the transition table information in mtstate based on the
3911 * selected partition.
3912 */
3913static TupleTableSlot *
 3915 EState *estate,
 3916 PartitionTupleRouting *proute,
 3917 ResultRelInfo *targetRelInfo,
 3918 TupleTableSlot *slot,
 3919 ResultRelInfo **partRelInfo)
 3920{
 3921 ResultRelInfo *partrel;
 3922 TupleConversionMap *map;
 3923
 /*
  * NOTE(review): this rendering dropped the function-name line
  * (orig. 3914) and parts of the transition-capture assignment
  * (orig. 3945, 3947 -- presumably the trig_insert_before_row test and
  * the tcs_original_insert_tuple lvalue). Verify against the canonical
  * source before editing.
  */
 3924 /*
 3925 * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
 3926 * not find a valid partition for the tuple in 'slot' then an error is
 3927 * raised. An error may also be raised if the found partition is not a
 3928 * valid target for INSERTs. This is required since a partitioned table
 3929 * UPDATE to another partition becomes a DELETE+INSERT.
 3930 */
 3931 partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
 3932
 3933 /*
 3934 * If we're capturing transition tuples, we might need to convert from the
 3935 * partition rowtype to root partitioned table's rowtype. But if there
 3936 * are no BEFORE triggers on the partition that could change the tuple, we
 3937 * can just remember the original unconverted tuple to avoid a needless
 3938 * round trip conversion.
 3939 */
 3940 if (mtstate->mt_transition_capture != NULL)
 3941 {
 3942 bool has_before_insert_row_trig;
 3943
 3944 has_before_insert_row_trig = (partrel->ri_TrigDesc &&
 3946
 3948 !has_before_insert_row_trig ? slot : NULL;
 3949 }
 3950
 3951 /*
 3952 * Convert the tuple, if necessary.
 3953 */
 3954 map = ExecGetRootToChildMap(partrel, estate);
 3955 if (map != NULL)
 3956 {
 3957 TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
 3958
 3959 slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
 3960 }
 3961
 3962 *partRelInfo = partrel;
 3963 return slot;
 3964}
3965
3966/* ----------------------------------------------------------------
3967 * ExecModifyTable
3968 *
3969 * Perform table modifications as required, and return RETURNING results
3970 * if needed.
3971 * ----------------------------------------------------------------
3972 */
3973static TupleTableSlot *
/*
 * NOTE(review): the export dropped the signature line (3974, presumably
 * "ExecModifyTable(PlanState *pstate)") and line 3976 (presumably
 * "ModifyTableState *node = castNode(ModifyTableState, pstate);") -- the
 * body below reads both 'pstate' and 'node', so both must exist in the
 * original; confirm against the upstream file.
 */
3975{
3977 ModifyTableContext context;
3978 EState *estate = node->ps.state;
3979 CmdType operation = node->operation;
3980 ResultRelInfo *resultRelInfo;
3981 PlanState *subplanstate;
3982 TupleTableSlot *slot;
3983 TupleTableSlot *oldSlot;
3984 ItemPointerData tuple_ctid;
3985 HeapTupleData oldtupdata;
3986 HeapTuple oldtuple;
3987 ItemPointer tupleid;
3988 bool tuplock;
3991
3992 /*
3993 * This should NOT get called during EvalPlanQual; we should have passed a
3994 * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3995 * Assert because this condition is easy to miss in testing. (Note:
3996 * although ModifyTable should not get executed within an EvalPlanQual
3997 * operation, we do have to allow it to be initialized and shut down in
3998 * case it is within a CTE subplan. Hence this test must be here, not in
3999 * ExecInitModifyTable.)
4000 */
4001 if (estate->es_epq_active != NULL)
4002 elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4003
4004 /*
4005 * If we've already completed processing, don't try to do more. We need
4006 * this test because ExecPostprocessPlan might call us an extra time, and
4007 * our subplan's nodes aren't necessarily robust against being called
4008 * extra times.
4009 */
4010 if (node->mt_done)
4011 return NULL;
4012
4013 /*
4014 * On first call, fire BEFORE STATEMENT triggers before proceeding.
4015 */
4016 if (node->fireBSTriggers)
4017 {
4018 fireBSTriggers(node);
4019 node->fireBSTriggers = false;
4020 }
4021
4022 /* Preload local variables */
4023 resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4024 subplanstate = outerPlanState(node);
4025
4026 /* Set global context */
4027 context.mtstate = node;
4028 context.epqstate = &node->mt_epqstate;
4029 context.estate = estate;
4030
4031 /*
4032 * Fetch rows from subplan, and execute the required table modification
4033 * for each row.
4034 */
4035 for (;;)
4036 {
4037 /*
4038 * Reset the per-output-tuple exprcontext. This is needed because
4039 * triggers expect to use that context as workspace. It's a bit ugly
4040 * to do this below the top level of the plan, however. We might need
4041 * to rethink this later.
4042 */
/* NOTE(review): line 4043 (the reset call itself) was elided by the export. */
4044
4045 /*
4046 * Reset per-tuple memory context used for processing on conflict and
4047 * returning clauses, to free any expression evaluation storage
4048 * allocated in the previous cycle.
4049 */
4050 if (pstate->ps_ExprContext)
/* NOTE(review): line 4051 (presumably a ResetExprContext call) was elided. */
4052
4053 /*
4054 * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4055 * to execute, do so now --- see the comments in ExecMerge().
4056 */
4057 if (node->mt_merge_pending_not_matched != NULL)
4058 {
4059 context.planSlot = node->mt_merge_pending_not_matched;
4060 context.cpDeletedSlot = NULL;
4061
4062 slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4063 node->canSetTag);
4064
4065 /* Clear the pending action */
4066 node->mt_merge_pending_not_matched = NULL;
4067
4068 /*
4069 * If we got a RETURNING result, return it to the caller. We'll
4070 * continue the work on next call.
4071 */
4072 if (slot)
4073 return slot;
4074
4075 continue; /* continue with the next tuple */
4076 }
4077
4078 /* Fetch the next row from subplan */
4079 context.planSlot = ExecProcNode(subplanstate);
4080 context.cpDeletedSlot = NULL;
4081
4082 /* No more tuples to process? */
4083 if (TupIsNull(context.planSlot))
4084 break;
4085
4086 /*
4087 * When there are multiple result relations, each tuple contains a
4088 * junk column that gives the OID of the rel from which it came.
4089 * Extract it and select the correct result relation.
4090 */
/* NOTE(review): line 4091 (the if-condition testing mt_resultOidAttno) was elided. */
4092 {
4093 Datum datum;
4094 bool isNull;
4095 Oid resultoid;
4096
4097 datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4098 &isNull);
4099 if (isNull)
4100 {
4101 /*
4102 * For commands other than MERGE, any tuples having InvalidOid
4103 * for tableoid are errors. For MERGE, we may need to handle
4104 * them as WHEN NOT MATCHED clauses if any, so do that.
4105 *
4106 * Note that we use the node's toplevel resultRelInfo, not any
4107 * specific partition's.
4108 */
4109 if (operation == CMD_MERGE)
4110 {
4111 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4112
4113 slot = ExecMerge(&context, node->resultRelInfo,
4114 NULL, NULL, node->canSetTag);
4115
4116 /*
4117 * If we got a RETURNING result, return it to the caller.
4118 * We'll continue the work on next call.
4119 */
4120 if (slot)
4121 return slot;
4122
4123 continue; /* continue with the next tuple */
4124 }
4125
4126 elog(ERROR, "tableoid is NULL");
4127 }
4128 resultoid = DatumGetObjectId(datum);
4129
4130 /* If it's not the same as last time, we need to locate the rel */
4131 if (resultoid != node->mt_lastResultOid)
4132 resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4133 false, true);
4134 }
4135
4136 /*
4137 * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4138 * here is compute the RETURNING expressions.
4139 */
4140 if (resultRelInfo->ri_usesFdwDirectModify)
4141 {
4142 Assert(resultRelInfo->ri_projectReturning);
4143
4144 /*
4145 * A scan slot containing the data that was actually inserted,
4146 * updated or deleted has already been made available to
4147 * ExecProcessReturning by IterateDirectModify, so no need to
4148 * provide it here. The individual old and new slots are not
4149 * needed, since direct-modify is disabled if the RETURNING list
4150 * refers to OLD/NEW values.
4151 */
4152 Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4153 (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4154
4155 slot = ExecProcessReturning(&context, resultRelInfo, operation,
4156 NULL, NULL, context.planSlot);
4157
4158 return slot;
4159 }
4160
4161 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4162 slot = context.planSlot;
4163
4164 tupleid = NULL;
4165 oldtuple = NULL;
4166
4167 /*
4168 * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4169 * to be updated/deleted/merged. For a heap relation, that's a TID;
4170 * otherwise we may have a wholerow junk attr that carries the old
4171 * tuple in toto. Keep this in step with the part of
4172 * ExecInitModifyTable that sets up ri_RowIdAttNo.
4173 */
4174 if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4175 operation == CMD_MERGE)
4176 {
4177 char relkind;
4178 Datum datum;
4179 bool isNull;
4180
4181 relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4182 if (relkind == RELKIND_RELATION ||
4183 relkind == RELKIND_MATVIEW ||
4184 relkind == RELKIND_PARTITIONED_TABLE)
4185 {
4186 /* ri_RowIdAttNo refers to a ctid attribute */
/* NOTE(review): line 4187 (an Assert on ri_RowIdAttNo validity, presumably) was elided. */
4188 datum = ExecGetJunkAttribute(slot,
4189 resultRelInfo->ri_RowIdAttNo,
4190 &isNull);
4191
4192 /*
4193 * For commands other than MERGE, any tuples having a null row
4194 * identifier are errors. For MERGE, we may need to handle
4195 * them as WHEN NOT MATCHED clauses if any, so do that.
4196 *
4197 * Note that we use the node's toplevel resultRelInfo, not any
4198 * specific partition's.
4199 */
4200 if (isNull)
4201 {
4202 if (operation == CMD_MERGE)
4203 {
4204 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4205
4206 slot = ExecMerge(&context, node->resultRelInfo,
4207 NULL, NULL, node->canSetTag);
4208
4209 /*
4210 * If we got a RETURNING result, return it to the
4211 * caller. We'll continue the work on next call.
4212 */
4213 if (slot)
4214 return slot;
4215
4216 continue; /* continue with the next tuple */
4217 }
4218
4219 elog(ERROR, "ctid is NULL");
4220 }
4221
4222 tupleid = (ItemPointer) DatumGetPointer(datum);
4223 tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4224 tupleid = &tuple_ctid;
4225 }
4226
4227 /*
4228 * Use the wholerow attribute, when available, to reconstruct the
4229 * old relation tuple. The old tuple serves one or both of two
4230 * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4231 * provides values for any unchanged columns for the NEW tuple of
4232 * an UPDATE, because the subplan does not produce all the columns
4233 * of the target table.
4234 *
4235 * Note that the wholerow attribute does not carry system columns,
4236 * so foreign table triggers miss seeing those, except that we
4237 * know enough here to set t_tableOid. Quite separately from
4238 * this, the FDW may fetch its own junk attrs to identify the row.
4239 *
4240 * Other relevant relkinds, currently limited to views, always
4241 * have a wholerow attribute.
4242 */
4243 else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4244 {
4245 datum = ExecGetJunkAttribute(slot,
4246 resultRelInfo->ri_RowIdAttNo,
4247 &isNull);
4248
4249 /*
4250 * For commands other than MERGE, any tuples having a null row
4251 * identifier are errors. For MERGE, we may need to handle
4252 * them as WHEN NOT MATCHED clauses if any, so do that.
4253 *
4254 * Note that we use the node's toplevel resultRelInfo, not any
4255 * specific partition's.
4256 */
4257 if (isNull)
4258 {
4259 if (operation == CMD_MERGE)
4260 {
4261 EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4262
4263 slot = ExecMerge(&context, node->resultRelInfo,
4264 NULL, NULL, node->canSetTag);
4265
4266 /*
4267 * If we got a RETURNING result, return it to the
4268 * caller. We'll continue the work on next call.
4269 */
4270 if (slot)
4271 return slot;
4272
4273 continue; /* continue with the next tuple */
4274 }
4275
4276 elog(ERROR, "wholerow is NULL");
4277 }
4278
4279 oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4280 oldtupdata.t_len =
/* NOTE(review): continuation line 4281 (the HeapTupleHeaderGetDatumLength call, presumably) was elided. */
4282 ItemPointerSetInvalid(&(oldtupdata.t_self));
4283 /* Historically, view triggers see invalid t_tableOid. */
4284 oldtupdata.t_tableOid =
4285 (relkind == RELKIND_VIEW) ? InvalidOid :
4286 RelationGetRelid(resultRelInfo->ri_RelationDesc);
4287
4288 oldtuple = &oldtupdata;
4289 }
4290 else
4291 {
4292 /* Only foreign tables are allowed to omit a row-ID attr */
4293 Assert(relkind == RELKIND_FOREIGN_TABLE);
4294 }
4295 }
4296
4297 switch (operation)
4298 {
4299 case CMD_INSERT:
4300 /* Initialize projection info if first time for this table */
4301 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4302 ExecInitInsertProjection(node, resultRelInfo);
4303 slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4304 slot = ExecInsert(&context, resultRelInfo, slot,
4305 node->canSetTag, NULL, NULL);
4306 break;
4307
4308 case CMD_UPDATE:
4309 tuplock = false;
4310
4311 /* Initialize projection info if first time for this table */
4312 if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4313 ExecInitUpdateProjection(node, resultRelInfo);
4314
4315 /*
4316 * Make the new tuple by combining plan's output tuple with
4317 * the old tuple being updated.
4318 */
4319 oldSlot = resultRelInfo->ri_oldTupleSlot;
4320 if (oldtuple != NULL)
4321 {
4322 Assert(!resultRelInfo->ri_needLockTagTuple);
4323 /* Use the wholerow junk attr as the old tuple. */
4324 ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4325 }
4326 else
4327 {
4328 /* Fetch the most recent version of old tuple. */
4329 Relation relation = resultRelInfo->ri_RelationDesc;
4330
4331 if (resultRelInfo->ri_needLockTagTuple)
4332 {
4333 LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4334 tuplock = true;
4335 }
4336 if (!table_tuple_fetch_row_version(relation, tupleid,
/* NOTE(review): continuation line 4337 (the snapshot argument, presumably SnapshotAny) was elided. */
4338 oldSlot))
4339 elog(ERROR, "failed to fetch tuple being updated");
4340 }
4341 slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4342 oldSlot);
4343
4344 /* Now apply the update. */
4345 slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4346 oldSlot, slot, node->canSetTag);
4347 if (tuplock)
4348 UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
/* NOTE(review): continuation line 4349 (presumably InplaceUpdateTupleLock) was elided. */
4350 break;
4351
4352 case CMD_DELETE:
4353 slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4354 true, false, node->canSetTag, NULL, NULL, NULL);
4355 break;
4356
4357 case CMD_MERGE:
4358 slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4359 node->canSetTag);
4360 break;
4361
4362 default:
4363 elog(ERROR, "unknown operation");
4364 break;
4365 }
4366
4367 /*
4368 * If we got a RETURNING result, return it to caller. We'll continue
4369 * the work on next call.
4370 */
4371 if (slot)
4372 return slot;
4373 }
4374
4375 /*
4376 * Insert remaining tuples for batch insert.
4377 */
4379 ExecPendingInserts(estate);
4380
4381 /*
4382 * We're done, but fire AFTER STATEMENT triggers before exiting.
4383 */
4384 fireASTriggers(node);
4385
4386 node->mt_done = true;
4387
4388 return NULL;
4389}
4390
4391/*
4392 * ExecLookupResultRelByOid
4393 * If the table with given OID is among the result relations to be
4394 * updated by the given ModifyTable node, return its ResultRelInfo.
4395 *
4396 * If not found, return NULL if missing_ok, else raise error.
4397 *
4398 * If update_cache is true, then upon successful lookup, update the node's
4399 * one-element cache. ONLY ExecModifyTable may pass true for this.
4400 */
/*
 * NOTE(review): the export dropped lines 4401-4402 here, which should carry
 * the return type, function name, and first parameters (presumably
 * "ResultRelInfo *" / "ExecLookupResultRelByOid(ModifyTableState *node,
 * Oid resultoid," given the header comment above and the callers in this
 * file) -- confirm against the original source.
 */
4403 bool missing_ok, bool update_cache)
4404{
4405 if (node->mt_resultOidHash)
4406 {
4407 /* Use the pre-built hash table to locate the rel */
4408 MTTargetRelLookup *mtlookup;
4409
4410 mtlookup = (MTTargetRelLookup *)
4411 hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4412 if (mtlookup)
4413 {
4414 if (update_cache)
4415 {
4416 node->mt_lastResultOid = resultoid;
4417 node->mt_lastResultIndex = mtlookup->relationIndex;
4418 }
4419 return node->resultRelInfo + mtlookup->relationIndex;
4420 }
4421 }
4422 else
4423 {
4424 /* With few target rels, just search the ResultRelInfo array */
4425 for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4426 {
4427 ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4428
4429 if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4430 {
4431 if (update_cache)
4432 {
4433 node->mt_lastResultOid = resultoid;
4434 node->mt_lastResultIndex = ndx;
4435 }
4436 return rInfo;
4437 }
4438 }
4439 }
4440
4441 if (!missing_ok)
4442 elog(ERROR, "incorrect result relation OID %u", resultoid);
4443 return NULL;
4444}
4445
4446/* ----------------------------------------------------------------
4447 * ExecInitModifyTable
4448 * ----------------------------------------------------------------
4449 */
/*
 * NOTE(review): the export dropped line 4450 here, which should carry the
 * return type (presumably "ModifyTableState *", since this function
 * returns mtstate below) -- confirm against the original source.  Several
 * continuation lines inside the body were also elided; each such spot is
 * flagged below.
 */
4451ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4452{
4453 ModifyTableState *mtstate;
4454 Plan *subplan = outerPlan(node);
4455 CmdType operation = node->operation;
4456 int nrels = list_length(node->resultRelations);
4457 ResultRelInfo *resultRelInfo;
4458 List *arowmarks;
4459 ListCell *l;
4460 int i;
4461 Relation rel;
4462
4463 /* check for unsupported flags */
4464 Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4465
4466 /*
4467 * create state structure
4468 */
4469 mtstate = makeNode(ModifyTableState);
4470 mtstate->ps.plan = (Plan *) node;
4471 mtstate->ps.state = estate;
4472 mtstate->ps.ExecProcNode = ExecModifyTable;
4473
4474 mtstate->operation = operation;
4475 mtstate->canSetTag = node->canSetTag;
4476 mtstate->mt_done = false;
4477
4478 mtstate->mt_nrels = nrels;
4479 mtstate->resultRelInfo = (ResultRelInfo *)
4480 palloc(nrels * sizeof(ResultRelInfo));
4481
4482 mtstate->mt_merge_pending_not_matched = NULL;
4483 mtstate->mt_merge_inserted = 0;
4484 mtstate->mt_merge_updated = 0;
4485 mtstate->mt_merge_deleted = 0;
4486
4487 /*----------
4488 * Resolve the target relation. This is the same as:
4489 *
4490 * - the relation for which we will fire FOR STATEMENT triggers,
4491 * - the relation into whose tuple format all captured transition tuples
4492 * must be converted, and
4493 * - the root partitioned table used for tuple routing.
4494 *
4495 * If it's a partitioned or inherited table, the root partition or
4496 * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4497 * given explicitly in node->rootRelation. Otherwise, the target relation
4498 * is the sole relation in the node->resultRelations list.
4499 *----------
4500 */
4501 if (node->rootRelation > 0)
4502 {
/* NOTE(review): lines 4503-4504 (rootResultRelInfo assignment and init call) were elided by the export. */
4505 node->rootRelation);
4506 }
4507 else
4508 {
4509 Assert(list_length(node->resultRelations) == 1);
4510 mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4511 ExecInitResultRelation(estate, mtstate->resultRelInfo,
/* NOTE(review): continuation line 4512 (the RT-index argument, presumably linitial_int(node->resultRelations)) was elided. */
4513 }
4514
4515 /* set up epqstate with dummy subplan data for the moment */
4516 EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4517 node->epqParam, node->resultRelations);
4518 mtstate->fireBSTriggers = true;
4519
4520 /*
4521 * Build state for collecting transition tuples. This requires having a
4522 * valid trigger query context, so skip it in explain-only mode.
4523 */
4524 if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4525 ExecSetupTransitionCaptureState(mtstate, estate);
4526
4527 /*
4528 * Open all the result relations and initialize the ResultRelInfo structs.
4529 * (But root relation was initialized above, if it's part of the array.)
4530 * We must do this before initializing the subplan, because direct-modify
4531 * FDWs expect their ResultRelInfos to be available.
4532 */
4533 resultRelInfo = mtstate->resultRelInfo;
4534 i = 0;
4535 foreach(l, node->resultRelations)
4536 {
4537 Index resultRelation = lfirst_int(l);
4538 List *mergeActions = NIL;
4539
4540 if (node->mergeActionLists)
4541 mergeActions = list_nth(node->mergeActionLists, i);
4542
4543 if (resultRelInfo != mtstate->rootResultRelInfo)
4544 {
4545 ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4546
4547 /*
4548 * For child result relations, store the root result relation
4549 * pointer. We do so for the convenience of places that want to
4550 * look at the query's original target relation but don't have the
4551 * mtstate handy.
4552 */
4553 resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4554 }
4555
4556 /* Initialize the usesFdwDirectModify flag */
4557 resultRelInfo->ri_usesFdwDirectModify =
/* NOTE(review): continuation line 4558 (presumably a bms_is_member test on node->fdwDirectModifyPlans) was elided. */
4559
4560 /*
4561 * Verify result relation is a valid target for the current operation
4562 */
4563 CheckValidResultRel(resultRelInfo, operation, mergeActions);
4564
4565 resultRelInfo++;
4566 i++;
4567 }
4568
4569 /*
4570 * Now we may initialize the subplan.
4571 */
4572 outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4573
4574 /*
4575 * Do additional per-result-relation initialization.
4576 */
4577 for (i = 0; i < nrels; i++)
4578 {
4579 resultRelInfo = &mtstate->resultRelInfo[i];
4580
4581 /* Let FDWs init themselves for foreign-table result rels */
4582 if (!resultRelInfo->ri_usesFdwDirectModify &&
4583 resultRelInfo->ri_FdwRoutine != NULL &&
4584 resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4585 {
4586 List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4587
4588 resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4589 resultRelInfo,
4590 fdw_private,
4591 i,
4592 eflags);
4593 }
4594
4595 /*
4596 * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4597 * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4598 * tables, the FDW might have created additional junk attr(s), but
4599 * those are no concern of ours.
4600 */
4601 if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4602 operation == CMD_MERGE)
4603 {
4604 char relkind;
4605
4606 relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4607 if (relkind == RELKIND_RELATION ||
4608 relkind == RELKIND_MATVIEW ||
4609 relkind == RELKIND_PARTITIONED_TABLE)
4610 {
4611 resultRelInfo->ri_RowIdAttNo =
4612 ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4613 if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4614 elog(ERROR, "could not find junk ctid column");
4615 }
4616 else if (relkind == RELKIND_FOREIGN_TABLE)
4617 {
4618 /*
4619 * We don't support MERGE with foreign tables for now. (It's
4620 * problematic because the implementation uses CTID.)
4621 */
4622 Assert(operation != CMD_MERGE);
4623
4624 /*
4625 * When there is a row-level trigger, there should be a
4626 * wholerow attribute. We also require it to be present in
4627 * UPDATE and MERGE, so we can get the values of unchanged
4628 * columns.
4629 */
4630 resultRelInfo->ri_RowIdAttNo =
/* NOTE(review): continuation line 4631 (the ExecFindJunkAttributeInTlist call opening) was elided. */
4632 "wholerow");
4633 if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4634 !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4635 elog(ERROR, "could not find junk wholerow column");
4636 }
4637 else
4638 {
4639 /* Other valid target relkinds must provide wholerow */
4640 resultRelInfo->ri_RowIdAttNo =
/* NOTE(review): continuation line 4641 (the ExecFindJunkAttributeInTlist call opening) was elided. */
4642 "wholerow");
4643 if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4644 elog(ERROR, "could not find junk wholerow column");
4645 }
4646 }
4647 }
4648
4649 /*
4650 * If this is an inherited update/delete/merge, there will be a junk
4651 * attribute named "tableoid" present in the subplan's targetlist. It
4652 * will be used to identify the result relation for a given tuple to be
4653 * updated/deleted/merged.
4654 */
4655 mtstate->mt_resultOidAttno =
4656 ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4657 Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4658 mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4659 mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4660
4661 /* Get the root target relation */
4662 rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4663
4664 /*
4665 * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4666 * or MERGE might need this too, but only if it actually moves tuples
4667 * between partitions; in that case setup is done by
4668 * ExecCrossPartitionUpdate.
4669 */
4670 if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4671 operation == CMD_INSERT)
/* NOTE(review): line 4672 (the assignment target, presumably mtstate->mt_partition_tuple_routing =) was elided. */
4673 ExecSetupPartitionTupleRouting(estate, rel);
4674
4675 /*
4676 * Initialize any WITH CHECK OPTION constraints if needed.
4677 */
4678 resultRelInfo = mtstate->resultRelInfo;
4679 foreach(l, node->withCheckOptionLists)
4680 {
4681 List *wcoList = (List *) lfirst(l);
4682 List *wcoExprs = NIL;
4683 ListCell *ll;
4684
4685 foreach(ll, wcoList)
4686 {
4687 WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4688 ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4689 &mtstate->ps);
4690
4691 wcoExprs = lappend(wcoExprs, wcoExpr);
4692 }
4693
4694 resultRelInfo->ri_WithCheckOptions = wcoList;
4695 resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4696 resultRelInfo++;
4697 }
4698
4699 /*
4700 * Initialize RETURNING projections if needed.
4701 */
4702 if (node->returningLists)
4703 {
4704 TupleTableSlot *slot;
4705 ExprContext *econtext;
4706
4707 /*
4708 * Initialize result tuple slot and assign its rowtype using the first
4709 * RETURNING list. We assume the rest will look the same.
4710 */
4711 mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4712
4713 /* Set up a slot for the output of the RETURNING projection(s) */
/* NOTE(review): line 4714 (presumably an ExecInitResultTupleSlotTL call) was elided. */
4715 slot = mtstate->ps.ps_ResultTupleSlot;
4716
4717 /* Need an econtext too */
4718 if (mtstate->ps.ps_ExprContext == NULL)
4719 ExecAssignExprContext(estate, &mtstate->ps);
4720 econtext = mtstate->ps.ps_ExprContext;
4721
4722 /*
4723 * Build a projection for each result rel.
4724 */
4725 resultRelInfo = mtstate->resultRelInfo;
4726 foreach(l, node->returningLists)
4727 {
4728 List *rlist = (List *) lfirst(l);
4729
4730 resultRelInfo->ri_returningList = rlist;
4731 resultRelInfo->ri_projectReturning =
4732 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4733 resultRelInfo->ri_RelationDesc->rd_att);
4734 resultRelInfo++;
4735 }
4736 }
4737 else
4738 {
4739 /*
4740 * We still must construct a dummy result tuple type, because InitPlan
4741 * expects one (maybe should change that?).
4742 */
4743 mtstate->ps.plan->targetlist = NIL;
4744 ExecInitResultTypeTL(&mtstate->ps);
4745
4746 mtstate->ps.ps_ExprContext = NULL;
4747 }
4748
4749 /* Set the list of arbiter indexes if needed for ON CONFLICT */
4750 resultRelInfo = mtstate->resultRelInfo;
4751 if (node->onConflictAction != ONCONFLICT_NONE)
4752 {
4753 /* insert may only have one relation, inheritance is not expanded */
4754 Assert(nrels == 1);
4755 resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4756 }
4757
4758 /*
4759 * If needed, Initialize target list, projection and qual for ON CONFLICT
4760 * DO UPDATE.
4761 */
/* NOTE(review): lines 4762 and 4764 (the ONCONFLICT_UPDATE test and the onconfl declaration, presumably) were elided. */
4763 {
4765 ExprContext *econtext;
4766 TupleDesc relationDesc;
4767
4768 /* already exists if created by RETURNING processing above */
4769 if (mtstate->ps.ps_ExprContext == NULL)
4770 ExecAssignExprContext(estate, &mtstate->ps);
4771
4772 econtext = mtstate->ps.ps_ExprContext;
4773 relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4774
4775 /* create state for DO UPDATE SET operation */
4776 resultRelInfo->ri_onConflict = onconfl;
4777
4778 /* initialize slot for the existing tuple */
4779 onconfl->oc_Existing =
4780 table_slot_create(resultRelInfo->ri_RelationDesc,
4781 &mtstate->ps.state->es_tupleTable);
4782
4783 /*
4784 * Create the tuple slot for the UPDATE SET projection. We want a slot
4785 * of the table's type here, because the slot will be used to insert
4786 * into the table, and for RETURNING processing - which may access
4787 * system attributes.
4788 */
4789 onconfl->oc_ProjSlot =
4790 table_slot_create(resultRelInfo->ri_RelationDesc,
4791 &mtstate->ps.state->es_tupleTable);
4792
4793 /* build UPDATE SET projection state */
4794 onconfl->oc_ProjInfo =
/* NOTE(review): continuation line 4795 (the ExecBuildUpdateProjection call opening) was elided. */
4796 true,
4797 node->onConflictCols,
4798 relationDesc,
4799 econtext,
4800 onconfl->oc_ProjSlot,
4801 &mtstate->ps);
4802
4803 /* initialize state to evaluate the WHERE clause, if any */
4804 if (node->onConflictWhere)
4805 {
4806 ExprState *qualexpr;
4807
4808 qualexpr = ExecInitQual((List *) node->onConflictWhere,
4809 &mtstate->ps);
4810 onconfl->oc_WhereClause = qualexpr;
4811 }
4812 }
4813
4814 /*
4815 * If we have any secondary relations in an UPDATE or DELETE, they need to
4816 * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4817 * EvalPlanQual mechanism needs to be told about them. This also goes for
4818 * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4819 */
4820 arowmarks = NIL;
4821 foreach(l, node->rowMarks)
4822 {
/* NOTE(review): line 4823 (the PlanRowMark *rc declaration/cast, presumably lfirst_node) was elided. */
4824 ExecRowMark *erm;
4825 ExecAuxRowMark *aerm;
4826
4827 /* ignore "parent" rowmarks; they are irrelevant at runtime */
4828 if (rc->isParent)
4829 continue;
4830
4831 /* Find ExecRowMark and build ExecAuxRowMark */
4832 erm = ExecFindRowMark(estate, rc->rti, false);
4833 aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4834 arowmarks = lappend(arowmarks, aerm);
4835 }
4836
4837 /* For a MERGE command, initialize its state */
4838 if (mtstate->operation == CMD_MERGE)
4839 ExecInitMerge(mtstate, estate);
4840
4841 EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4842
4843 /*
4844 * If there are a lot of result relations, use a hash table to speed the
4845 * lookups. If there are not a lot, a simple linear search is faster.
4846 *
4847 * It's not clear where the threshold is, but try 64 for starters. In a
4848 * debugging build, use a small threshold so that we get some test
4849 * coverage of both code paths.
4850 */
4851#ifdef USE_ASSERT_CHECKING
4852#define MT_NRELS_HASH 4
4853#else
4854#define MT_NRELS_HASH 64
4855#endif
4856 if (nrels >= MT_NRELS_HASH)
4857 {
4858 HASHCTL hash_ctl;
4859
4860 hash_ctl.keysize = sizeof(Oid);
4861 hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4862 hash_ctl.hcxt = CurrentMemoryContext;
4863 mtstate->mt_resultOidHash =
4864 hash_create("ModifyTable target hash",
4865 nrels, &hash_ctl,
/* NOTE(review): continuation line 4866 (the HASH_* flags argument) was elided. */
4867 for (i = 0; i < nrels; i++)
4868 {
4869 Oid hashkey;
4870 MTTargetRelLookup *mtlookup;
4871 bool found;
4872
4873 resultRelInfo = &mtstate->resultRelInfo[i];
4874 hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4875 mtlookup = (MTTargetRelLookup *)
4876 hash_search(mtstate->mt_resultOidHash, &hashkey,
4877 HASH_ENTER, &found);
4878 Assert(!found);
4879 mtlookup->relationIndex = i;
4880 }
4881 }
4882 else
4883 mtstate->mt_resultOidHash = NULL;
4884
4885 /*
4886 * Determine if the FDW supports batch insert and determine the batch size
4887 * (a FDW may support batching, but it may be disabled for the
4888 * server/table).
4889 *
4890 * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4891 * remains set to 0.
4892 */
4893 if (operation == CMD_INSERT)
4894 {
4895 /* insert may only have one relation, inheritance is not expanded */
4896 Assert(nrels == 1);
4897 resultRelInfo = mtstate->resultRelInfo;
4898 if (!resultRelInfo->ri_usesFdwDirectModify &&
4899 resultRelInfo->ri_FdwRoutine != NULL &&
4900 resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4901 resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4902 {
4903 resultRelInfo->ri_BatchSize =
4904 resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4905 Assert(resultRelInfo->ri_BatchSize >= 1);
4906 }
4907 else
4908 resultRelInfo->ri_BatchSize = 1;
4909 }
4910
4911 /*
4912 * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4913 * to estate->es_auxmodifytables so that it will be run to completion by
4914 * ExecPostprocessPlan. (It'd actually work fine to add the primary
4915 * ModifyTable node too, but there's no need.) Note the use of lcons not
4916 * lappend: we need later-initialized ModifyTable nodes to be shut down
4917 * before earlier ones. This ensures that we don't throw away RETURNING
4918 * rows that need to be seen by a later CTE subplan.
4919 */
4920 if (!mtstate->canSetTag)
4921 estate->es_auxmodifytables = lcons(mtstate,
4922 estate->es_auxmodifytables);
4923
4924 return mtstate;
4925}
4926
4927/* ----------------------------------------------------------------
4928 * ExecEndModifyTable
4929 *
4930 * Shuts down the plan.
4931 *
4932 * Returns nothing of interest.
4933 * ----------------------------------------------------------------
4934 */
4935void
/*
 * NOTE(review): the export dropped line 4936 here, which should carry the
 * name and parameter (presumably "ExecEndModifyTable(ModifyTableState
 * *node)", matching the header comment above) -- confirm against the
 * original source.
 */
4937{
4938 int i;
4939
4940 /*
4941 * Allow any FDWs to shut down
4942 */
4943 for (i = 0; i < node->mt_nrels; i++)
4944 {
4945 int j;
4946 ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4947
4948 if (!resultRelInfo->ri_usesFdwDirectModify &&
4949 resultRelInfo->ri_FdwRoutine != NULL &&
4950 resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4951 resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4952 resultRelInfo);
4953
4954 /*
4955 * Cleanup the initialized batch slots. This only matters for FDWs
4956 * with batching, but the other cases will have ri_NumSlotsInitialized
4957 * == 0.
4958 */
4959 for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4960 {
4961 ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
/* NOTE(review): line 4962 (dropping ri_PlanSlots[j], presumably) was elided by the export. */
4963 }
4964 }
4965
4966 /*
4967 * Close all the partitioned tables, leaf partitions, and their indices
4968 * and release the slot used for tuple routing, if set.
4969 */
/* NOTE(review): lines 4970, 4972-4973, 4975 (the mt_partition_tuple_routing
 * test, the ExecCleanupTupleRouting call, and the drop of
 * mt_root_tuple_slot, presumably) were elided by the export. */
4971 {
4973
4974 if (node->mt_root_tuple_slot)
4976 }
4977
4978 /*
4979 * Terminate EPQ execution if active
4980 */
/* NOTE(review): line 4981 (an EvalPlanQualEnd call, presumably) was elided. */
4982
4983 /*
4984 * shut down subplan
4985 */
/* NOTE(review): line 4986 (an ExecEndNode call on the outer plan, presumably) was elided. */
4987}
4988
4989void
/*
 * NOTE(review): the export dropped line 4990 here, which should carry the
 * name and parameter (presumably "ExecReScanModifyTable(ModifyTableState
 * *node)", matching the elog text below) -- confirm against the original
 * source.
 */
4991{
4992 /*
4993 * Currently, we don't need to support rescan on ModifyTable nodes. The
4994 * semantics of that would be a bit debatable anyway.
4995 */
4996 elog(ERROR, "ExecReScanModifyTable is not implemented");
4997}
#define AttributeNumberIsValid(attributeNumber)
Definition: attnum.h:34
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582
static Datum values[MAXATTR]
Definition: bootstrap.c:151
#define Assert(condition)
Definition: c.h:815
#define unlikely(x)
Definition: c.h:333
uint32_t uint32
Definition: c.h:488
unsigned int Index
Definition: c.h:571
uint32 TransactionId
Definition: c.h:609
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errhint(const char *fmt,...)
Definition: elog.c:1317
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
ExprState * ExecPrepareExpr(Expr *node, EState *estate)
Definition: execExpr.c:765
ProjectionInfo * ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent, TupleDesc inputDesc)
Definition: execExpr.c:370
ExprState * ExecInitQual(List *qual, PlanState *parent)
Definition: execExpr.c:229
ProjectionInfo * ExecBuildUpdateProjection(List *targetList, bool evalTargetList, List *targetColnos, TupleDesc relDesc, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent)
Definition: execExpr.c:547
void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
Definition: execIndexing.c:160
bool ExecCheckIndexConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, ItemPointer conflictTid, ItemPointer tupleid, List *arbiterIndexes)
Definition: execIndexing.c:536
List * ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool update, bool noDupErr, bool *specConflict, List *arbiterIndexes, bool onlySummarizing)
Definition: execIndexing.c:303
AttrNumber ExecFindJunkAttributeInTlist(List *targetlist, const char *attrName)
Definition: execJunk.c:222
LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
Definition: execMain.c:2365
ExecRowMark * ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
Definition: execMain.c:2391
ExecAuxRowMark * ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
Definition: execMain.c:2414
void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation, List *mergeActions)
Definition: execMain.c:1026
TupleTableSlot * EvalPlanQualSlot(EPQState *epqstate, Relation relation, Index rti)
Definition: execMain.c:2612
void EvalPlanQualBegin(EPQState *epqstate)
Definition: execMain.c:2765
bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool emitError)
Definition: execMain.c:1806
void EvalPlanQualInit(EPQState *epqstate, EState *parentestate, Plan *subplan, List *auxrowmarks, int epqParam, List *resultRelations)
Definition: execMain.c:2553
void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:2065
void EvalPlanQualEnd(EPQState *epqstate)
Definition: execMain.c:2996
void EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
Definition: execMain.c:2595
TupleTableSlot * EvalPlanQual(EPQState *epqstate, Relation relation, Index rti, TupleTableSlot *inputslot)
Definition: execMain.c:2484
void ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:1859
void ExecConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:1930
List * ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
Definition: execMain.c:1379
PartitionTupleRouting * ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
ResultRelInfo * ExecFindPartition(ModifyTableState *mtstate, ResultRelInfo *rootResultRelInfo, PartitionTupleRouting *proute, TupleTableSlot *slot, EState *estate)
void ExecCleanupTupleRouting(ModifyTableState *mtstate, PartitionTupleRouting *proute)
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:562
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:142
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1425
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:84
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1441
TupleTableSlot * ExecStoreVirtualTuple(TupleTableSlot *slot)
Definition: execTuples.c:1739
void ExecInitResultTypeTL(PlanState *planstate)
Definition: execTuples.c:1942
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1986
TupleTableSlot * ExecStoreAllNullTuple(TupleTableSlot *slot)
Definition: execTuples.c:1763
void ExecForceStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1656
TupleConversionMap * ExecGetRootToChildMap(ResultRelInfo *resultRelInfo, EState *estate)
Definition: execUtils.c:1305
TupleConversionMap * ExecGetChildToRootMap(ResultRelInfo *resultRelInfo)
Definition: execUtils.c:1279
Bitmapset * ExecGetUpdatedCols(ResultRelInfo *relinfo, EState *estate)
Definition: execUtils.c:1361
void ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Index rti)
Definition: execUtils.c:859
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:485
TupleTableSlot * ExecGetAllNullSlot(EState *estate, ResultRelInfo *relInfo)
Definition: execUtils.c:1252
TupleTableSlot * ExecGetReturningSlot(EState *estate, ResultRelInfo *relInfo)
Definition: execUtils.c:1227
#define MERGE_UPDATE
Definition: execnodes.h:1370
#define InstrCountFiltered1(node, delta)
Definition: execnodes.h:1245
#define EEO_FLAG_HAS_OLD
Definition: execnodes.h:78
#define outerPlanState(node)
Definition: execnodes.h:1237
#define InstrCountTuples2(node, delta)
Definition: execnodes.h:1240
#define MERGE_INSERT
Definition: execnodes.h:1369
#define EEO_FLAG_NEW_IS_NULL
Definition: execnodes.h:84
#define EEO_FLAG_OLD_IS_NULL
Definition: execnodes.h:82
#define EEO_FLAG_HAS_NEW
Definition: execnodes.h:80
#define MERGE_DELETE
Definition: execnodes.h:1371
#define EXEC_FLAG_BACKWARD
Definition: executor.h:68
#define ResetPerTupleExprContext(estate)
Definition: executor.h:572
static TupleTableSlot * ExecProject(ProjectionInfo *projInfo)
Definition: executor.h:389
#define GetPerTupleExprContext(estate)
Definition: executor.h:563
#define ResetExprContext(econtext)
Definition: executor.h:557
#define GetPerTupleMemoryContext(estate)
Definition: executor.h:568
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition: executor.h:426
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:267
#define EvalPlanQualSetSlot(epqstate, slot)
Definition: executor.h:242
static Datum ExecEvalExpr(ExprState *state, ExprContext *econtext, bool *isNull)
Definition: executor.h:346
#define EXEC_FLAG_EXPLAIN_ONLY
Definition: executor.h:65
static Datum ExecGetJunkAttribute(TupleTableSlot *slot, AttrNumber attno, bool *isNull)
Definition: executor.h:184
#define EXEC_FLAG_MARK
Definition: executor.h:69
#define DatumGetHeapTupleHeader(X)
Definition: fmgr.h:295
char * format_type_be(Oid type_oid)
Definition: format_type.c:343
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
#define HeapTupleHeaderGetDatumLength(tup)
Definition: htup_details.h:450
long val
Definition: informix.c:689
int j
Definition: isn.c:73
int i
Definition: isn.c:72
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76
static void ItemPointerSetInvalid(ItemPointerData *pointer)
Definition: itemptr.h:184
static bool ItemPointerIndicatesMovedPartitions(const ItemPointerData *pointer)
Definition: itemptr.h:197
ItemPointerData * ItemPointer
Definition: itemptr.h:49
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
Definition: itemptr.h:172
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Definition: itemptr.h:83
List * lappend(List *list, void *datum)
Definition: list.c:339
List * lcons(void *datum, List *list)
Definition: list.c:495
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:682
void list_free(List *list)
Definition: list.c:1546
void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode)
Definition: lmgr.c:557
uint32 SpeculativeInsertionLockAcquire(TransactionId xid)
Definition: lmgr.c:771
void SpeculativeInsertionLockRelease(TransactionId xid)
Definition: lmgr.c:797
void UnlockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode)
Definition: lmgr.c:594
#define InplaceUpdateTupleLock
Definition: lockdefs.h:48
@ LockWaitBlock
Definition: lockoptions.h:39
LockTupleMode
Definition: lockoptions.h:50
@ LockTupleExclusive
Definition: lockoptions.h:58
void * palloc0(Size size)
Definition: mcxt.c:1347
void * palloc(Size size)
Definition: mcxt.c:1317
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:466
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:42
static void ExecInitInsertProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
ResultRelInfo * ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache)
static void ExecPendingInserts(EState *estate)
static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
void ExecInitMergeTupleSlots(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
struct ModifyTableContext ModifyTableContext
static void ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
static TupleTableSlot * ExecInsert(ModifyTableContext *context, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, bool canSetTag, TupleTableSlot **inserted_tuple, ResultRelInfo **insert_destrel)
ModifyTableState * ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
static TupleTableSlot * ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag, bool *matched)
static TupleTableSlot * ExecModifyTable(PlanState *pstate)
void ExecInitStoredGenerated(ResultRelInfo *resultRelInfo, EState *estate, CmdType cmdtype)
static bool ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot **epqreturnslot, TM_Result *result)
static void ExecCheckPlanOutput(Relation resultRel, List *targetList)
TupleTableSlot * ExecGetUpdateNewTuple(ResultRelInfo *relinfo, TupleTableSlot *planSlot, TupleTableSlot *oldSlot)
static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context, ResultRelInfo *sourcePartInfo, ResultRelInfo *destPartInfo, ItemPointer tupleid, TupleTableSlot *oldslot, TupleTableSlot *newslot)
static void ExecInitUpdateProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
static void ExecCheckTIDVisible(EState *estate, ResultRelInfo *relinfo, ItemPointer tid, TupleTableSlot *tempSlot)
static TM_Result ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, bool changingPart)
static void ExecCheckTupleVisible(EState *estate, Relation rel, TupleTableSlot *slot)
static TupleTableSlot * ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot, TupleTableSlot *slot, bool canSetTag)
void ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, EState *estate, TupleTableSlot *slot, CmdType cmdtype)
static TupleTableSlot * ExecPrepareTupleRouting(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, ResultRelInfo *targetRelInfo, TupleTableSlot *slot, ResultRelInfo **partRelInfo)
static TupleTableSlot * ExecGetInsertNewTuple(ResultRelInfo *relinfo, TupleTableSlot *planSlot)
struct MTTargetRelLookup MTTargetRelLookup
struct UpdateContext UpdateContext
#define MT_NRELS_HASH
static TM_Result ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, bool canSetTag, UpdateContext *updateCxt)
static void ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot)
static void fireBSTriggers(ModifyTableState *node)
void ExecReScanModifyTable(ModifyTableState *node)
static TupleTableSlot * ExecDelete(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool processReturning, bool changingPart, bool canSetTag, TM_Result *tmresult, bool *tupleDeleted, TupleTableSlot **epqreturnslot)
void ExecEndModifyTable(ModifyTableState *node)
static void fireASTriggers(ModifyTableState *node)
static bool ExecOnConflictUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer conflictTid, TupleTableSlot *excludedSlot, bool canSetTag, TupleTableSlot **returning)
static void ExecBatchInsert(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo, TupleTableSlot **slots, TupleTableSlot **planSlots, int numSlots, EState *estate, bool canSetTag)
static bool ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, TM_Result *result)
static void ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
static TupleTableSlot * ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, bool canSetTag)
static TupleTableSlot * ExecProcessReturning(ModifyTableContext *context, ResultRelInfo *resultRelInfo, CmdType cmdType, TupleTableSlot *oldSlot, TupleTableSlot *newSlot, TupleTableSlot *planSlot)
static TupleTableSlot * ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
static bool ExecCrossPartitionUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, bool canSetTag, UpdateContext *updateCxt, TM_Result *tmresult, TupleTableSlot **retry_slot, TupleTableSlot **inserted_tuple, ResultRelInfo **insert_destrel)
static void ExecInitMerge(ModifyTableState *mtstate, EState *estate)
#define IsA(nodeptr, _type_)
Definition: nodes.h:158
OnConflictAction
Definition: nodes.h:417
@ ONCONFLICT_NONE
Definition: nodes.h:418
@ ONCONFLICT_UPDATE
Definition: nodes.h:420
@ ONCONFLICT_NOTHING
Definition: nodes.h:419
CmdType
Definition: nodes.h:263
@ CMD_MERGE
Definition: nodes.h:269
@ CMD_INSERT
Definition: nodes.h:267
@ CMD_DELETE
Definition: nodes.h:268
@ CMD_UPDATE
Definition: nodes.h:266
@ CMD_NOTHING
Definition: nodes.h:272
#define makeNode(_type_)
Definition: nodes.h:155
#define castNode(_type_, nodeptr)
Definition: nodes.h:176
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
WCOKind
Definition: parsenodes.h:1367
@ WCO_RLS_MERGE_UPDATE_CHECK
Definition: parsenodes.h:1372
@ WCO_RLS_CONFLICT_CHECK
Definition: parsenodes.h:1371
@ WCO_RLS_INSERT_CHECK
Definition: parsenodes.h:1369
@ WCO_VIEW_CHECK
Definition: parsenodes.h:1368
@ WCO_RLS_UPDATE_CHECK
Definition: parsenodes.h:1370
@ WCO_RLS_MERGE_DELETE_CHECK
Definition: parsenodes.h:1373
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:200
#define lfirst(lc)
Definition: pg_list.h:172
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL