nodeModifyTable.c
1 /*-------------------------------------------------------------------------
2  *
3  * nodeModifyTable.c
4  * routines to handle ModifyTable nodes.
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/executor/nodeModifyTable.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /* INTERFACE ROUTINES
16  * ExecInitModifyTable - initialize the ModifyTable node
17  * ExecModifyTable - retrieve the next tuple from the node
18  * ExecEndModifyTable - shut down the ModifyTable node
19  * ExecReScanModifyTable - rescan the ModifyTable node
20  *
21  * NOTES
22  * The ModifyTable node receives input from its outerPlan, which is
23  * the data to insert for INSERT cases, the changed columns' new
24  * values plus row-locating info for UPDATE and MERGE cases, or just the
25  * row-locating info for DELETE cases.
26  *
27  * MERGE runs a join between the source relation and the target
28  * table; if any WHEN NOT MATCHED clauses are present, then the
29  * join is an outer join. In this case, any unmatched tuples will
30  * have NULL row-locating info, and only INSERT can be run. But for
31  * matched tuples, the row-locating info is used to determine the
32  * tuple to UPDATE or DELETE. When all clauses are WHEN MATCHED,
33  * then an inner join is used, so all tuples contain row-locating info.
34  *
35  * If the query specifies RETURNING, then the ModifyTable returns a
36  * RETURNING tuple after completing each row insert, update, or delete.
37  * It must be called again to continue the operation. Without RETURNING,
38  * we just loop within the node until all the work is done, then
39  * return NULL. This avoids useless call/return overhead. (MERGE does
40  * not support RETURNING.)
41  */
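
/*
 * For example, a statement such as
 *     INSERT INTO foo VALUES (1), (2) RETURNING *;
 * makes ExecModifyTable return one projected tuple per inserted row,
 * while the same INSERT without RETURNING runs to completion within a
 * single call and then returns NULL.
 */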
42 
43 #include "postgres.h"
44 
45 #include "access/heapam.h"
46 #include "access/htup_details.h"
47 #include "access/tableam.h"
48 #include "access/xact.h"
49 #include "catalog/catalog.h"
50 #include "commands/trigger.h"
51 #include "executor/execPartition.h"
52 #include "executor/executor.h"
53 #include "executor/nodeModifyTable.h"
54 #include "foreign/fdwapi.h"
55 #include "miscadmin.h"
56 #include "nodes/nodeFuncs.h"
57 #include "optimizer/optimizer.h"
58 #include "rewrite/rewriteHandler.h"
59 #include "storage/bufmgr.h"
60 #include "storage/lmgr.h"
61 #include "utils/builtins.h"
62 #include "utils/datum.h"
63 #include "utils/memutils.h"
64 #include "utils/rel.h"
65 
66 
67 typedef struct MTTargetRelLookup
68 {
69  Oid relationOid; /* hash key, must be first */
70  int relationIndex; /* rel's index in resultRelInfo[] array */
71 } MTTargetRelLookup;
72 
73 /*
74  * Context struct for a ModifyTable operation, containing basic execution
75  * state and some output variables populated by ExecUpdateAct() and
76  * ExecDeleteAct() to report the result of their actions to callers.
77  */
78 typedef struct ModifyTableContext
79 {
80  /* Operation state */
81  ModifyTableState *mtstate;
82  EPQState *epqstate;
83  EState *estate;
84 
85  /*
86  * Slot containing tuple obtained from ModifyTable's subplan. Used to
87  * access "junk" columns that are not going to be stored.
88  */
89  TupleTableSlot *planSlot;
90 
91  /* MERGE specific */
92  MergeActionState *relaction; /* MERGE action in progress */
93 
94  /*
95  * Information about the changes that were made concurrently to a tuple
96  * being updated or deleted
97  */
98  TM_FailureData tmfd;
99 
100  /*
101  * The tuple projected by the INSERT's RETURNING clause, when doing a
102  * cross-partition UPDATE
103  */
104  TupleTableSlot *cpUpdateReturningSlot;
105 } ModifyTableContext;
106 
107 /*
108  * Context struct containing output data specific to UPDATE operations.
109  */
110 typedef struct UpdateContext
111 {
112  bool updated; /* did UPDATE actually occur? */
113  bool crossPartUpdate; /* was it a cross-partition update? */
114  TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
115 
116  /*
117  * Lock mode to acquire on the latest tuple version before performing
118  * EvalPlanQual on it
119  */
120  LockTupleMode lockmode;
121 } UpdateContext;
122 
123 
124 static void ExecBatchInsert(ModifyTableState *mtstate,
125  ResultRelInfo *resultRelInfo,
126  TupleTableSlot **slots,
127  TupleTableSlot **planSlots,
128  int numSlots,
129  EState *estate,
130  bool canSetTag);
131 static void ExecPendingInserts(EState *estate);
132 static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
133  ResultRelInfo *sourcePartInfo,
134  ResultRelInfo *destPartInfo,
135  ItemPointer tupleid,
136  TupleTableSlot *oldslot,
137  TupleTableSlot *newslot);
138 static bool ExecOnConflictUpdate(ModifyTableContext *context,
139  ResultRelInfo *resultRelInfo,
140  ItemPointer conflictTid,
141  TupleTableSlot *excludedSlot,
142  bool canSetTag,
143  TupleTableSlot **returning);
144 static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
145  EState *estate,
146  PartitionTupleRouting *proute,
147  ResultRelInfo *targetRelInfo,
148  TupleTableSlot *slot,
149  ResultRelInfo **partRelInfo);
150 
151 static TupleTableSlot *ExecMerge(ModifyTableContext *context,
152  ResultRelInfo *resultRelInfo,
153  ItemPointer tupleid,
154  bool canSetTag);
155 static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
156 static bool ExecMergeMatched(ModifyTableContext *context,
157  ResultRelInfo *resultRelInfo,
158  ItemPointer tupleid,
159  bool canSetTag);
160 static void ExecMergeNotMatched(ModifyTableContext *context,
161  ResultRelInfo *resultRelInfo,
162  bool canSetTag);
163 
164 
165 /*
166  * Verify that the tuples to be produced by INSERT match the
167  * target relation's rowtype
168  *
169  * We do this to guard against stale plans. If plan invalidation is
170  * functioning properly then we should never get a failure here, but better
171  * safe than sorry. Note that this is called after we have obtained lock
172  * on the target rel, so the rowtype can't change underneath us.
173  *
174  * The plan output is represented by its targetlist, because that makes
175  * handling the dropped-column case easier.
176  *
177  * We used to use this for UPDATE as well, but now the equivalent checks
178  * are done in ExecBuildUpdateProjection.
179  */
180 static void
181 ExecCheckPlanOutput(Relation resultRel, List *targetList)
182 {
183  TupleDesc resultDesc = RelationGetDescr(resultRel);
184  int attno = 0;
185  ListCell *lc;
186 
187  foreach(lc, targetList)
188  {
189  TargetEntry *tle = (TargetEntry *) lfirst(lc);
190  Form_pg_attribute attr;
191 
192  Assert(!tle->resjunk); /* caller removed junk items already */
193 
194  if (attno >= resultDesc->natts)
195  ereport(ERROR,
196  (errcode(ERRCODE_DATATYPE_MISMATCH),
197  errmsg("table row type and query-specified row type do not match"),
198  errdetail("Query has too many columns.")));
199  attr = TupleDescAttr(resultDesc, attno);
200  attno++;
201 
202  if (!attr->attisdropped)
203  {
204  /* Normal case: demand type match */
205  if (exprType((Node *) tle->expr) != attr->atttypid)
206  ereport(ERROR,
207  (errcode(ERRCODE_DATATYPE_MISMATCH),
208  errmsg("table row type and query-specified row type do not match"),
209  errdetail("Table has type %s at ordinal position %d, but query expects %s.",
210  format_type_be(attr->atttypid),
211  attno,
212  format_type_be(exprType((Node *) tle->expr)))));
213  }
214  else
215  {
216  /*
217  * For a dropped column, we can't check atttypid (it's likely 0).
218  * In any case the planner has most likely inserted an INT4 null.
219  * What we insist on is just *some* NULL constant.
220  */
221  if (!IsA(tle->expr, Const) ||
222  !((Const *) tle->expr)->constisnull)
223  ereport(ERROR,
224  (errcode(ERRCODE_DATATYPE_MISMATCH),
225  errmsg("table row type and query-specified row type do not match"),
226  errdetail("Query provides a value for a dropped column at ordinal position %d.",
227  attno)));
228  }
229  }
230  if (attno != resultDesc->natts)
231  ereport(ERROR,
232  (errcode(ERRCODE_DATATYPE_MISMATCH),
233  errmsg("table row type and query-specified row type do not match"),
234  errdetail("Query has too few columns.")));
235 }
236 
237 /*
238  * ExecProcessReturning --- evaluate a RETURNING list
239  *
240  * resultRelInfo: current result rel
241  * tupleSlot: slot holding tuple actually inserted/updated/deleted
242  * planSlot: slot holding tuple returned by top subplan node
243  *
244  * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
245  * scan tuple.
246  *
247  * Returns a slot holding the result tuple
248  */
249 static TupleTableSlot *
250 ExecProcessReturning(ResultRelInfo *resultRelInfo,
251  TupleTableSlot *tupleSlot,
252  TupleTableSlot *planSlot)
253 {
254  ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
255  ExprContext *econtext = projectReturning->pi_exprContext;
256 
257  /* Make tuple and any needed join variables available to ExecProject */
258  if (tupleSlot)
259  econtext->ecxt_scantuple = tupleSlot;
260  econtext->ecxt_outertuple = planSlot;
261 
262  /*
263  * RETURNING expressions might reference the tableoid column, so
264  * reinitialize tts_tableOid before evaluating them.
265  */
266  econtext->ecxt_scantuple->tts_tableOid =
267  RelationGetRelid(resultRelInfo->ri_RelationDesc);
268 
269  /* Compute the RETURNING expressions */
270  return ExecProject(projectReturning);
271 }
272 
273 /*
274  * ExecCheckTupleVisible -- verify tuple is visible
275  *
276  * It would not be consistent with guarantees of the higher isolation levels to
277  * proceed with avoiding insertion (taking speculative insertion's alternative
278  * path) on the basis of another tuple that is not visible to MVCC snapshot.
279  * Check for the need to raise a serialization failure, and do so as necessary.
280  */
281 static void
282 ExecCheckTupleVisible(EState *estate,
283  Relation rel,
284  TupleTableSlot *slot)
285 {
286  if (!IsolationUsesXactSnapshot())
287  return;
288 
289  if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
290  {
291  Datum xminDatum;
292  TransactionId xmin;
293  bool isnull;
294 
295  xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
296  Assert(!isnull);
297  xmin = DatumGetTransactionId(xminDatum);
298 
299  /*
300  * We should not raise a serialization failure if the conflict is
301  * against a tuple inserted by our own transaction, even if it's not
302  * visible to our snapshot. (This would happen, for example, if
303  * conflicting keys are proposed for insertion in a single command.)
304  */
305  if (!TransactionIdIsCurrentTransactionId(xmin))
306  ereport(ERROR,
307  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
308  errmsg("could not serialize access due to concurrent update")));
309  }
310 }
311 
312 /*
313  * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
314  */
315 static void
316 ExecCheckTIDVisible(EState *estate,
317  ResultRelInfo *relinfo,
318  ItemPointer tid,
319  TupleTableSlot *tempSlot)
320 {
321  Relation rel = relinfo->ri_RelationDesc;
322 
323  /* Redundantly check isolation level */
324  if (!IsolationUsesXactSnapshot())
325  return;
326 
327  if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
328  elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
329  ExecCheckTupleVisible(estate, rel, tempSlot);
330  ExecClearTuple(tempSlot);
331 }
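
/*
 * As an illustration of the visibility check above: under REPEATABLE READ
 * or SERIALIZABLE, a statement such as
 *     INSERT INTO foo VALUES (1) ON CONFLICT DO NOTHING;
 * that finds a conflicting row committed by a concurrent transaction (and
 * therefore invisible to our snapshot) raises a serialization failure
 * rather than silently doing nothing against a row we cannot see.
 */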
332 
333 /*
334  * Initialize to compute stored generated columns for a tuple
335  *
336  * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
337  * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
338  * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
339  *
340  * Note: usually, a given query would need only one of ri_GeneratedExprsI and
341  * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
342  * cross-partition UPDATEs, since a partition might be the target of both
343  * UPDATE and INSERT actions.
344  */
345 void
346 ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
347  EState *estate,
348  CmdType cmdtype)
349 {
350  Relation rel = resultRelInfo->ri_RelationDesc;
351  TupleDesc tupdesc = RelationGetDescr(rel);
352  int natts = tupdesc->natts;
353  ExprState **ri_GeneratedExprs;
354  int ri_NumGeneratedNeeded;
355  Bitmapset *updatedCols;
356  MemoryContext oldContext;
357 
358  /* Nothing to do if no generated columns */
359  if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
360  return;
361 
362  /*
363  * In an UPDATE, we can skip computing any generated columns that do not
364  * depend on any UPDATE target column. But if there is a BEFORE ROW
365  * UPDATE trigger, we cannot skip because the trigger might change more
366  * columns.
367  */
368  if (cmdtype == CMD_UPDATE &&
369  !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
370  updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
371  else
372  updatedCols = NULL;
373 
374  /*
375  * Make sure these data structures are built in the per-query memory
376  * context so they'll survive throughout the query.
377  */
378  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
379 
380  ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
381  ri_NumGeneratedNeeded = 0;
382 
383  for (int i = 0; i < natts; i++)
384  {
385  if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
386  {
387  Expr *expr;
388 
389  /* Fetch the GENERATED AS expression tree */
390  expr = (Expr *) build_column_default(rel, i + 1);
391  if (expr == NULL)
392  elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
393  i + 1, RelationGetRelationName(rel));
394 
395  /*
396  * If it's an update with a known set of update target columns,
397  * see if we can skip the computation.
398  */
399  if (updatedCols)
400  {
401  Bitmapset *attrs_used = NULL;
402 
403  pull_varattnos((Node *) expr, 1, &attrs_used);
404 
405  if (!bms_overlap(updatedCols, attrs_used))
406  continue; /* need not update this column */
407  }
408 
409  /* No luck, so prepare the expression for execution */
410  ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
411  ri_NumGeneratedNeeded++;
412 
413  /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
414  if (cmdtype == CMD_UPDATE)
415  resultRelInfo->ri_extraUpdatedCols =
416  bms_add_member(resultRelInfo->ri_extraUpdatedCols,
417  i + 1 - FirstLowInvalidHeapAttributeNumber);
418  }
419  }
420 
421  /* Save in appropriate set of fields */
422  if (cmdtype == CMD_UPDATE)
423  {
424  /* Don't call twice */
425  Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
426 
427  resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
428  resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
429  }
430  else
431  {
432  /* Don't call twice */
433  Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
434 
435  resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
436  resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
437  }
438 
439  MemoryContextSwitchTo(oldContext);
440 }
441 
442 /*
443  * Compute stored generated columns for a tuple
444  */
445 void
446 ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
447  EState *estate, TupleTableSlot *slot,
448  CmdType cmdtype)
449 {
450  Relation rel = resultRelInfo->ri_RelationDesc;
451  TupleDesc tupdesc = RelationGetDescr(rel);
452  int natts = tupdesc->natts;
453  ExprContext *econtext = GetPerTupleExprContext(estate);
454  ExprState **ri_GeneratedExprs;
455  MemoryContext oldContext;
456  Datum *values;
457  bool *nulls;
458 
459  /* We should not be called unless this is true */
460  Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
461 
462  /*
463  * Initialize the expressions if we didn't already, and check whether we
464  * can exit early because nothing needs to be computed.
465  */
466  if (cmdtype == CMD_UPDATE)
467  {
468  if (resultRelInfo->ri_GeneratedExprsU == NULL)
469  ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
470  if (resultRelInfo->ri_NumGeneratedNeededU == 0)
471  return;
472  ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
473  }
474  else
475  {
476  if (resultRelInfo->ri_GeneratedExprsI == NULL)
477  ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
478  /* Early exit is impossible given the prior Assert */
479  Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
480  ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
481  }
482 
483  oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
484 
485  values = palloc(sizeof(*values) * natts);
486  nulls = palloc(sizeof(*nulls) * natts);
487 
488  slot_getallattrs(slot);
489  memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
490 
491  for (int i = 0; i < natts; i++)
492  {
493  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
494 
495  if (ri_GeneratedExprs[i])
496  {
497  Datum val;
498  bool isnull;
499 
500  Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);
501 
502  econtext->ecxt_scantuple = slot;
503 
504  val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
505 
506  /*
507  * We must make a copy of val as we have no guarantees about where
508  * memory for a pass-by-reference Datum is located.
509  */
510  if (!isnull)
511  val = datumCopy(val, attr->attbyval, attr->attlen);
512 
513  values[i] = val;
514  nulls[i] = isnull;
515  }
516  else
517  {
518  if (!nulls[i])
519  values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
520  }
521  }
522 
523  ExecClearTuple(slot);
524  memcpy(slot->tts_values, values, sizeof(*values) * natts);
525  memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
526  ExecStoreVirtualTuple(slot);
527  ExecMaterializeSlot(slot);
528 
529  MemoryContextSwitchTo(oldContext);
530 }
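
/*
 * To illustrate the skip logic set up in ExecInitStoredGenerated: given
 *     CREATE TABLE t (a int, b int GENERATED ALWAYS AS (a * 2) STORED);
 * an UPDATE that does not assign to column "a" (and fires no BEFORE ROW
 * UPDATE trigger) need not recompute "b", since the generation expression
 * depends only on columns that were not changed.
 */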
531 
532 /*
533  * ExecInitInsertProjection
534  * Do one-time initialization of projection data for INSERT tuples.
535  *
536  * INSERT queries may need a projection to filter out junk attrs in the tlist.
537  *
538  * This is also a convenient place to verify that the
539  * output of an INSERT matches the target table.
540  */
541 static void
542 ExecInitInsertProjection(ModifyTableState *mtstate,
543  ResultRelInfo *resultRelInfo)
544 {
545  ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
546  Plan *subplan = outerPlan(node);
547  EState *estate = mtstate->ps.state;
548  List *insertTargetList = NIL;
549  bool need_projection = false;
550  ListCell *l;
551 
552  /* Extract non-junk columns of the subplan's result tlist. */
553  foreach(l, subplan->targetlist)
554  {
555  TargetEntry *tle = (TargetEntry *) lfirst(l);
556 
557  if (!tle->resjunk)
558  insertTargetList = lappend(insertTargetList, tle);
559  else
560  need_projection = true;
561  }
562 
563  /*
564  * The junk-free list must produce a tuple suitable for the result
565  * relation.
566  */
567  ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
568 
569  /* We'll need a slot matching the table's format. */
570  resultRelInfo->ri_newTupleSlot =
571  table_slot_create(resultRelInfo->ri_RelationDesc,
572  &estate->es_tupleTable);
573 
574  /* Build ProjectionInfo if needed (it probably isn't). */
575  if (need_projection)
576  {
577  TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
578 
579  /* need an expression context to do the projection */
580  if (mtstate->ps.ps_ExprContext == NULL)
581  ExecAssignExprContext(estate, &mtstate->ps);
582 
583  resultRelInfo->ri_projectNew =
584  ExecBuildProjectionInfo(insertTargetList,
585  mtstate->ps.ps_ExprContext,
586  resultRelInfo->ri_newTupleSlot,
587  &mtstate->ps,
588  relDesc);
589  }
590 
591  resultRelInfo->ri_projectNewInfoValid = true;
592 }
593 
594 /*
595  * ExecInitUpdateProjection
596  * Do one-time initialization of projection data for UPDATE tuples.
597  *
598  * UPDATE always needs a projection, because (1) there's always some junk
599  * attrs, and (2) we may need to merge values of not-updated columns from
600  * the old tuple into the final tuple. In UPDATE, the tuple arriving from
601  * the subplan contains only new values for the changed columns, plus row
602  * identity info in the junk attrs.
603  *
604  * This is "one-time" for any given result rel, but we might touch more than
605  * one result rel in the course of an inherited UPDATE, and each one needs
606  * its own projection due to possible column order variation.
607  *
608  * This is also a convenient place to verify that the output of an UPDATE
609  * matches the target table (ExecBuildUpdateProjection does that).
610  */
611 static void
612 ExecInitUpdateProjection(ModifyTableState *mtstate,
613  ResultRelInfo *resultRelInfo)
614 {
615  ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
616  Plan *subplan = outerPlan(node);
617  EState *estate = mtstate->ps.state;
618  TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
619  int whichrel;
620  List *updateColnos;
621 
622  /*
623  * Usually, mt_lastResultIndex matches the target rel. If it happens not
624  * to, we can get the index the hard way with an integer division.
625  */
626  whichrel = mtstate->mt_lastResultIndex;
627  if (resultRelInfo != mtstate->resultRelInfo + whichrel)
628  {
629  whichrel = resultRelInfo - mtstate->resultRelInfo;
630  Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
631  }
632 
633  updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
634 
635  /*
636  * For UPDATE, we use the old tuple to fill up missing values in the tuple
637  * produced by the subplan to get the new tuple. We need two slots, both
638  * matching the table's desired format.
639  */
640  resultRelInfo->ri_oldTupleSlot =
641  table_slot_create(resultRelInfo->ri_RelationDesc,
642  &estate->es_tupleTable);
643  resultRelInfo->ri_newTupleSlot =
644  table_slot_create(resultRelInfo->ri_RelationDesc,
645  &estate->es_tupleTable);
646 
647  /* need an expression context to do the projection */
648  if (mtstate->ps.ps_ExprContext == NULL)
649  ExecAssignExprContext(estate, &mtstate->ps);
650 
651  resultRelInfo->ri_projectNew =
652  ExecBuildUpdateProjection(subplan->targetlist,
653  false, /* subplan did the evaluation */
654  updateColnos,
655  relDesc,
656  mtstate->ps.ps_ExprContext,
657  resultRelInfo->ri_newTupleSlot,
658  &mtstate->ps);
659 
660  resultRelInfo->ri_projectNewInfoValid = true;
661 }
662 
663 /*
664  * ExecGetInsertNewTuple
665  * This prepares a "new" tuple ready to be inserted into given result
666  * relation, by removing any junk columns of the plan's output tuple
667  * and (if necessary) coercing the tuple to the right tuple format.
668  */
669 static TupleTableSlot *
670 ExecGetInsertNewTuple(ResultRelInfo *relinfo,
671  TupleTableSlot *planSlot)
672 {
673  ProjectionInfo *newProj = relinfo->ri_projectNew;
674  ExprContext *econtext;
675 
676  /*
677  * If there's no projection to be done, just make sure the slot is of the
678  * right type for the target rel. If the planSlot is the right type we
679  * can use it as-is, else copy the data into ri_newTupleSlot.
680  */
681  if (newProj == NULL)
682  {
683  if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
684  {
685  ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
686  return relinfo->ri_newTupleSlot;
687  }
688  else
689  return planSlot;
690  }
691 
692  /*
693  * Else project; since the projection output slot is ri_newTupleSlot, this
694  * will also fix any slot-type problem.
695  *
696  * Note: currently, this is dead code, because INSERT cases don't receive
697  * any junk columns so there's never a projection to be done.
698  */
699  econtext = newProj->pi_exprContext;
700  econtext->ecxt_outertuple = planSlot;
701  return ExecProject(newProj);
702 }
703 
704 /*
705  * ExecGetUpdateNewTuple
706  * This prepares a "new" tuple by combining an UPDATE subplan's output
707  * tuple (which contains values of changed columns) with unchanged
708  * columns taken from the old tuple.
709  *
710  * The subplan tuple might also contain junk columns, which are ignored.
711  * Note that the projection also ensures we have a slot of the right type.
712  */
713 TupleTableSlot *
714 ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
715  TupleTableSlot *planSlot,
716  TupleTableSlot *oldSlot)
717 {
718  ProjectionInfo *newProj = relinfo->ri_projectNew;
719  ExprContext *econtext;
720 
721  /* Use a few extra Asserts to protect against outside callers */
722  Assert(relinfo->ri_projectNewInfoValid);
723  Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
724  Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
725 
726  econtext = newProj->pi_exprContext;
727  econtext->ecxt_outertuple = planSlot;
728  econtext->ecxt_scantuple = oldSlot;
729  return ExecProject(newProj);
730 }
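
/*
 * For instance, in
 *     UPDATE t SET b = b + 1 WHERE a = 42;
 * the subplan emits only the new value of "b" plus junk row-identity
 * columns; ExecGetUpdateNewTuple's projection merges that with the old
 * tuple so the resulting slot carries values for every column of the table.
 */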
731 
732 /* ----------------------------------------------------------------
733  * ExecInsert
734  *
735  * For INSERT, we have to insert the tuple into the target relation
736  * (or partition thereof) and insert appropriate tuples into the index
737  * relations.
738  *
739  * slot contains the new tuple value to be stored.
740  *
741  * Returns RETURNING result if any, otherwise NULL.
742  * *inserted_tuple is the tuple that's effectively inserted;
743  * *insert_destrel is the relation where it was inserted.
744  * These are only set on success.
745  *
746  * This may change the currently active tuple conversion map in
747  * mtstate->mt_transition_capture, so the callers must take care to
748  * save the previous value to avoid losing track of it.
749  * ----------------------------------------------------------------
750  */
751 static TupleTableSlot *
752 ExecInsert(ModifyTableContext *context,
753  ResultRelInfo *resultRelInfo,
754  TupleTableSlot *slot,
755  bool canSetTag,
756  TupleTableSlot **inserted_tuple,
757  ResultRelInfo **insert_destrel)
758 {
759  ModifyTableState *mtstate = context->mtstate;
760  EState *estate = context->estate;
761  Relation resultRelationDesc;
762  List *recheckIndexes = NIL;
763  TupleTableSlot *planSlot = context->planSlot;
764  TupleTableSlot *result = NULL;
765  TransitionCaptureState *ar_insert_trig_tcs;
766  ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
767  OnConflictAction onconflict = node->onConflictAction;
768  PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
769  MemoryContext oldContext;
770 
771  /*
772  * If the input result relation is a partitioned table, find the leaf
773  * partition to insert the tuple into.
774  */
775  if (proute)
776  {
777  ResultRelInfo *partRelInfo;
778 
779  slot = ExecPrepareTupleRouting(mtstate, estate, proute,
780  resultRelInfo, slot,
781  &partRelInfo);
782  resultRelInfo = partRelInfo;
783  }
784 
785  ExecMaterializeSlot(slot);
786 
787  resultRelationDesc = resultRelInfo->ri_RelationDesc;
788 
789  /*
790  * Open the table's indexes, if we have not done so already, so that we
791  * can add new index entries for the inserted tuple.
792  */
793  if (resultRelationDesc->rd_rel->relhasindex &&
794  resultRelInfo->ri_IndexRelationDescs == NULL)
795  ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
796 
797  /*
798  * BEFORE ROW INSERT Triggers.
799  *
800  * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
801  * INSERT ... ON CONFLICT statement. We cannot check for constraint
802  * violations before firing these triggers, because they can change the
803  * values to insert. Also, they can run arbitrary user-defined code with
804  * side-effects that we can't cancel by just not inserting the tuple.
805  */
806  if (resultRelInfo->ri_TrigDesc &&
807  resultRelInfo->ri_TrigDesc->trig_insert_before_row)
808  {
809  /* Flush any pending inserts, so rows are visible to the triggers */
810  if (estate->es_insert_pending_result_relations != NIL)
811  ExecPendingInserts(estate);
812 
813  if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
814  return NULL; /* "do nothing" */
815  }
816 
817  /* INSTEAD OF ROW INSERT Triggers */
818  if (resultRelInfo->ri_TrigDesc &&
819  resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
820  {
821  if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
822  return NULL; /* "do nothing" */
823  }
824  else if (resultRelInfo->ri_FdwRoutine)
825  {
826  /*
827  * GENERATED expressions might reference the tableoid column, so
828  * (re-)initialize tts_tableOid before evaluating them.
829  */
830  slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
831 
832  /*
833  * Compute stored generated columns
834  */
835  if (resultRelationDesc->rd_att->constr &&
836  resultRelationDesc->rd_att->constr->has_generated_stored)
837  ExecComputeStoredGenerated(resultRelInfo, estate, slot,
838  CMD_INSERT);
839 
840  /*
841  * If the FDW supports batching, and batching is requested, accumulate
842  * rows and insert them in batches. Otherwise use the per-row inserts.
843  */
844  if (resultRelInfo->ri_BatchSize > 1)
845  {
846  bool flushed = false;
847 
848  /*
849  * When we've reached the desired batch size, perform the
850  * insertion.
851  */
852  if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
853  {
854  ExecBatchInsert(mtstate, resultRelInfo,
855  resultRelInfo->ri_Slots,
856  resultRelInfo->ri_PlanSlots,
857  resultRelInfo->ri_NumSlots,
858  estate, canSetTag);
859  flushed = true;
860  }
861 
862  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
863 
864  if (resultRelInfo->ri_Slots == NULL)
865  {
866  resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
867  resultRelInfo->ri_BatchSize);
868  resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
869  resultRelInfo->ri_BatchSize);
870  }
871 
872  /*
873  * Initialize the batch slots. We don't know how many slots will
874  * be needed, so we initialize them as the batch grows, and we
875  * keep them across batches. To mitigate an inefficiency in how
876  * resource owner handles objects with many references (as with
877  * many slots all referencing the same tuple descriptor) we copy
878  * the appropriate tuple descriptor for each slot.
879  */
880  if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
881  {
882  TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
883  TupleDesc plan_tdesc =
884  CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
885 
886  resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
887  MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
888 
889  resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
890  MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
891 
892  /* remember how many batch slots we initialized */
893  resultRelInfo->ri_NumSlotsInitialized++;
894  }
895 
896  ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
897  slot);
898 
899  ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
900  planSlot);
901 
902  /*
903  * If these are the first tuples stored in the buffers, add the
904  * target rel and the mtstate to the
905  * es_insert_pending_result_relations and
906  * es_insert_pending_modifytables lists respectively, except in
907  * the case where flushing was done above, in which case they
908  * would already have been added to the lists, so no need to do
909  * this.
910  */
911  if (resultRelInfo->ri_NumSlots == 0 && !flushed)
912  {
913  Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
914  resultRelInfo));
915  estate->es_insert_pending_result_relations =
916  lappend(estate->es_insert_pending_result_relations,
917  resultRelInfo);
918  estate->es_insert_pending_modifytables =
919  lappend(estate->es_insert_pending_modifytables, mtstate);
920  }
921  Assert(list_member_ptr(estate->es_insert_pending_result_relations,
922  resultRelInfo));
923 
924  resultRelInfo->ri_NumSlots++;
925 
926  MemoryContextSwitchTo(oldContext);
927 
928  return NULL;
929  }
930 
931  /*
932  * insert into foreign table: let the FDW do it
933  */
934  slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
935  resultRelInfo,
936  slot,
937  planSlot);
938 
939  if (slot == NULL) /* "do nothing" */
940  return NULL;
941 
942  /*
943  * AFTER ROW Triggers or RETURNING expressions might reference the
944  * tableoid column, so (re-)initialize tts_tableOid before evaluating
945  * them. (This covers the case where the FDW replaced the slot.)
946  */
947  slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
948  }
949  else
950  {
951  WCOKind wco_kind;
952 
953  /*
954  * Constraints and GENERATED expressions might reference the tableoid
955  * column, so (re-)initialize tts_tableOid before evaluating them.
956  */
957  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
958 
959  /*
960  * Compute stored generated columns
961  */
962  if (resultRelationDesc->rd_att->constr &&
963  resultRelationDesc->rd_att->constr->has_generated_stored)
964  ExecComputeStoredGenerated(resultRelInfo, estate, slot,
965  CMD_INSERT);
966 
967  /*
968  * Check any RLS WITH CHECK policies.
969  *
970  * Normally we should check INSERT policies. But if the insert is the
971  * result of a partition key update that moved the tuple to a new
972  * partition, we should instead check UPDATE policies, because we are
973  * executing policies defined on the target table, and not those
974  * defined on the child partitions.
975  *
976  * If we're running MERGE, we refer to the action that we're executing
977  * to know if we're doing an INSERT or UPDATE to a partition table.
978  */
979  if (mtstate->operation == CMD_UPDATE)
980  wco_kind = WCO_RLS_UPDATE_CHECK;
981  else if (mtstate->operation == CMD_MERGE)
982  wco_kind = (context->relaction->mas_action->commandType == CMD_UPDATE) ?
983  WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
984  else
985  wco_kind = WCO_RLS_INSERT_CHECK;
986 
987  /*
988  * ExecWithCheckOptions() will skip any WCOs which are not of the kind
989  * we are looking for at this point.
990  */
991  if (resultRelInfo->ri_WithCheckOptions != NIL)
992  ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
993 
994  /*
995  * Check the constraints of the tuple.
996  */
997  if (resultRelationDesc->rd_att->constr)
998  ExecConstraints(resultRelInfo, slot, estate);
999 
1000  /*
1001  * Also check the tuple against the partition constraint, if there is
1002  * one; except that if we got here via tuple-routing, we don't need to
1003  * if there's no BR trigger defined on the partition.
1004  */
1005  if (resultRelationDesc->rd_rel->relispartition &&
1006  (resultRelInfo->ri_RootResultRelInfo == NULL ||
1007  (resultRelInfo->ri_TrigDesc &&
1008  resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1009  ExecPartitionCheck(resultRelInfo, slot, estate, true);
1010 
1011  if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1012  {
1013  /* Perform a speculative insertion. */
1014  uint32 specToken;
1015  ItemPointerData conflictTid;
1016  bool specConflict;
1017  List *arbiterIndexes;
1018 
1019  arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1020 
1021  /*
1022  * Do a non-conclusive check for conflicts first.
1023  *
1024  * We're not holding any locks yet, so this doesn't guarantee that
1025  * the later insert won't conflict. But it avoids leaving behind
1026  * a lot of canceled speculative insertions, if you run a lot of
1027  * INSERT ON CONFLICT statements that do conflict.
1028  *
1029  * We loop back here if we find a conflict below, either during
1030  * the pre-check, or when we re-check after inserting the tuple
1031  * speculatively. Better allow interrupts in case some bug makes
1032  * this an infinite loop.
1033  */
1034  vlock:
1035  CHECK_FOR_INTERRUPTS();
1036  specConflict = false;
1037  if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1038  &conflictTid, arbiterIndexes))
1039  {
1040  /* committed conflict tuple found */
1041  if (onconflict == ONCONFLICT_UPDATE)
1042  {
1043  /*
1044  * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1045  * part. Be prepared to retry if the UPDATE fails because
1046  * of another concurrent UPDATE/DELETE to the conflict
1047  * tuple.
1048  */
1049  TupleTableSlot *returning = NULL;
1050 
1051  if (ExecOnConflictUpdate(context, resultRelInfo,
1052  &conflictTid, slot, canSetTag,
1053  &returning))
1054  {
1055  InstrCountTuples2(&mtstate->ps, 1);
1056  return returning;
1057  }
1058  else
1059  goto vlock;
1060  }
1061  else
1062  {
1063  /*
1064  * In case of ON CONFLICT DO NOTHING, do nothing. However,
1065  * verify that the tuple is visible to the executor's MVCC
1066  * snapshot at higher isolation levels.
1067  *
1068  * Using ExecGetReturningSlot() to store the tuple for the
1069  * recheck isn't that pretty, but we can't trivially use
1070  * the input slot, because it might not be of a compatible
1071  * type. As there's no conflicting usage of
1072  * ExecGetReturningSlot() in the DO NOTHING case...
1073  */
1074  Assert(onconflict == ONCONFLICT_NOTHING);
1075  ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1076  ExecGetReturningSlot(estate, resultRelInfo));
1077  InstrCountTuples2(&mtstate->ps, 1);
1078  return NULL;
1079  }
1080  }
1081 
1082  /*
1083  * Before we start insertion proper, acquire our "speculative
1084  * insertion lock". Others can use that to wait for us to decide
1085  * if we're going to go ahead with the insertion, instead of
1086  * waiting for the whole transaction to complete.
1087  */
1088  specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1089 
1090  /* insert the tuple, with the speculative token */
1091  table_tuple_insert_speculative(resultRelationDesc, slot,
1092  estate->es_output_cid,
1093  0,
1094  NULL,
1095  specToken);
1096 
1097  /* insert index entries for tuple */
1098  recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1099  slot, estate, false, true,
1100  &specConflict,
1101  arbiterIndexes,
1102  false);
1103 
1104  /* adjust the tuple's state accordingly */
1105  table_tuple_complete_speculative(resultRelationDesc, slot,
1106  specToken, !specConflict);
1107 
1108  /*
1109  * Wake up anyone waiting for our decision. They will re-check
1110  * the tuple, see that it's no longer speculative, and wait on our
1111  * XID as if this was a regularly inserted tuple all along. Or if
1112  * we killed the tuple, they will see it's dead, and proceed as if
1113  * the tuple never existed.
1114  */
1115  SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1116 
1117  /*
1118  * If there was a conflict, start from the beginning. We'll do
1119  * the pre-check again, which will now find the conflicting tuple
1120  * (unless it aborts before we get there).
1121  */
1122  if (specConflict)
1123  {
1124  list_free(recheckIndexes);
1125  goto vlock;
1126  }
1127 
1128  /* Since there was no insertion conflict, we're done */
1129  }
1130  else
1131  {
1132  /* insert the tuple normally */
1133  table_tuple_insert(resultRelationDesc, slot,
1134  estate->es_output_cid,
1135  0, NULL);
1136 
1137  /* insert index entries for tuple */
1138  if (resultRelInfo->ri_NumIndices > 0)
1139  recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1140  slot, estate, false,
1141  false, NULL, NIL,
1142  false);
1143  }
1144  }
1145 
1146  if (canSetTag)
1147  (estate->es_processed)++;
1148 
1149  /*
1150  * If this insert is the result of a partition key update that moved the
1151  * tuple to a new partition, put this row into the transition NEW TABLE,
1152  * if there is one. We need to do this separately for DELETE and INSERT
1153  * because they happen on different tables.
1154  */
1155  ar_insert_trig_tcs = mtstate->mt_transition_capture;
1156  if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1157  && mtstate->mt_transition_capture->tcs_update_new_table)
1158  {
1159  ExecARUpdateTriggers(estate, resultRelInfo,
1160  NULL, NULL,
1161  NULL,
1162  NULL,
1163  slot,
1164  NULL,
1165  mtstate->mt_transition_capture,
1166  false);
1167 
1168  /*
1169  * We've already captured the NEW TABLE row, so make sure any AR
1170  * INSERT trigger fired below doesn't capture it again.
1171  */
1172  ar_insert_trig_tcs = NULL;
1173  }
1174 
1175  /* AFTER ROW INSERT Triggers */
1176  ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1177  ar_insert_trig_tcs);
1178 
1179  list_free(recheckIndexes);
1180 
1181  /*
1182  * Check any WITH CHECK OPTION constraints from parent views. We are
1183  * required to do this after testing all constraints and uniqueness
1184  * violations per the SQL spec, so we do it after actually inserting the
1185  * record into the heap and all indexes.
1186  *
1187  * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1188  * tuple will never be seen, if it violates the WITH CHECK OPTION.
1189  *
1190  * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1191  * are looking for at this point.
1192  */
1193  if (resultRelInfo->ri_WithCheckOptions != NIL)
1194  ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1195 
1196  /* Process RETURNING if present */
1197  if (resultRelInfo->ri_projectReturning)
1198  result = ExecProcessReturning(resultRelInfo, slot, planSlot);
1199 
1200  if (inserted_tuple)
1201  *inserted_tuple = slot;
1202  if (insert_destrel)
1203  *insert_destrel = resultRelInfo;
1204 
1205  return result;
1206 }
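
/*
 * For example, a statement such as
 *     INSERT INTO t (id, val) VALUES (1, 'x')
 *         ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val;
 * follows the speculative-insertion path above: pre-check the arbiter
 * index, insert the tuple speculatively, and then either confirm the
 * insertion or kill it and let the next pass run the DO UPDATE action
 * against the conflicting row.
 */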
1207 
1208 /* ----------------------------------------------------------------
1209  * ExecBatchInsert
1210  *
1211  * Insert multiple tuples in an efficient way.
1212  * Currently, this handles inserting into a foreign table without
1213  * RETURNING clause.
1214  * ----------------------------------------------------------------
1215  */
1216 static void
1217 ExecBatchInsert(ModifyTableState *mtstate,
1218  ResultRelInfo *resultRelInfo,
1219  TupleTableSlot **slots,
1220  TupleTableSlot **planSlots,
1221  int numSlots,
1222  EState *estate,
1223  bool canSetTag)
1224 {
1225  int i;
1226  int numInserted = numSlots;
1227  TupleTableSlot *slot = NULL;
1228  TupleTableSlot **rslots;
1229 
1230  /*
1231  * insert into foreign table: let the FDW do it
1232  */
1233  rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1234  resultRelInfo,
1235  slots,
1236  planSlots,
1237  &numInserted);
1238 
1239  for (i = 0; i < numInserted; i++)
1240  {
1241  slot = rslots[i];
1242 
1243  /*
1244  * AFTER ROW Triggers might reference the tableoid column, so
1245  * (re-)initialize tts_tableOid before evaluating them.
1246  */
1247  slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1248 
1249  /* AFTER ROW INSERT Triggers */
1250  ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1251  mtstate->mt_transition_capture);
1252 
1253  /*
1254  * Check any WITH CHECK OPTION constraints from parent views. See the
1255  * comment in ExecInsert.
1256  */
1257  if (resultRelInfo->ri_WithCheckOptions != NIL)
1258  ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1259  }
1260 
1261  if (canSetTag && numInserted > 0)
1262  estate->es_processed += numInserted;
1263 
1264  /* Clean up all the slots, ready for the next batch */
1265  for (i = 0; i < numSlots; i++)
1266  {
1267  ExecClearTuple(slots[i]);
1268  ExecClearTuple(planSlots[i]);
1269  }
1270  resultRelInfo->ri_NumSlots = 0;
1271 }
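
/*
 * Batching applies, for instance, to a postgres_fdw foreign table whose
 * server or table sets a batch_size option greater than one: rows
 * accumulate in ri_Slots inside ExecInsert and are flushed through the
 * FDW's ExecForeignBatchInsert callback here once the batch fills up, or
 * via ExecPendingInserts when the pending rows must become visible (e.g.
 * before firing BEFORE ROW triggers) or at the end of the statement.
 */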
1272 
1273 /*
1274  * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1275  */
1276 static void
1277 ExecPendingInserts(EState *estate)
1278 {
1279  ListCell *l1,
1280  *l2;
1281 
1282  forboth(l1, estate->es_insert_pending_result_relations,
1283  l2, estate->es_insert_pending_modifytables)
1284  {
1285  ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1286  ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1287 
1288  Assert(mtstate);
1289  ExecBatchInsert(mtstate, resultRelInfo,
1290  resultRelInfo->ri_Slots,
1291  resultRelInfo->ri_PlanSlots,
1292  resultRelInfo->ri_NumSlots,
1293  estate, mtstate->canSetTag);
1294  }
1295 
1296  list_free(estate->es_insert_pending_result_relations);
1297  list_free(estate->es_insert_pending_modifytables);
1298  estate->es_insert_pending_result_relations = NIL;
1299  estate->es_insert_pending_modifytables = NIL;
1300 }
1301 
1302 /*
1303  * ExecDeletePrologue -- subroutine for ExecDelete
1304  *
1305  * Prepare executor state for DELETE. Actually, the only thing we have to do
1306  * here is execute BEFORE ROW triggers. We return false if one of them makes
1307  * the delete a no-op; otherwise, return true.
1308  */
1309 static bool
1310 ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1311  ItemPointer tupleid, HeapTuple oldtuple,
1312  TupleTableSlot **epqreturnslot, TM_Result *result)
1313 {
1314  if (result)
1315  *result = TM_Ok;
1316 
1317  /* BEFORE ROW DELETE triggers */
1318  if (resultRelInfo->ri_TrigDesc &&
1319  resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1320  {
1321  /* Flush any pending inserts, so rows are visible to the triggers */
1322  if (context->estate->es_insert_pending_result_relations != NIL)
1323  ExecPendingInserts(context->estate);
1324 
1325  return ExecBRDeleteTriggers(context->estate, context->epqstate,
1326  resultRelInfo, tupleid, oldtuple,
1327  epqreturnslot, result, &context->tmfd);
1328  }
1329 
1330  return true;
1331 }
1332 
1333 /*
1334  * ExecDeleteAct -- subroutine for ExecDelete
1335  *
1336  * Actually delete the tuple from a plain table.
1337  *
1338  * Caller is in charge of doing EvalPlanQual as necessary
1339  */
1340 static TM_Result
1341 ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1342  ItemPointer tupleid, bool changingPart)
1343 {
1344  EState *estate = context->estate;
1345 
1346  return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1347  estate->es_output_cid,
1348  estate->es_snapshot,
1349  estate->es_crosscheck_snapshot,
1350  true /* wait for commit */ ,
1351  &context->tmfd,
1352  changingPart);
1353 }
1354 
1355 /*
1356  * ExecDeleteEpilogue -- subroutine for ExecDelete
1357  *
1358  * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1359  * including the UPDATE triggers if the deletion is being done as part of a
1360  * cross-partition tuple move.
1361  */
1362 static void
1363 ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1364  ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1365 {
1366  ModifyTableState *mtstate = context->mtstate;
1367  EState *estate = context->estate;
1368  TransitionCaptureState *ar_delete_trig_tcs;
1369 
1370  /*
1371  * If this delete is the result of a partition key update that moved the
1372  * tuple to a new partition, put this row into the transition OLD TABLE,
1373  * if there is one. We need to do this separately for DELETE and INSERT
1374  * because they happen on different tables.
1375  */
1376  ar_delete_trig_tcs = mtstate->mt_transition_capture;
1377  if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1378  mtstate->mt_transition_capture->tcs_update_old_table)
1379  {
1380  ExecARUpdateTriggers(estate, resultRelInfo,
1381  NULL, NULL,
1382  tupleid, oldtuple,
1383  NULL, NULL, mtstate->mt_transition_capture,
1384  false);
1385 
1386  /*
1387  * We've already captured the OLD TABLE row, so make sure any AR
1388  * DELETE trigger fired below doesn't capture it again.
1389  */
1390  ar_delete_trig_tcs = NULL;
1391  }
1392 
1393  /* AFTER ROW DELETE Triggers */
1394  ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1395  ar_delete_trig_tcs, changingPart);
1396 }
1397 
1398 /* ----------------------------------------------------------------
1399  * ExecDelete
1400  *
1401  * DELETE is like UPDATE, except that we delete the tuple and no
1402  * index modifications are needed.
1403  *
1404  * When deleting from a table, tupleid identifies the tuple to
1405  * delete and oldtuple is NULL. When deleting from a view,
1406  * oldtuple is passed to the INSTEAD OF triggers and identifies
1407  * what to delete, and tupleid is invalid. When deleting from a
1408  * foreign table, tupleid is invalid; the FDW has to figure out
1409  * which row to delete using data from the planSlot. oldtuple is
1410  * passed to foreign table triggers; it is NULL when the foreign
1411  * table has no relevant triggers. We use tupleDeleted to indicate
1412  * whether the tuple is actually deleted; callers can use it to
1413  * decide whether to continue the operation. When this DELETE is a
1414  * part of an UPDATE of partition-key, then the slot returned by
1415  * EvalPlanQual() is passed back using output parameter epqreturnslot.
1416  *
1417  * Returns RETURNING result if any, otherwise NULL.
1418  * ----------------------------------------------------------------
1419  */
1420 static TupleTableSlot *
1421 ExecDelete(ModifyTableContext *context,
1422  ResultRelInfo *resultRelInfo,
1423  ItemPointer tupleid,
1424  HeapTuple oldtuple,
1425  bool processReturning,
1426  bool changingPart,
1427  bool canSetTag,
1428  bool *tupleDeleted,
1429  TupleTableSlot **epqreturnslot)
1430 {
1431  EState *estate = context->estate;
1432  Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1433  TupleTableSlot *slot = NULL;
1434  TM_Result result;
1435 
1436  if (tupleDeleted)
1437  *tupleDeleted = false;
1438 
1439  /*
1440  * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1441  * done if it says we are.
1442  */
1443  if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1444  epqreturnslot, NULL))
1445  return NULL;
1446 
1447  /* INSTEAD OF ROW DELETE Triggers */
1448  if (resultRelInfo->ri_TrigDesc &&
1449  resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1450  {
1451  bool dodelete;
1452 
1453  Assert(oldtuple != NULL);
1454  dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1455 
1456  if (!dodelete) /* "do nothing" */
1457  return NULL;
1458  }
1459  else if (resultRelInfo->ri_FdwRoutine)
1460  {
1461  /*
1462  * delete from foreign table: let the FDW do it
1463  *
1464  * We offer the returning slot as a place to store RETURNING data,
1465  * although the FDW can return some other slot if it wants.
1466  */
1467  slot = ExecGetReturningSlot(estate, resultRelInfo);
1468  slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1469  resultRelInfo,
1470  slot,
1471  context->planSlot);
1472 
1473  if (slot == NULL) /* "do nothing" */
1474  return NULL;
1475 
1476  /*
1477  * RETURNING expressions might reference the tableoid column, so
1478  * (re)initialize tts_tableOid before evaluating them.
1479  */
1480  if (TTS_EMPTY(slot))
1481  ExecStoreAllNullTuple(slot);
1482 
1483  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1484  }
1485  else
1486  {
1487  /*
1488  * delete the tuple
1489  *
1490  * Note: if context->estate->es_crosscheck_snapshot isn't
1491  * InvalidSnapshot, we check that the row to be deleted is visible to
1492  * that snapshot, and throw a can't-serialize error if not. This is a
1493  * special-case behavior needed for referential integrity updates in
1494  * transaction-snapshot mode transactions.
1495  */
1496 ldelete:
1497  result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1498 
1499  switch (result)
1500  {
1501  case TM_SelfModified:
1502 
1503  /*
1504  * The target tuple was already updated or deleted by the
1505  * current command, or by a later command in the current
1506  * transaction. The former case is possible in a join DELETE
1507  * where multiple tuples join to the same target tuple. This
1508  * is somewhat questionable, but Postgres has always allowed
1509  * it: we just ignore additional deletion attempts.
1510  *
1511  * The latter case arises if the tuple is modified by a
1512  * command in a BEFORE trigger, or perhaps by a command in a
1513  * volatile function used in the query. In such situations we
1514  * should not ignore the deletion, but it is equally unsafe to
1515  * proceed. We don't want to discard the original DELETE
1516  * while keeping the triggered actions based on its deletion;
1517  * and it would be no better to allow the original DELETE
1518  * while discarding updates that it triggered. The row update
1519  * carries some information that might be important according
1520  * to business rules; so throwing an error is the only safe
1521  * course.
1522  *
1523  * If a trigger actually intends this type of interaction, it
1524  * can re-execute the DELETE and then return NULL to cancel
1525  * the outer delete.
1526  */
1527  if (context->tmfd.cmax != estate->es_output_cid)
1528  ereport(ERROR,
1529  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1530  errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1531  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1532 
1533  /* Else, already deleted by self; nothing to do */
1534  return NULL;
1535 
1536  case TM_Ok:
1537  break;
1538 
1539  case TM_Updated:
1540  {
1541  TupleTableSlot *inputslot;
1542  TupleTableSlot *epqslot;
1543 
1544  if (IsolationUsesXactSnapshot())
1545  ereport(ERROR,
1546  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1547  errmsg("could not serialize access due to concurrent update")));
1548 
1549  /*
1550  * Already know that we're going to need to do EPQ, so
1551  * fetch tuple directly into the right slot.
1552  */
1553  EvalPlanQualBegin(context->epqstate);
1554  inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1555  resultRelInfo->ri_RangeTableIndex);
1556 
1557  result = table_tuple_lock(resultRelationDesc, tupleid,
1558  estate->es_snapshot,
1559  inputslot, estate->es_output_cid,
1560  LockTupleExclusive, LockWaitBlock,
1561  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1562  &context->tmfd);
1563 
1564  switch (result)
1565  {
1566  case TM_Ok:
1567  Assert(context->tmfd.traversed);
1568  epqslot = EvalPlanQual(context->epqstate,
1569  resultRelationDesc,
1570  resultRelInfo->ri_RangeTableIndex,
1571  inputslot);
1572  if (TupIsNull(epqslot))
1573  /* Tuple not passing quals anymore, exiting... */
1574  return NULL;
1575 
1576  /*
1577  * If requested, skip delete and pass back the
1578  * updated row.
1579  */
1580  if (epqreturnslot)
1581  {
1582  *epqreturnslot = epqslot;
1583  return NULL;
1584  }
1585  else
1586  goto ldelete;
1587 
1588  case TM_SelfModified:
1589 
1590  /*
1591  * This can be reached when following an update
1592  * chain from a tuple updated by another session,
1593  * reaching a tuple that was already updated in
1594  * this transaction. If previously updated by this
1595  * command, ignore the delete, otherwise error
1596  * out.
1597  *
1598  * See also TM_SelfModified response to
1599  * table_tuple_delete() above.
1600  */
1601  if (context->tmfd.cmax != estate->es_output_cid)
1602  ereport(ERROR,
1603  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1604  errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1605  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1606  return NULL;
1607 
1608  case TM_Deleted:
1609  /* tuple already deleted; nothing to do */
1610  return NULL;
1611 
1612  default:
1613 
1614  /*
1615  * TM_Invisible should be impossible because we're
1616  * waiting for updated row versions, and would
1617  * already have errored out if the first version
1618  * is invisible.
1619  *
1620  * TM_Updated should be impossible, because we're
1621  * locking the latest version via
1622  * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1623  */
1624  elog(ERROR, "unexpected table_tuple_lock status: %u",
1625  result);
1626  return NULL;
1627  }
1628 
1629  Assert(false);
1630  break;
1631  }
1632 
1633  case TM_Deleted:
1634  if (IsolationUsesXactSnapshot())
1635  ereport(ERROR,
1636  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1637  errmsg("could not serialize access due to concurrent delete")));
1638  /* tuple already deleted; nothing to do */
1639  return NULL;
1640 
1641  default:
1642  elog(ERROR, "unrecognized table_tuple_delete status: %u",
1643  result);
1644  return NULL;
1645  }
1646 
1647  /*
1648  * Note: Normally one would think that we have to delete index tuples
1649  * associated with the heap tuple now...
1650  *
1651  * ... but in POSTGRES, we have no need to do this because VACUUM will
1652  * take care of it later. We can't delete index tuples immediately
1653  * anyway, since the tuple is still visible to other transactions.
1654  */
1655  }
1656 
1657  if (canSetTag)
1658  (estate->es_processed)++;
1659 
1660  /* Tell caller that the delete actually happened. */
1661  if (tupleDeleted)
1662  *tupleDeleted = true;
1663 
1664  ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1665 
1666  /* Process RETURNING if present and if requested */
1667  if (processReturning && resultRelInfo->ri_projectReturning)
1668  {
1669  /*
1670  * We have to put the target tuple into a slot, which means first we
1671  * gotta fetch it. We can use the trigger tuple slot.
1672  */
1673  TupleTableSlot *rslot;
1674 
1675  if (resultRelInfo->ri_FdwRoutine)
1676  {
1677  /* FDW must have provided a slot containing the deleted row */
1678  Assert(!TupIsNull(slot));
1679  }
1680  else
1681  {
1682  slot = ExecGetReturningSlot(estate, resultRelInfo);
1683  if (oldtuple != NULL)
1684  {
1685  ExecForceStoreHeapTuple(oldtuple, slot, false);
1686  }
1687  else
1688  {
1689  if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1690  SnapshotAny, slot))
1691  elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1692  }
1693  }
1694 
1695  rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
1696 
1697  /*
1698  * Before releasing the target tuple again, make sure rslot has a
1699  * local copy of any pass-by-reference values.
1700  */
1701  ExecMaterializeSlot(rslot);
1702 
1703  ExecClearTuple(slot);
1704 
1705  return rslot;
1706  }
1707 
1708  return NULL;
1709 }
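
/*
 * Concurrency illustration: if a plain
 *     DELETE FROM t WHERE a = 42;
 * run at READ COMMITTED finds that its target row was concurrently
 * updated, the TM_Updated branch above locks the latest row version and
 * re-checks the quals via EvalPlanQual before retrying the delete; at
 * higher isolation levels the same situation raises a serialization
 * failure instead.
 */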
1710 
1711 /*
1712  * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1713  *
1714  * This works by first deleting the old tuple from the current partition,
1715  * followed by inserting the new tuple into the root parent table, that is,
1716  * mtstate->rootResultRelInfo. It will be re-routed from there to the
1717  * correct partition.
1718  *
1719  * Returns true if the tuple has been successfully moved, or if it's found
1720  * that the tuple was concurrently deleted so there's nothing more to do
1721  * for the caller.
1722  *
1723  * False is returned if the tuple we're trying to move is found to have been
1724  * concurrently updated. In that case, the caller must check if the updated
1725  * tuple that's returned in *retry_slot still needs to be re-routed, and call
1726  * this function again or perform a regular update accordingly. For MERGE,
1727  * the updated tuple is not returned in *retry_slot; it has its own retry
1728  * logic.
1729  */
1730 static bool
1731 ExecCrossPartitionUpdate(ModifyTableContext *context,
1732  ResultRelInfo *resultRelInfo,
1733  ItemPointer tupleid, HeapTuple oldtuple,
1734  TupleTableSlot *slot,
1735  bool canSetTag,
1736  UpdateContext *updateCxt,
1737  TupleTableSlot **retry_slot,
1738  TupleTableSlot **inserted_tuple,
1739  ResultRelInfo **insert_destrel)
1740 {
1741  ModifyTableState *mtstate = context->mtstate;
1742  EState *estate = mtstate->ps.state;
1743  TupleConversionMap *tupconv_map;
1744  bool tuple_deleted;
1745  TupleTableSlot *epqslot = NULL;
1746 
1747  context->cpUpdateReturningSlot = NULL;
1748  *retry_slot = NULL;
1749 
1750  /*
1751  * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1752  * to migrate to a different partition. Maybe this can be implemented
1753  * some day, but it seems a fringe feature with little redeeming value.
1754  */
1755  if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1756  ereport(ERROR,
1757  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1758  errmsg("invalid ON UPDATE specification"),
1759  errdetail("The result tuple would appear in a different partition than the original tuple.")));
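/*
 * Illustrative example (hypothetical names): the error above would be
 * raised by something like
 *
 *     INSERT INTO parted (part_key, val) VALUES (1, 'x')
 *         ON CONFLICT (part_key) DO UPDATE SET part_key = 2;
 *
 * whenever the assignment would move the conflicting row to a different
 * partition.
 */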
1760 
1761  /*
1762  * When an UPDATE is run directly on a leaf partition, simply fail with a
1763  * partition constraint violation error.
1764  */
1765  if (resultRelInfo == mtstate->rootResultRelInfo)
1766  ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1767 
1768  /* Initialize tuple routing info if not already done. */
1769  if (mtstate->mt_partition_tuple_routing == NULL)
1770  {
1771  Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1772  MemoryContext oldcxt;
1773 
1774  /* Things built here have to last for the query duration. */
1775  oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1776 
1777  mtstate->mt_partition_tuple_routing =
1778  ExecSetupPartitionTupleRouting(estate, rootRel);
1779 
1780  /*
1781  * Before a partition's tuple can be re-routed, it must first be
1782  * converted to the root's format, so we'll need a slot for storing
1783  * such tuples.
1784  */
1785  Assert(mtstate->mt_root_tuple_slot == NULL);
1786  mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1787 
1788  MemoryContextSwitchTo(oldcxt);
1789  }
1790 
1791  /*
1792  * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1793  * We want to return rows from INSERT.
1794  */
1795  ExecDelete(context, resultRelInfo,
1796  tupleid, oldtuple,
1797  false, /* processReturning */
1798  true, /* changingPart */
1799  false, /* canSetTag */
1800  &tuple_deleted, &epqslot);
1801 
1802  /*
1803  * If the DELETE didn't happen for some reason (e.g. a trigger prevented it,
1804  * it was already deleted by the current transaction, or it was concurrently
1805  * deleted by another transaction), then we should skip the INSERT as well;
1806  * otherwise, an UPDATE could cause an increase in the total number of
1807  * rows across all partitions, which is clearly wrong.
1808  *
1809  * For a normal UPDATE, the case where the tuple has been the subject of a
1810  * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1811  * machinery, but for an UPDATE that we've translated into a DELETE from
1812  * this partition and an INSERT into some other partition, that's not
1813  * available, because CTID chains can't span relation boundaries. We
1814  * mimic the semantics to a limited extent by skipping the INSERT if the
1815  * DELETE fails to find a tuple. This ensures that two concurrent
1816  * attempts to UPDATE the same tuple at the same time can't turn one tuple
1817  * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1818  * it.
1819  */
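/*
 * Illustration of the above: if two sessions concurrently run an UPDATE
 * that would move the same row to another partition, only the session
 * whose DELETE actually removes the row goes on to perform the INSERT;
 * the other sees tuple_deleted == false below and either retries with the
 * re-fetched row or gives up, so one row cannot be turned into two.
 */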
1820  if (!tuple_deleted)
1821  {
1822  /*
1823  * epqslot will be typically NULL. But when ExecDelete() finds that
1824  * another transaction has concurrently updated the same row, it
1825  * re-fetches the row, skips the delete, and epqslot is set to the
1826  * re-fetched tuple slot. In that case, we need to do all the checks
1827  * again. For MERGE, we leave everything to the caller (it must do
1828  * additional rechecking, and might end up executing a different
1829  * action entirely).
1830  */
1831  if (context->relaction != NULL)
1832  return false;
1833  else if (TupIsNull(epqslot))
1834  return true;
1835  else
1836  {
1837  /* Fetch the most recent version of old tuple. */
1838  TupleTableSlot *oldSlot;
1839 
1840  /* ... but first, make sure ri_oldTupleSlot is initialized. */
1841  if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
1842  ExecInitUpdateProjection(mtstate, resultRelInfo);
1843  oldSlot = resultRelInfo->ri_oldTupleSlot;
1844  if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
1845  tupleid,
1846  SnapshotAny,
1847  oldSlot))
1848  elog(ERROR, "failed to fetch tuple being updated");
1849  /* and project the new tuple to retry the UPDATE with */
1850  *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
1851  oldSlot);
1852  return false;
1853  }
1854  }
1855 
1856  /*
1857  * resultRelInfo is one of the per-relation resultRelInfos. So we should
1858  * convert the tuple into root's tuple descriptor if needed, since
1859  * ExecInsert() starts the search from root.
1860  */
1861  tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1862  if (tupconv_map != NULL)
1863  slot = execute_attr_map_slot(tupconv_map->attrMap,
1864  slot,
1865  mtstate->mt_root_tuple_slot);
1866 
1867  /* Tuple routing starts from the root table. */
1868  context->cpUpdateReturningSlot =
1869  ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
1870  inserted_tuple, insert_destrel);
1871 
1872  /*
1873  * Reset the transition state that may possibly have been written by
1874  * INSERT.
1875  */
1876  if (mtstate->mt_transition_capture)
1877  mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1878 
1879  /* We're done moving. */
1880  return true;
1881 }
1882 
1883 /*
1884  * ExecUpdatePrologue -- subroutine for ExecUpdate
1885  *
1886  * Prepare executor state for UPDATE. This includes running BEFORE ROW
1887  * triggers. We return false if one of them makes the update a no-op;
1888  * otherwise, return true.
1889  */
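/*
 * Illustrative example (hypothetical names): a BEFORE ROW UPDATE trigger
 * can make the update a no-op by having its function return NULL, e.g.
 *
 *     CREATE TRIGGER keep_frozen BEFORE UPDATE ON accounts
 *         FOR EACH ROW WHEN (OLD.frozen) EXECUTE FUNCTION suppress_change();
 *
 * In that case ExecBRUpdateTriggers() reports the row as skipped and this
 * routine returns false.
 */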
1890 static bool
1891 ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1892  ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1893  TM_Result *result)
1894 {
1895  Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1896 
1897  if (result)
1898  *result = TM_Ok;
1899 
1900  ExecMaterializeSlot(slot);
1901 
1902  /*
1903  * Open the table's indexes, if we have not done so already, so that we
1904  * can add new index entries for the updated tuple.
1905  */
1906  if (resultRelationDesc->rd_rel->relhasindex &&
1907  resultRelInfo->ri_IndexRelationDescs == NULL)
1908  ExecOpenIndices(resultRelInfo, false);
1909 
1910  /* BEFORE ROW UPDATE triggers */
1911  if (resultRelInfo->ri_TrigDesc &&
1912  resultRelInfo->ri_TrigDesc->trig_update_before_row)
1913  {
1914  /* Flush any pending inserts, so rows are visible to the triggers */
1915  if (context->estate->es_insert_pending_result_relations != NIL)
1916  ExecPendingInserts(context->estate);
1917 
1918  return ExecBRUpdateTriggers(context->estate, context->epqstate,
1919  resultRelInfo, tupleid, oldtuple, slot,
1920  result, &context->tmfd);
1921  }
1922 
1923  return true;
1924 }
1925 
1926 /*
1927  * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
1928  *
1929  * Apply the final modifications to the tuple slot before the update.
1930  * (This is split out because we also need it in the foreign-table code path.)
1931  */
1932 static void
1933 ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1934  TupleTableSlot *slot,
1935  EState *estate)
1936 {
1937  Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1938 
1939  /*
1940  * Constraints and GENERATED expressions might reference the tableoid
1941  * column, so (re-)initialize tts_tableOid before evaluating them.
1942  */
1943  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1944 
1945  /*
1946  * Compute stored generated columns
1947  */
1948  if (resultRelationDesc->rd_att->constr &&
1949  resultRelationDesc->rd_att->constr->has_generated_stored)
1950  ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1951  CMD_UPDATE);
1952 }
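/*
 * Illustrative example (hypothetical names): given
 *
 *     CREATE TABLE t (a int, b int GENERATED ALWAYS AS (a * 2) STORED);
 *     UPDATE t SET a = 10;
 *
 * the stored value of b is recomputed by the call above before the new
 * tuple version is written.
 */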
1953 
1954 /*
1955  * ExecUpdateAct -- subroutine for ExecUpdate
1956  *
1957  * Actually update the tuple, when operating on a plain table. If the
1958  * table is a partition, and the command was called referencing an ancestor
1959  * partitioned table, this routine migrates the resulting tuple to another
1960  * partition.
1961  *
1962  * The caller is in charge of keeping indexes current as necessary. The
1963  * caller is also in charge of doing EvalPlanQual if the tuple is found to
1964  * be concurrently updated. However, in case of a cross-partition update,
1965  * this routine does it.
1966  */
1967 static TM_Result
1968 ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1969  ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1970  bool canSetTag, UpdateContext *updateCxt)
1971 {
1972  EState *estate = context->estate;
1973  Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1974  bool partition_constraint_failed;
1975  TM_Result result;
1976 
1977  updateCxt->crossPartUpdate = false;
1978 
1979  /*
1980  * If we move the tuple to a new partition, we loop back here to recompute
1981  * GENERATED values (which are allowed to be different across partitions)
1982  * and recheck any RLS policies and constraints. We do not fire any
1983  * BEFORE triggers of the new partition, however.
1984  */
1985 lreplace:
1986  /* Fill in GENERATEd columns */
1987  ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
1988 
1989  /* ensure slot is independent, consider e.g. EPQ */
1990  ExecMaterializeSlot(slot);
1991 
1992  /*
1993  * If partition constraint fails, this row might get moved to another
1994  * partition, in which case we should check the RLS CHECK policy just
1995  * before inserting into the new partition, rather than doing it here.
1996  * This is because a trigger on that partition might again change the row.
1997  * So skip the WCO checks if the partition constraint fails.
1998  */
1999  partition_constraint_failed =
2000  resultRelationDesc->rd_rel->relispartition &&
2001  !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2002 
2003  /* Check any RLS UPDATE WITH CHECK policies */
2004  if (!partition_constraint_failed &&
2005  resultRelInfo->ri_WithCheckOptions != NIL)
2006  {
2007  /*
2008  * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2009  * we are looking for at this point.
2010  */
2011  ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2012  resultRelInfo, slot, estate);
2013  }
2014 
2015  /*
2016  * If a partition check failed, try to move the row into the right
2017  * partition.
2018  */
2019  if (partition_constraint_failed)
2020  {
2021  TupleTableSlot *inserted_tuple,
2022  *retry_slot;
2023  ResultRelInfo *insert_destrel = NULL;
2024 
2025  /*
2026  * ExecCrossPartitionUpdate will first DELETE the row from the
2027  * partition it's currently in and then insert it back into the root
2028  * table, which will re-route it to the correct partition. However,
2029  * if the tuple has been concurrently updated, a retry is needed.
2030  */
2031  if (ExecCrossPartitionUpdate(context, resultRelInfo,
2032  tupleid, oldtuple, slot,
2033  canSetTag, updateCxt,
2034  &retry_slot,
2035  &inserted_tuple,
2036  &insert_destrel))
2037  {
2038  /* success! */
2039  updateCxt->updated = true;
2040  updateCxt->crossPartUpdate = true;
2041 
2042  /*
2043  * If the partitioned table being updated is referenced in foreign
2044  * keys, queue up trigger events to check that none of them were
2045  * violated. No special treatment is needed in
2046  * non-cross-partition update situations, because the leaf
2047  * partition's AR update triggers will take care of that. During
2048  * cross-partition updates implemented as delete on the source
2049  * partition followed by insert on the destination partition,
2050  * AR-UPDATE triggers of the root table (that is, the table
2051  * mentioned in the query) must be fired.
2052  *
2053  * NULL insert_destrel means that the move failed to occur, that
2054  * is, the update failed, so there is no need to do anything in that case.
2055  */
2056  if (insert_destrel &&
2057  resultRelInfo->ri_TrigDesc &&
2058  resultRelInfo->ri_TrigDesc->trig_update_after_row)
2059  ExecCrossPartitionUpdateForeignKey(context,
2060  resultRelInfo,
2061  insert_destrel,
2062  tupleid, slot,
2063  inserted_tuple);
2064 
2065  return TM_Ok;
2066  }
2067 
2068  /*
2069  * No luck, a retry is needed. If running MERGE, we do not do so
2070  * here; instead we let it handle that according to its own rules.
2071  */
2072  if (context->relaction != NULL)
2073  return TM_Updated;
2074 
2075  /*
2076  * ExecCrossPartitionUpdate installed an updated version of the new
2077  * tuple in the retry slot; start over.
2078  */
2079  slot = retry_slot;
2080  goto lreplace;
2081  }
2082 
2083  /*
2084  * Check the constraints of the tuple. We've already checked the
2085  * partition constraint above; however, we must still ensure the tuple
2086  * passes all other constraints, so we will call ExecConstraints() and
2087  * have it validate all remaining checks.
2088  */
2089  if (resultRelationDesc->rd_att->constr)
2090  ExecConstraints(resultRelInfo, slot, estate);
2091 
2092  /*
2093  * replace the heap tuple
2094  *
2095  * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2096  * the row to be updated is visible to that snapshot, and throw a
2097  * can't-serialize error if not. This is a special-case behavior needed
2098  * for referential integrity updates in transaction-snapshot mode
2099  * transactions.
2100  */
2101  result = table_tuple_update(resultRelationDesc, tupleid, slot,
2102  estate->es_output_cid,
2103  estate->es_snapshot,
2104  estate->es_crosscheck_snapshot,
2105  true /* wait for commit */ ,
2106  &context->tmfd, &updateCxt->lockmode,
2107  &updateCxt->updateIndexes);
2108  if (result == TM_Ok)
2109  updateCxt->updated = true;
2110 
2111  return result;
2112 }
2113 
2114 /*
2115  * ExecUpdateEpilogue -- subroutine for ExecUpdate
2116  *
2117  * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2118  * returns indicating that the tuple was updated.
2119  */
2120 static void
2121 ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2122  ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2123  HeapTuple oldtuple, TupleTableSlot *slot)
2124 {
2125  ModifyTableState *mtstate = context->mtstate;
2126  List *recheckIndexes = NIL;
2127 
2128  /* insert index entries for tuple if necessary */
2129  if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2130  recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2131  slot, context->estate,
2132  true, false,
2133  NULL, NIL,
2134  (updateCxt->updateIndexes == TU_Summarizing));
2135 
2136  /* AFTER ROW UPDATE Triggers */
2137  ExecARUpdateTriggers(context->estate, resultRelInfo,
2138  NULL, NULL,
2139  tupleid, oldtuple, slot,
2140  recheckIndexes,
2141  mtstate->operation == CMD_INSERT ?
2142  mtstate->mt_oc_transition_capture :
2143  mtstate->mt_transition_capture,
2144  false);
2145 
2146  list_free(recheckIndexes);
2147 
2148  /*
2149  * Check any WITH CHECK OPTION constraints from parent views. We are
2150  * required to do this after testing all constraints and uniqueness
2151  * violations per the SQL spec, so we do it after actually updating the
2152  * record in the heap and all indexes.
2153  *
2154  * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2155  * are looking for at this point.
2156  */
2157  if (resultRelInfo->ri_WithCheckOptions != NIL)
2158  ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2159  slot, context->estate);
2160 }
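/*
 * Illustrative example (hypothetical names): the WCO_VIEW_CHECK case above
 * covers auto-updatable views such as
 *
 *     CREATE VIEW active_orders AS
 *         SELECT * FROM orders WHERE status = 'active'
 *         WITH CHECK OPTION;
 *
 * An UPDATE through the view that would make the row invisible to it (say,
 * SET status = 'done') is rejected here, after the heap and index updates
 * have already been performed, per the SQL-spec ordering noted above.
 */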
2161 
2162 /*
2163  * Queues up an update event using the target root partitioned table's
2164  * trigger to check that a cross-partition update hasn't broken any foreign
2165  * keys pointing into it.
2166  */
2167 static void
2168 ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2169  ResultRelInfo *sourcePartInfo,
2170  ResultRelInfo *destPartInfo,
2171  ItemPointer tupleid,
2172  TupleTableSlot *oldslot,
2173  TupleTableSlot *newslot)
2174 {
2175  ListCell *lc;
2176  ResultRelInfo *rootRelInfo;
2177  List *ancestorRels;
2178 
2179  rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2180  ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2181 
2182  /*
2183  * For any foreign keys that point directly into a non-root ancestor of
2184  * the source partition, we can in theory fire an update event to enforce
2185  * those constraints using their triggers, if we could tell that both the
2186  * source and the destination partitions are under the same ancestor. But
2187  * for now, we simply report an error that those cannot be enforced.
2188  */
2189  foreach(lc, ancestorRels)
2190  {
2191  ResultRelInfo *rInfo = lfirst(lc);
2192  TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2193  bool has_noncloned_fkey = false;
2194 
2195  /* Root ancestor's triggers will be processed. */
2196  if (rInfo == rootRelInfo)
2197  continue;
2198 
2199  if (trigdesc && trigdesc->trig_update_after_row)
2200  {
2201  for (int i = 0; i < trigdesc->numtriggers; i++)
2202  {
2203  Trigger *trig = &trigdesc->triggers[i];
2204 
2205  if (!trig->tgisclone &&
2206  RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2207  {
2208  has_noncloned_fkey = true;
2209  break;
2210  }
2211  }
2212  }
2213 
2214  if (has_noncloned_fkey)
2215  ereport(ERROR,
2216  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2217  errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2218  errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2219  RelationGetRelationName(rInfo->ri_RelationDesc),
2220  RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2221  errhint("Consider defining the foreign key on table \"%s\".",
2222  RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2223  }
2224 
2225  /* Perform the root table's triggers. */
2226  ExecARUpdateTriggers(context->estate,
2227  rootRelInfo, sourcePartInfo, destPartInfo,
2228  tupleid, NULL, newslot, NIL, NULL, true);
2229 }
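/*
 * Illustrative example (hypothetical names): with a hierarchy
 * root -> mid -> leaf1/leaf2, a foreign key declared as REFERENCES mid
 * (rather than REFERENCES root) makes a row move between leaf1 and leaf2
 * raise the "non-root ancestor" error above, since only the root table's
 * triggers are fired for a cross-partition update.
 */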
2230 
2231 /* ----------------------------------------------------------------
2232  * ExecUpdate
2233  *
2234  * note: we can't run UPDATE queries with transactions
2235  * off because UPDATEs are actually INSERTs and our
2236  * scan will mistakenly loop forever, updating the tuple
2237  * it just inserted. This should be fixed, but until it
2238  * is, we don't want to get stuck in an infinite loop
2239  * which corrupts your database.
2240  *
2241  * When updating a table, tupleid identifies the tuple to
2242  * update and oldtuple is NULL. When updating a view, oldtuple
2243  * is passed to the INSTEAD OF triggers and identifies what to
2244  * update, and tupleid is invalid. When updating a foreign table,
2245  * tupleid is invalid; the FDW has to figure out which row to
2246  * update using data from the planSlot. oldtuple is passed to
2247  * foreign table triggers; it is NULL when the foreign table has
2248  * no relevant triggers.
2249  *
2250  * slot contains the new tuple value to be stored.
2251  * planSlot is the output of the ModifyTable's subplan; we use it
2252  * to access values from other input tables (for RETURNING),
2253  * row-ID junk columns, etc.
2254  *
2255  * Returns RETURNING result if any, otherwise NULL.
2256  * ----------------------------------------------------------------
2257  */
2258 static TupleTableSlot *
2259 ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2260  ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2261  bool canSetTag)
2262 {
2263  EState *estate = context->estate;
2264  Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2265  UpdateContext updateCxt = {0};
2266  TM_Result result;
2267 
2268  /*
2269  * abort the operation if not running transactions
2270  */
2271  if (IsBootstrapProcessingMode())
2272  elog(ERROR, "cannot UPDATE during bootstrap");
2273 
2274  /*
2275  * Prepare for the update. This includes BEFORE ROW triggers, so we're
2276  * done if it says we are.
2277  */
2278  if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2279  return NULL;
2280 
2281  /* INSTEAD OF ROW UPDATE Triggers */
2282  if (resultRelInfo->ri_TrigDesc &&
2283  resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2284  {
2285  if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2286  oldtuple, slot))
2287  return NULL; /* "do nothing" */
2288  }
2289  else if (resultRelInfo->ri_FdwRoutine)
2290  {
2291  /* Fill in GENERATEd columns */
2292  ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2293 
2294  /*
2295  * update in foreign table: let the FDW do it
2296  */
2297  slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2298  resultRelInfo,
2299  slot,
2300  context->planSlot);
2301 
2302  if (slot == NULL) /* "do nothing" */
2303  return NULL;
2304 
2305  /*
2306  * AFTER ROW Triggers or RETURNING expressions might reference the
2307  * tableoid column, so (re-)initialize tts_tableOid before evaluating
2308  * them. (This covers the case where the FDW replaced the slot.)
2309  */
2310  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2311  }
2312  else
2313  {
2314  /*
2315  * If we generate a new candidate tuple after EvalPlanQual testing, we
2316  * must loop back here to try again. (We don't need to redo triggers,
2317  * however. If there are any BEFORE triggers then trigger.c will have
2318  * done table_tuple_lock to lock the correct tuple, so there's no need
2319  * to do them again.)
2320  */
2321 redo_act:
2322  result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2323  canSetTag, &updateCxt);
2324 
2325  /*
2326  * If ExecUpdateAct reports that a cross-partition update was done,
2327  * then the RETURNING tuple (if any) has been projected and there's
2328  * nothing else for us to do.
2329  */
2330  if (updateCxt.crossPartUpdate)
2331  return context->cpUpdateReturningSlot;
2332 
2333  switch (result)
2334  {
2335  case TM_SelfModified:
2336 
2337  /*
2338  * The target tuple was already updated or deleted by the
2339  * current command, or by a later command in the current
2340  * transaction. The former case is possible in a join UPDATE
2341  * where multiple tuples join to the same target tuple. This
2342  * is pretty questionable, but Postgres has always allowed it:
2343  * we just execute the first update action and ignore
2344  * additional update attempts.
2345  *
2346  * The latter case arises if the tuple is modified by a
2347  * command in a BEFORE trigger, or perhaps by a command in a
2348  * volatile function used in the query. In such situations we
2349  * should not ignore the update, but it is equally unsafe to
2350  * proceed. We don't want to discard the original UPDATE
2351  * while keeping the triggered actions based on it; and we
2352  * have no principled way to merge this update with the
2353  * previous ones. So throwing an error is the only safe
2354  * course.
2355  *
2356  * If a trigger actually intends this type of interaction, it
2357  * can re-execute the UPDATE (assuming it can figure out how)
2358  * and then return NULL to cancel the outer update.
2359  */
2360  if (context->tmfd.cmax != estate->es_output_cid)
2361  ereport(ERROR,
2362  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2363  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2364  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2365 
2366  /* Else, already updated by self; nothing to do */
2367  return NULL;
2368 
2369  case TM_Ok:
2370  break;
2371 
2372  case TM_Updated:
2373  {
2374  TupleTableSlot *inputslot;
2375  TupleTableSlot *epqslot;
2376  TupleTableSlot *oldSlot;
2377 
2378  if (IsolationUsesXactSnapshot())
2379  ereport(ERROR,
2380  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2381  errmsg("could not serialize access due to concurrent update")));
2382 
2383  /*
2384  * Already know that we're going to need to do EPQ, so
2385  * fetch tuple directly into the right slot.
2386  */
2387  inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2388  resultRelInfo->ri_RangeTableIndex);
2389 
2390  result = table_tuple_lock(resultRelationDesc, tupleid,
2391  estate->es_snapshot,
2392  inputslot, estate->es_output_cid,
2393  updateCxt.lockmode, LockWaitBlock,
2394  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2395  &context->tmfd);
2396 
2397  switch (result)
2398  {
2399  case TM_Ok:
2400  Assert(context->tmfd.traversed);
2401 
2402  epqslot = EvalPlanQual(context->epqstate,
2403  resultRelationDesc,
2404  resultRelInfo->ri_RangeTableIndex,
2405  inputslot);
2406  if (TupIsNull(epqslot))
2407  /* Tuple not passing quals anymore, exiting... */
2408  return NULL;
2409 
2410  /* Make sure ri_oldTupleSlot is initialized. */
2411  if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2412  ExecInitUpdateProjection(context->mtstate,
2413  resultRelInfo);
2414 
2415  /* Fetch the most recent version of old tuple. */
2416  oldSlot = resultRelInfo->ri_oldTupleSlot;
2417  if (!table_tuple_fetch_row_version(resultRelationDesc,
2418  tupleid,
2419  SnapshotAny,
2420  oldSlot))
2421  elog(ERROR, "failed to fetch tuple being updated");
2422  slot = ExecGetUpdateNewTuple(resultRelInfo,
2423  epqslot, oldSlot);
2424  goto redo_act;
2425 
2426  case TM_Deleted:
2427  /* tuple already deleted; nothing to do */
2428  return NULL;
2429 
2430  case TM_SelfModified:
2431 
2432  /*
2433  * This can be reached when following an update
2434  * chain from a tuple updated by another session,
2435  * reaching a tuple that was already updated in
2436  * this transaction. If previously modified by
2437  * this command, ignore the redundant update,
2438  * otherwise error out.
2439  *
2440  * See also TM_SelfModified response to
2441  * table_tuple_update() above.
2442  */
2443  if (context->tmfd.cmax != estate->es_output_cid)
2444  ereport(ERROR,
2445  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2446  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2447  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2448  return NULL;
2449 
2450  default:
2451  /* see table_tuple_lock call in ExecDelete() */
2452  elog(ERROR, "unexpected table_tuple_lock status: %u",
2453  result);
2454  return NULL;
2455  }
2456  }
2457 
2458  break;
2459 
2460  case TM_Deleted:
2461  if (IsolationUsesXactSnapshot())
2462  ereport(ERROR,
2463  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2464  errmsg("could not serialize access due to concurrent delete")));
2465  /* tuple already deleted; nothing to do */
2466  return NULL;
2467 
2468  default:
2469  elog(ERROR, "unrecognized table_tuple_update status: %u",
2470  result);
2471  return NULL;
2472  }
2473  }
2474 
2475  if (canSetTag)
2476  (estate->es_processed)++;
2477 
2478  ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2479  slot);
2480 
2481  /* Process RETURNING if present */
2482  if (resultRelInfo->ri_projectReturning)
2483  return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2484 
2485  return NULL;
2486 }
2487 
2488 /*
2489  * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2490  *
2491  * Try to lock tuple for update as part of speculative insertion. If
2492  * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2493  * (but still lock row, even though it may not satisfy estate's
2494  * snapshot).
2495  *
2496  * Returns true if we're done (with or without an update), or false if
2497  * the caller must retry the INSERT from scratch.
2498  */
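/*
 * Illustrative example (hypothetical names):
 *
 *     INSERT INTO counters AS c (key, n) VALUES ('page', 1)
 *         ON CONFLICT (key) DO UPDATE SET n = c.n + 1 WHERE c.n < 100;
 *
 * The conflicting row is locked below before the WHERE clause is
 * evaluated, so the row remains locked even if the clause ends up
 * rejecting the update.
 */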
2499 static bool
2500 ExecOnConflictUpdate(ModifyTableContext *context,
2501  ResultRelInfo *resultRelInfo,
2502  ItemPointer conflictTid,
2503  TupleTableSlot *excludedSlot,
2504  bool canSetTag,
2505  TupleTableSlot **returning)
2506 {
2507  ModifyTableState *mtstate = context->mtstate;
2508  ExprContext *econtext = mtstate->ps.ps_ExprContext;
2509  Relation relation = resultRelInfo->ri_RelationDesc;
2510  ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2511  TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2512  TM_FailureData tmfd;
2513  LockTupleMode lockmode;
2514  TM_Result test;
2515  Datum xminDatum;
2516  TransactionId xmin;
2517  bool isnull;
2518 
2519  /* Determine lock mode to use */
2520  lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2521 
2522  /*
2523  * Lock tuple for update. Don't follow updates when tuple cannot be
2524  * locked without doing so. A row locking conflict here means our
2525  * previous conclusion that the tuple is conclusively committed is not
2526  * true anymore.
2527  */
2528  test = table_tuple_lock(relation, conflictTid,
2529  context->estate->es_snapshot,
2530  existing, context->estate->es_output_cid,
2531  lockmode, LockWaitBlock, 0,
2532  &tmfd);
2533  switch (test)
2534  {
2535  case TM_Ok:
2536  /* success! */
2537  break;
2538 
2539  case TM_Invisible:
2540 
2541  /*
2542  * This can occur when a just inserted tuple is updated again in
2543  * the same command. E.g. because multiple rows with the same
2544  * conflicting key values are inserted.
2545  *
2546  * This is somewhat similar to the ExecUpdate() TM_SelfModified
2547  * case. We do not want to proceed because it would lead to the
2548  * same row being updated a second time in some unspecified order,
2549  * and in contrast to plain UPDATEs there's no historical behavior
2550  * to break.
2551  *
2552  * It is the user's responsibility to prevent this situation from
2553  * occurring. These problems are why the SQL standard similarly
2554  * specifies that for SQL MERGE, an exception must be raised in
2555  * the event of an attempt to update the same row twice.
2556  */
2557  xminDatum = slot_getsysattr(existing,
2558  MinTransactionIdAttributeNumber,
2559  &isnull);
2560  Assert(!isnull);
2561  xmin = DatumGetTransactionId(xminDatum);
2562 
2563  if (TransactionIdIsCurrentTransactionId(xmin))
2564  ereport(ERROR,
2565  (errcode(ERRCODE_CARDINALITY_VIOLATION),
2566  /* translator: %s is a SQL command name */
2567  errmsg("%s command cannot affect row a second time",
2568  "ON CONFLICT DO UPDATE"),
2569  errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2570 
2571  /* This shouldn't happen */
2572  elog(ERROR, "attempted to lock invisible tuple");
2573  break;
2574 
2575  case TM_SelfModified:
2576 
2577  /*
2578  * This state should never be reached. As a dirty snapshot is used
2579  * to find conflicting tuples, speculative insertion wouldn't have
2580  * seen this row to conflict with.
2581  */
2582  elog(ERROR, "unexpected self-updated tuple");
2583  break;
2584 
2585  case TM_Updated:
2586  if (IsolationUsesXactSnapshot())
2587  ereport(ERROR,
2588  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2589  errmsg("could not serialize access due to concurrent update")));
2590 
2591  /*
2592  * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2593  * a partitioned table, we shouldn't reach a case where the tuple to
2594  * be locked has been moved to another partition due to a concurrent
2595  * update of the partition key.
2596  */
2597  Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2598 
2599  /*
2600  * Tell caller to try again from the very start.
2601  *
2602  * It does not make sense to use the usual EvalPlanQual() style
2603  * loop here, as the new version of the row might not conflict
2604  * anymore, or the conflicting tuple has actually been deleted.
2605  */
2606  ExecClearTuple(existing);
2607  return false;
2608 
2609  case TM_Deleted:
2610  if (IsolationUsesXactSnapshot())
2611  ereport(ERROR,
2612  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2613  errmsg("could not serialize access due to concurrent delete")));
2614 
2615  /* see TM_Updated case */
2616  Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2617  ExecClearTuple(existing);
2618  return false;
2619 
2620  default:
2621  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2622  }
2623 
2624  /* Success, the tuple is locked. */
2625 
2626  /*
2627  * Verify that the tuple is visible to our MVCC snapshot if the current
2628  * isolation level mandates that.
2629  *
2630  * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2631  * CONFLICT ... WHERE clause may prevent us from reaching that.
2632  *
2633  * This means we only ever continue when a new command in the current
2634  * transaction could see the row, even though in READ COMMITTED mode the
2635  * tuple will not be visible according to the current statement's
2636  * snapshot. This is in line with the way UPDATE deals with newer tuple
2637  * versions.
2638  */
2639  ExecCheckTupleVisible(context->estate, relation, existing);
2640 
2641  /*
2642  * Make tuple and any needed join variables available to ExecQual and
2643  * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2644  * the target's existing tuple is installed in the scantuple. EXCLUDED
2645  * has been made to reference INNER_VAR in setrefs.c, but there is no
2646  * other redirection.
2647  */
2648  econtext->ecxt_scantuple = existing;
2649  econtext->ecxt_innertuple = excludedSlot;
2650  econtext->ecxt_outertuple = NULL;
2651 
2652  if (!ExecQual(onConflictSetWhere, econtext))
2653  {
2654  ExecClearTuple(existing); /* see return below */
2655  InstrCountFiltered1(&mtstate->ps, 1);
2656  return true; /* done with the tuple */
2657  }
2658 
2659  if (resultRelInfo->ri_WithCheckOptions != NIL)
2660  {
2661  /*
2662  * Check target's existing tuple against UPDATE-applicable USING
2663  * security barrier quals (if any), enforced here as RLS checks/WCOs.
2664  *
2665  * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2666  * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2667  * but that's almost the extent of its special handling for ON
2668  * CONFLICT DO UPDATE.
2669  *
2670  * The rewriter will also have associated UPDATE applicable straight
2671  * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2672  * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2673  * kinds, so there is no danger of spurious over-enforcement in the
2674  * INSERT or UPDATE path.
2675  */
2676  ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2677  existing,
2678  mtstate->ps.state);
2679  }
2680 
2681  /* Project the new tuple version */
2682  ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2683 
2684  /*
2685  * Note that it is possible that the target tuple has been modified in
2686  * this session, after the above table_tuple_lock. We choose to not error
2687  * out in that case, in line with ExecUpdate's treatment of similar cases.
2688  * This can happen if an UPDATE is triggered from within ExecQual(),
2689  * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2690  * wCTE in the ON CONFLICT's SET.
2691  */
2692 
2693  /* Execute UPDATE with projection */
2694  *returning = ExecUpdate(context, resultRelInfo,
2695  conflictTid, NULL,
2696  resultRelInfo->ri_onConflict->oc_ProjSlot,
2697  canSetTag);
2698 
2699  /*
2700  * Clear out existing tuple, as there might not be another conflict among
2701  * the next input rows. Don't want to hold resources till the end of the
2702  * query.
2703  */
2704  ExecClearTuple(existing);
2705  return true;
2706 }
2707 
2708 /*
2709  * Perform MERGE.
2710  */
2711 static TupleTableSlot *
2712 ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2713  ItemPointer tupleid, bool canSetTag)
2714 {
2715  bool matched;
2716 
2717  /*-----
2718  * If we are dealing with a WHEN MATCHED case (tupleid is valid), we
2719  * execute the first action for which the additional WHEN MATCHED AND
2720  * quals pass. If an action without quals is found, that action is
2721  * executed.
2722  *
2723  * Similarly, if we are dealing with WHEN NOT MATCHED case, we look at
2724  * the given WHEN NOT MATCHED actions in sequence until one passes.
2725  *
2726  * Things get interesting in case of concurrent update/delete of the
2727  * target tuple. Such concurrent update/delete is detected while we are
2728  * executing a WHEN MATCHED action.
2729  *
2730  * A concurrent update can:
2731  *
2732  * 1. modify the target tuple so that it no longer satisfies the
2733  * additional quals attached to the current WHEN MATCHED action
2734  *
2735  * In this case, we are still dealing with a WHEN MATCHED case.
2736  * We recheck the list of WHEN MATCHED actions from the start and
2737  * choose the first one that satisfies the new target tuple.
2738  *
2739  * 2. modify the target tuple so that the join quals no longer pass and
2740  * hence the source tuple no longer has a match.
2741  *
2742  * In this case, the source tuple no longer matches the target tuple,
2743  * so we now instead find a qualifying WHEN NOT MATCHED action to
2744  * execute.
2745  *
2746  * XXX Hmmm, what if the updated tuple would now match one that was
2747  * considered NOT MATCHED so far?
2748  *
2749  * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED.
2750  *
2751  * ExecMergeMatched takes care of following the update chain and
2752  * re-finding the qualifying WHEN MATCHED action, as long as the updated
2753  * target tuple still satisfies the join quals, i.e., it remains a WHEN
2754  * MATCHED case. If the tuple gets deleted or the join quals fail, it
2755  * returns and we try ExecMergeNotMatched. Given that ExecMergeMatched
2756  * always makes progress by following the update chain and we never switch
2757  * from ExecMergeNotMatched to ExecMergeMatched, there is no risk of a
2758  * livelock.
2759  */
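/*
 * Illustrative example (hypothetical names):
 *
 *     MERGE INTO target t
 *         USING source s ON t.id = s.id
 *         WHEN MATCHED AND s.op = 'del' THEN DELETE
 *         WHEN MATCHED THEN UPDATE SET val = s.val
 *         WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val);
 *
 * Because a NOT MATCHED clause is present, the subplan is an outer join
 * and source rows with no join partner arrive here with tupleid == NULL.
 */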
2760  matched = tupleid != NULL;
2761  if (matched)
2762  matched = ExecMergeMatched(context, resultRelInfo, tupleid, canSetTag);
2763 
2764  /*
2765  * Either we were dealing with a NOT MATCHED tuple or ExecMergeMatched()
2766  * returned "false", indicating the previously MATCHED tuple no longer
2767  * matches.
2768  */
2769  if (!matched)
2770  ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2771 
2772  /* No RETURNING support yet */
2773  return NULL;
2774 }
2775 
2776 /*
2777  * Check and execute the first qualifying MATCHED action. The current target
2778  * tuple is identified by tupleid.
2779  *
2780  * We start from the first WHEN MATCHED action and check if the WHEN quals
2781  * pass, if any. If the WHEN quals for the first action do not pass, we
2782  * check the second, then the third and so on. If we reach the end, no
2783  * action is taken and we return true, indicating that no further action is
2784  * required for this tuple.
2785  *
2786  * If we do find a qualifying action, then we attempt to execute the action.
2787  *
2788  * If the tuple is concurrently updated, EvalPlanQual is run with the updated
2789  * tuple to recheck the join quals. Note that the additional quals associated
2790  * with individual actions are evaluated by this routine via ExecQual, while
2791  * EvalPlanQual checks for the join quals. If EvalPlanQual tells us that the
2792  * updated tuple still passes the join quals, then we restart from the first
2793  * action to look for a qualifying action. Otherwise, we return false --
2794  * meaning that a NOT MATCHED action must now be executed for the current
2795  * source tuple.
2796  */
2797 static bool
2798 ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2799  ItemPointer tupleid, bool canSetTag)
2800 {
2801  ModifyTableState *mtstate = context->mtstate;
2802  TupleTableSlot *newslot;
2803  EState *estate = context->estate;
2804  ExprContext *econtext = mtstate->ps.ps_ExprContext;
2805  bool isNull;
2806  EPQState *epqstate = &mtstate->mt_epqstate;
2807  ListCell *l;
2808 
2809  /*
2810  * If there are no WHEN MATCHED actions, we are done.
2811  */
2812  if (resultRelInfo->ri_matchedMergeAction == NIL)
2813  return true;
2814 
2815  /*
2816  * Make tuple and any needed join variables available to ExecQual and
2817  * ExecProject. The target's existing tuple is installed in the scantuple.
2818  * Again, this target relation's slot is required only in the case of a
2819  * MATCHED tuple and UPDATE/DELETE actions.
2820  */
2821  econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
2822  econtext->ecxt_innertuple = context->planSlot;
2823  econtext->ecxt_outertuple = NULL;
2824 
2825 lmerge_matched:
2826 
2827  /*
2828  * This routine is only invoked for matched rows, and we must have found
2829  * the tupleid of the target row in that case; fetch that tuple.
2830  *
2831  * We use SnapshotAny for this because we might get called again after
2832  * EvalPlanQual returns us a new tuple, which may not be visible to our
2833  * MVCC snapshot.
2834  */
2835 
2836  if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2837  tupleid,
2838  SnapshotAny,
2839  resultRelInfo->ri_oldTupleSlot))
2840  elog(ERROR, "failed to fetch the target tuple");
2841 
2842  foreach(l, resultRelInfo->ri_matchedMergeAction)
2843  {
2844  MergeActionState *relaction = (MergeActionState *) lfirst(l);
2845  CmdType commandType = relaction->mas_action->commandType;
2846  TM_Result result;
2847  UpdateContext updateCxt = {0};
2848 
2849  /*
2850  * Test condition, if any.
2851  *
2852  * In the absence of any condition, we perform the action
2853  * unconditionally (no need to check separately since ExecQual() will
2854  * return true if there are no conditions to evaluate).
2855  */
2856  if (!ExecQual(relaction->mas_whenqual, econtext))
2857  continue;
2858 
2859  /*
2860  * Check if the existing target tuple meets the USING checks of
2861  * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2862  * error.
2863  *
2864  * The WITH CHECK quals for UPDATE RLS policies are applied in
2865  * ExecUpdateAct() and hence we need not do anything special to handle
2866  * them.
2867  *
2868  * NOTE: We must do this after WHEN quals are evaluated, so that we
2869  * check policies only when they matter.
2870  */
2871  if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
2872  {
2873  ExecWithCheckOptions(commandType == CMD_UPDATE ?
2874  WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2875  resultRelInfo,
2876  resultRelInfo->ri_oldTupleSlot,
2877  context->mtstate->ps.state);
2878  }
2879 
2880  /* Perform stated action */
2881  switch (commandType)
2882  {
2883  case CMD_UPDATE:
2884 
2885  /*
2886  * Project the output tuple, and use that to update the table.
2887  * We don't need to filter out junk attributes, because the
2888  * UPDATE action's targetlist doesn't have any.
2889  */
2890  newslot = ExecProject(relaction->mas_proj);
2891 
2892  context->relaction = relaction;
2893  if (!ExecUpdatePrologue(context, resultRelInfo,
2894  tupleid, NULL, newslot, &result))
2895  {
2896  if (result == TM_Ok)
2897  return true; /* "do nothing" */
2898  break; /* concurrent update/delete */
2899  }
2900  result = ExecUpdateAct(context, resultRelInfo, tupleid, NULL,
2901  newslot, false, &updateCxt);
2902 
2903  /*
2904  * As in ExecUpdate(), if ExecUpdateAct() reports that a
2905  * cross-partition update was done, then there's nothing else
2906  * for us to do --- the UPDATE has been turned into a DELETE
2907  * and an INSERT, and we must not perform any of the usual
2908  * post-update tasks.
2909  */
2910  if (updateCxt.crossPartUpdate)
2911  {
2912  mtstate->mt_merge_updated += 1;
2913  if (canSetTag)
2914  (estate->es_processed)++;
2915  return true;
2916  }
2917 
2918  if (result == TM_Ok && updateCxt.updated)
2919  {
2920  ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
2921  tupleid, NULL, newslot);
2922  mtstate->mt_merge_updated += 1;
2923  }
2924  break;
2925 
2926  case CMD_DELETE:
2927  context->relaction = relaction;
2928  if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
2929  NULL, NULL, &result))
2930  {
2931  if (result == TM_Ok)
2932  return true; /* "do nothing" */
2933  break; /* concurrent update/delete */
2934  }
2935  result = ExecDeleteAct(context, resultRelInfo, tupleid, false);
2936  if (result == TM_Ok)
2937  {
2938  ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
2939  false);
2940  mtstate->mt_merge_deleted += 1;
2941  }
2942  break;
2943 
2944  case CMD_NOTHING:
2945  /* Doing nothing is always OK */
2946  result = TM_Ok;
2947  break;
2948 
2949  default:
2950  elog(ERROR, "unknown action in MERGE WHEN MATCHED clause");
2951  }
2952 
2953  switch (result)
2954  {
2955  case TM_Ok:
2956  /* all good; perform final actions */
2957  if (canSetTag && commandType != CMD_NOTHING)
2958  (estate->es_processed)++;
2959 
2960  break;
2961 
2962  case TM_SelfModified:
2963 
2964  /*
2965  * The SQL standard disallows this for MERGE.
2966  */
2967  if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
2968  ereport(ERROR,
2969  (errcode(ERRCODE_CARDINALITY_VIOLATION),
2970  /* translator: %s is a SQL command name */
2971  errmsg("%s command cannot affect row a second time",
2972  "MERGE"),
2973  errhint("Ensure that not more than one source row matches any one target row.")));
2974  /* This shouldn't happen */
2975  elog(ERROR, "attempted to update or delete invisible tuple");
2976  break;
2977 
2978  case TM_Deleted:
2979  if (IsolationUsesXactSnapshot())
2980  ereport(ERROR,
2981  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2982  errmsg("could not serialize access due to concurrent delete")));
2983 
2984  /*
2985  * If the tuple was already deleted, return to let caller
2986  * handle it under NOT MATCHED clauses.
2987  */
2988  return false;
2989 
2990  case TM_Updated:
2991  {
2992  Relation resultRelationDesc;
2993  TupleTableSlot *epqslot,
2994  *inputslot;
2995  LockTupleMode lockmode;
2996 
2997  /*
2998  * The target tuple was concurrently updated by some other
2999  * transaction. Run EvalPlanQual() with the new version of
3000  * the tuple. If it does not return a tuple, then we
3001  * switch to the NOT MATCHED list of actions. If it does
3002  * return a tuple and the join qual is still satisfied,
3003  * then we just need to recheck the MATCHED actions,
3004  * starting from the top, and execute the first qualifying
3005  * action.
3006  */
3007  resultRelationDesc = resultRelInfo->ri_RelationDesc;
3008  lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3009 
3010  inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3011  resultRelInfo->ri_RangeTableIndex);
3012 
3013  result = table_tuple_lock(resultRelationDesc, tupleid,
3014  estate->es_snapshot,
3015  inputslot, estate->es_output_cid,
3016  lockmode, LockWaitBlock,
3017  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3018  &context->tmfd);
3019  switch (result)
3020  {
3021  case TM_Ok:
3022  epqslot = EvalPlanQual(epqstate,
3023  resultRelationDesc,
3024  resultRelInfo->ri_RangeTableIndex,
3025  inputslot);
3026 
3027  /*
3028  * If we got no tuple, or the tuple we get has a
3029  * NULL ctid, go back to caller: this one is not a
3030  * MATCHED tuple anymore, so they can retry with
3031  * NOT MATCHED actions.
3032  */
3033  if (TupIsNull(epqslot))
3034  return false;
3035 
3036  (void) ExecGetJunkAttribute(epqslot,
3037  resultRelInfo->ri_RowIdAttNo,
3038  &isNull);
3039  if (isNull)
3040  return false;
3041 
3042  /*
3043  * When a tuple was updated and migrated to
3044  * another partition concurrently, the current
3045  * MERGE implementation can't follow. There's
3046  * probably a better way to handle this case, but
3047  * it'd require recognizing the relation to which
3048  * the tuple moved, and setting our current
3049  * resultRelInfo to that.
3050  */
3051  if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3052  ereport(ERROR,
3053  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3054  errmsg("tuple to be deleted was already moved to another partition due to concurrent update")));
3055 
3056  /*
3057  * A non-NULL ctid means that we are still dealing
3058  * with MATCHED case. Restart the loop so that we
3059  * apply all the MATCHED rules again, to ensure
3060  * that the first qualifying WHEN MATCHED action
3061  * is executed.
3062  *
3063  * Update tupleid to that of the new tuple, for
3064  * the refetch we do at the top.
3065  */
3066  ItemPointerCopy(&context->tmfd.ctid, tupleid);
3067  goto lmerge_matched;
3068 
3069  case TM_Deleted:
3070 
3071  /*
3072  * tuple already deleted; tell caller to run NOT
3073  * MATCHED actions
3074  */
3075  return false;
3076 
3077  case TM_SelfModified:
3078 
3079  /*
3080  * This can be reached when following an update
3081  * chain from a tuple updated by another session,
3082  * reaching a tuple that was already updated in
3083  * this transaction. If previously modified by
3084  * this command, ignore the redundant update,
3085  * otherwise error out.
3086  *
3087  * See also response to TM_SelfModified in
3088  * ExecUpdate().
3089  */
3090  if (context->tmfd.cmax != estate->es_output_cid)
3091  ereport(ERROR,
3092  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3093  errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3094  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3095  return false;
3096 
3097  default:
3098  /* see table_tuple_lock call in ExecDelete() */
3099  elog(ERROR, "unexpected table_tuple_lock status: %u",
3100  result);
3101  return false;
3102  }
3103  }
3104 
3105  case TM_Invisible:
3106  case TM_WouldBlock:
3107  case TM_BeingModified:
3108  /* these should not occur */
3109  elog(ERROR, "unexpected tuple operation result: %d", result);
3110  break;
3111  }
3112 
3113  /*
3114  * We've activated one of the WHEN clauses, so we don't search
3115  * further. This is required behaviour, not an optimization.
3116  */
3117  break;
3118  }
3119 
3120  /*
3121  * Successfully executed an action or no qualifying action was found.
3122  */
3123  return true;
3124 }
3125 
3126 /*
3127  * Execute the first qualifying NOT MATCHED action.
3128  */
3129 static void
3130 ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3131  bool canSetTag)
3132 {
3133  ModifyTableState *mtstate = context->mtstate;
3134  ExprContext *econtext = mtstate->ps.ps_ExprContext;
3135  List *actionStates = NIL;
3136  ListCell *l;
3137 
3138  /*
3139  * For INSERT actions, the root relation's merge action is OK since the
3140  * INSERT's targetlist and the WHEN conditions can only refer to the
3141  * source relation and hence it does not matter which result relation we
3142  * work with.
3143  *
3144  * XXX does this mean that we can avoid creating copies of actionStates on
3145  * partitioned tables, for not-matched actions?
3146  */
3147  actionStates = resultRelInfo->ri_notMatchedMergeAction;
3148 
3149  /*
3150  * Make source tuple available to ExecQual and ExecProject. We don't need
3151  * the target tuple, since the WHEN quals and targetlist can't refer to
3152  * the target columns.
3153  */
3154  econtext->ecxt_scantuple = NULL;
3155  econtext->ecxt_innertuple = context->planSlot;
3156  econtext->ecxt_outertuple = NULL;
3157 
3158  foreach(l, actionStates)
3159  {
3160  MergeActionState *action = (MergeActionState *) lfirst(l);
3161  CmdType commandType = action->mas_action->commandType;
3162  TupleTableSlot *newslot;
3163 
3164  /*
3165  * Test condition, if any.
3166  *
3167  * In the absence of any condition, we perform the action
3168  * unconditionally (no need to check separately since ExecQual() will
3169  * return true if there are no conditions to evaluate).
3170  */
3171  if (!ExecQual(action->mas_whenqual, econtext))
3172  continue;
3173 
3174  /* Perform stated action */
3175  switch (commandType)
3176  {
3177  case CMD_INSERT:
3178 
3179  /*
3180  * Project the tuple. In case of a partitioned table, the
3181  * projection was already built to use the root's descriptor,
3182  * so we don't need to map the tuple here.
3183  */
3184  newslot = ExecProject(action->mas_proj);
3185  context->relaction = action;
3186 
3187  (void) ExecInsert(context, mtstate->rootResultRelInfo, newslot,
3188  canSetTag, NULL, NULL);
3189  mtstate->mt_merge_inserted += 1;
3190  break;
3191  case CMD_NOTHING:
3192  /* Do nothing */
3193  break;
3194  default:
3195  elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3196  }
3197 
3198  /*
3199  * We've activated one of the WHEN clauses, so we don't search
3200  * further. This is required behaviour, not an optimization.
3201  */
3202  break;
3203  }
3204 }
3205 
3206 /*
3207  * Initialize state for execution of MERGE.
3208  */
3209 void
3210 ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3211 {
3212  ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3213  ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3214  ResultRelInfo *resultRelInfo;
3215  ExprContext *econtext;
3216  ListCell *lc;
3217  int i;
3218 
3219  if (node->mergeActionLists == NIL)
3220  return;
3221 
3222  mtstate->mt_merge_subcommands = 0;
3223 
3224  if (mtstate->ps.ps_ExprContext == NULL)
3225  ExecAssignExprContext(estate, &mtstate->ps);
3226  econtext = mtstate->ps.ps_ExprContext;
3227 
3228  /*
3229  * Create a MergeActionState for each action on the mergeActionList and
3230  * add it to either a list of matched actions or not-matched actions.
3231  *
3232  * Similar logic appears in ExecInitPartitionInfo(), so if changing
3233  * anything here, do so there too.
3234  */
3235  i = 0;
3236  foreach(lc, node->mergeActionLists)
3237  {
3238  List *mergeActionList = lfirst(lc);
3239  TupleDesc relationDesc;
3240  ListCell *l;
3241 
3242  resultRelInfo = mtstate->resultRelInfo + i;
3243  i++;
3244  relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3245 
3246  /* initialize slots for MERGE fetches from this rel */
3247  if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3248  ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3249 
3250  foreach(l, mergeActionList)
3251  {
3252  MergeAction *action = (MergeAction *) lfirst(l);
3253  MergeActionState *action_state;
3254  TupleTableSlot *tgtslot;
3255  TupleDesc tgtdesc;
3256  List **list;
3257 
3258  /*
3259  * Build action merge state for this rel. (For partitions,
3260  * equivalent code exists in ExecInitPartitionInfo.)
3261  */
3262  action_state = makeNode(MergeActionState);
3263  action_state->mas_action = action;
3264  action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3265  &mtstate->ps);
3266 
3267  /*
3268  * We create two lists - one for WHEN MATCHED actions and one for
3269  * WHEN NOT MATCHED actions - and stick the MergeActionState into
3270  * the appropriate list.
3271  */
3272  if (action_state->mas_action->matched)
3273  list = &resultRelInfo->ri_matchedMergeAction;
3274  else
3275  list = &resultRelInfo->ri_notMatchedMergeAction;
3276  *list = lappend(*list, action_state);
3277 
3278  switch (action->commandType)
3279  {
3280  case CMD_INSERT:
3281  ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3282  action->targetList);
3283 
3284  /*
3285  * If the MERGE targets a partitioned table, any INSERT
3286  * actions must be routed through it, not the child
3287  * relations. Initialize the routing struct and the root
3288  * table's "new" tuple slot for that, if not already done.
3289  * The projection we prepare, for all relations, uses the
3290  * root relation descriptor, and targets the plan's root
3291  * slot. (This is consistent with the fact that we
3292  * checked the plan output to match the root relation,
3293  * above.)
3294  */
3295  if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3296  RELKIND_PARTITIONED_TABLE)
3297  {
3298  if (mtstate->mt_partition_tuple_routing == NULL)
3299  {
3300  /*
3301  * Initialize planstate for routing if not already
3302  * done.
3303  *
3304  * Note that the slot is managed as a standalone
3305  * slot belonging to ModifyTableState, so we pass
3306  * NULL for the 2nd argument.
3307  */
3308  mtstate->mt_root_tuple_slot =
3309  table_slot_create(rootRelInfo->ri_RelationDesc,
3310  NULL);
3311  mtstate->mt_partition_tuple_routing =
3312  ExecSetupPartitionTupleRouting(estate,
3313  rootRelInfo->ri_RelationDesc);
3314  }
3315  tgtslot = mtstate->mt_root_tuple_slot;
3316  tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3317  }
3318  else
3319  {
3320  /* not partitioned? use the stock relation and slot */
3321  tgtslot = resultRelInfo->ri_newTupleSlot;
3322  tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3323  }
3324 
3325  action_state->mas_proj =
3326  ExecBuildProjectionInfo(action->targetList, econtext,
3327  tgtslot,
3328  &mtstate->ps,
3329  tgtdesc);
3330 
3331  mtstate->mt_merge_subcommands |= MERGE_INSERT;
3332  break;
3333  case CMD_UPDATE:
3334  action_state->mas_proj =
3335  ExecBuildUpdateProjection(action->targetList,
3336  true,
3337  action->updateColnos,
3338  relationDesc,
3339  econtext,
3340  resultRelInfo->ri_newTupleSlot,
3341  &mtstate->ps);
3342  mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3343  break;
3344  case CMD_DELETE:
3345  mtstate->mt_merge_subcommands |= MERGE_DELETE;
3346  break;
3347  case CMD_NOTHING:
3348  break;
3349  default:
3350  elog(ERROR, "unknown operation");
3351  break;
3352  }
3353  }
3354  }
3355 }
3356 
3357 /*
3358  * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3359  *
3360  * We mark 'projectNewInfoValid' even though the projections themselves
3361  * are not initialized here.
3362  */
3363 void
3364 ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3365  ResultRelInfo *resultRelInfo)
3366 {
3367  EState *estate = mtstate->ps.state;
3368 
3369  Assert(!resultRelInfo->ri_projectNewInfoValid);
3370 
3371  resultRelInfo->ri_oldTupleSlot =
3372  table_slot_create(resultRelInfo->ri_RelationDesc,
3373  &estate->es_tupleTable);
3374  resultRelInfo->ri_newTupleSlot =
3375  table_slot_create(resultRelInfo->ri_RelationDesc,
3376  &estate->es_tupleTable);
3377  resultRelInfo->ri_projectNewInfoValid = true;
3378 }
3379 
3380 /*
3381  * Process BEFORE EACH STATEMENT triggers
3382  */
3383 static void
3384 fireBSTriggers(ModifyTableState *node)
3385 {
3386  ModifyTable *plan = (ModifyTable *) node->ps.plan;
3387  ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3388 
3389  switch (node->operation)
3390  {
3391  case CMD_INSERT:
3392  ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3393  if (plan->onConflictAction == ONCONFLICT_UPDATE)
3394  ExecBSUpdateTriggers(node->ps.state,
3395  resultRelInfo);
3396  break;
3397  case CMD_UPDATE:
3398  ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3399  break;
3400  case CMD_DELETE:
3401  ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3402  break;
3403  case CMD_MERGE:
3404  if (node->mt_merge_subcommands & MERGE_INSERT)
3405  ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3406  if (node->mt_merge_subcommands & MERGE_UPDATE)
3407  ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3408  if (node->mt_merge_subcommands & MERGE_DELETE)
3409  ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3410  break;
3411  default:
3412  elog(ERROR, "unknown operation");
3413  break;
3414  }
3415 }
3416 
3417 /*
3418  * Process AFTER EACH STATEMENT triggers
3419  */
3420 static void
3421 fireASTriggers(ModifyTableState *node)
3422 {
3423  ModifyTable *plan = (ModifyTable *) node->ps.plan;
3424  ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3425 
3426  switch (node->operation)
3427  {
3428  case CMD_INSERT:
3429  if (plan->onConflictAction == ONCONFLICT_UPDATE)
3430  ExecASUpdateTriggers(node->ps.state,
3431  resultRelInfo,
3432  node->mt_oc_transition_capture);
3433  ExecASInsertTriggers(node->ps.state, resultRelInfo,
3434  node->mt_transition_capture);
3435  break;
3436  case CMD_UPDATE:
3437  ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3438  node->mt_transition_capture);
3439  break;
3440  case CMD_DELETE:
3441  ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3442  node->mt_transition_capture);
3443  break;
3444  case CMD_MERGE:
3445  if (node->mt_merge_subcommands & MERGE_DELETE)
3446  ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3447  node->mt_transition_capture);
3448  if (node->mt_merge_subcommands & MERGE_UPDATE)
3449  ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3450  node->mt_transition_capture);
3451  if (node->mt_merge_subcommands & MERGE_INSERT)
3452  ExecASInsertTriggers(node->ps.state, resultRelInfo,
3453  node->mt_transition_capture);
3454  break;
3455  default:
3456  elog(ERROR, "unknown operation");
3457  break;
3458  }
3459 }
3460 
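The MERGE branches in fireBSTriggers and fireASTriggers above key off mt_merge_subcommands, a bitmask that ExecInitMerge fills with MERGE_INSERT, MERGE_UPDATE and MERGE_DELETE flags, so statement-level triggers fire only for the action kinds the MERGE statement can actually perform. Below is a minimal standalone C sketch of the same flag pattern; the names are illustrative only, not PostgreSQL APIs.

#include <stdio.h>

/* illustrative flag values, analogous to MERGE_INSERT/UPDATE/DELETE */
#define SUB_INSERT 0x01
#define SUB_UPDATE 0x02
#define SUB_DELETE 0x04

static void
fire_statement_triggers(int subcommands)
{
    /* fire one statement-level trigger per action kind that is present */
    if (subcommands & SUB_INSERT)
        printf("fire INSERT statement trigger\n");
    if (subcommands & SUB_UPDATE)
        printf("fire UPDATE statement trigger\n");
    if (subcommands & SUB_DELETE)
        printf("fire DELETE statement trigger\n");
}

int
main(void)
{
    int subcommands = 0;

    /* during initialization, each WHEN clause ORs in its action kind */
    subcommands |= SUB_UPDATE;
    subcommands |= SUB_INSERT;

    fire_statement_triggers(subcommands);
    return 0;
}

Testing each bit independently is what lets a MERGE containing only WHEN MATCHED THEN UPDATE actions skip INSERT and DELETE statement triggers entirely.
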
3461 /*
3462  * Set up the state needed for collecting transition tuples for AFTER
3463  * triggers.
3464  */
3465 static void
3466 ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3467 {
3468  ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3469  ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3470 
3471  /* Check for transition tables on the directly targeted relation. */
3472  mtstate->mt_transition_capture =
3473  MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3474  RelationGetRelid(targetRelInfo->ri_RelationDesc),
3475  mtstate->operation);
3476  if (plan->operation == CMD_INSERT &&
3477  plan->onConflictAction == ONCONFLICT_UPDATE)
3478  mtstate->mt_oc_transition_capture =
3479  MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3480  RelationGetRelid(targetRelInfo->ri_RelationDesc),
3481  CMD_UPDATE);
3482 }
3483 
3484 /*
3485  * ExecPrepareTupleRouting --- prepare for routing one tuple
3486  *
3487  * Determine the partition in which the tuple in slot is to be inserted,
3488  * and return its ResultRelInfo in *partRelInfo. The return value is
3489  * a slot holding the tuple of the partition rowtype.
3490  *
3491  * This also sets the transition table information in mtstate based on the
3492  * selected partition.
3493  */
3494 static TupleTableSlot *
3495 ExecPrepareTupleRouting(ModifyTableState *mtstate,
3496  EState *estate,
3497  PartitionTupleRouting *proute,
3498  ResultRelInfo *targetRelInfo,
3499  TupleTableSlot *slot,
3500  ResultRelInfo **partRelInfo)
3501 {
3502  ResultRelInfo *partrel;
3503  TupleConversionMap *map;
3504 
3505  /*
3506  * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
3507  * not find a valid partition for the tuple in 'slot' then an error is
3508  * raised. An error may also be raised if the found partition is not a
3509  * valid target for INSERTs. This is required since an UPDATE that moves
3510  * a row to another partition is executed as a DELETE+INSERT.
3511  */
3512  partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3513 
3514  /*
3515  * If we're capturing transition tuples, we might need to convert from the
3516  * partition rowtype to the root partitioned table's rowtype. But if there
3517  * are no BEFORE triggers on the partition that could change the tuple, we
3518  * can just remember the original unconverted tuple to avoid a needless
3519  * round trip conversion.
3520  */
3521  if (mtstate->mt_transition_capture != NULL)
3522  {
3523  bool has_before_insert_row_trig;
3524 
3525  has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3526  partrel->ri_TrigDesc->trig_insert_before_row);
3527 
3528  mtstate->mt_transition_capture->tcs_original_insert_tuple =
3529  !has_before_insert_row_trig ? slot : NULL;
3530  }
3531 
3532  /*
3533  * Convert the tuple, if necessary.
3534  */
3535  map = ExecGetRootToChildMap(partrel, estate);
3536  if (map != NULL)
3537  {
3538  TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3539 
3540  slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3541  }
3542 
3543  *partRelInfo = partrel;
3544  return slot;
3545 }
3546 
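When ExecGetRootToChildMap returns a map above, execute_attr_map_slot rewrites the routed tuple from the root table's column order into the partition's own column order, driven by the map's attrMap. Below is a minimal standalone C sketch of that kind of positional remapping; the arrays are hypothetical stand-ins for the real AttrMap and TupleTableSlot machinery.

#include <stdio.h>

#define NCOLS 3

int
main(void)
{
    /* parent-format row with columns (a, b, c) */
    int parent_values[NCOLS] = {10, 20, 30};

    /*
     * attr_map[i] holds the 1-based parent attribute number that feeds
     * child column i; here the child declares its columns as (c, a, b).
     */
    int attr_map[NCOLS] = {3, 1, 2};
    int child_values[NCOLS];

    for (int i = 0; i < NCOLS; i++)
        child_values[i] = parent_values[attr_map[i] - 1];

    for (int i = 0; i < NCOLS; i++)
        printf("child column %d = %d\n", i + 1, child_values[i]);
    return 0;
}
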
3547 /* ----------------------------------------------------------------
3548  * ExecModifyTable
3549  *
3550  * Perform table modifications as required, and return RETURNING results
3551  * if needed.
3552  * ----------------------------------------------------------------
3553  */
3554 static TupleTableSlot *
3555 ExecModifyTable(PlanState *pstate)
3556 {
3557  ModifyTableState *node = castNode(ModifyTableState, pstate);
3558  ModifyTableContext context;
3559  EState *estate = node->ps.state;
3560  CmdType operation = node->operation;
3561  ResultRelInfo *resultRelInfo;
3562  PlanState *subplanstate;
3563  TupleTableSlot *slot;
3564  TupleTableSlot *oldSlot;
3565  ItemPointerData tuple_ctid;
3566  HeapTupleData oldtupdata;
3567  HeapTuple oldtuple;
3568  ItemPointer tupleid;
3569 
3570  CHECK_FOR_INTERRUPTS();
3571 
3572  /*
3573  * This should NOT get called during EvalPlanQual; we should have passed a
3574  * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3575  * Assert because this condition is easy to miss in testing. (Note:
3576  * although ModifyTable should not get executed within an EvalPlanQual
3577  * operation, we do have to allow it to be initialized and shut down in
3578  * case it is within a CTE subplan. Hence this test must be here, not in
3579  * ExecInitModifyTable.)
3580  */
3581  if (estate->es_epq_active != NULL)
3582  elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
3583 
3584  /*
3585  * If we've already completed processing, don't try to do more. We need
3586  * this test because ExecPostprocessPlan might call us an extra time, and
3587  * our subplan's nodes aren't necessarily robust against being called
3588  * extra times.
3589  */
3590  if (node->mt_done)
3591  return NULL;
3592 
3593  /*
3594  * On first call, fire BEFORE STATEMENT triggers before proceeding.
3595  */
3596  if (node->fireBSTriggers)
3597  {
3598  fireBSTriggers(node);
3599  node->fireBSTriggers = false;
3600  }
3601 
3602  /* Preload local variables */
3603  resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
3604  subplanstate = outerPlanState(node);
3605 
3606  /* Set global context */
3607  context.mtstate = node;
3608  context.epqstate = &node->mt_epqstate;
3609  context.estate = estate;
3610 
3611  /*
3612  * Fetch rows from subplan, and execute the required table modification
3613  * for each row.
3614  */
3615  for (;;)
3616  {
3617  /*
3618  * Reset the per-output-tuple exprcontext. This is needed because
3619  * triggers expect to use that context as workspace. It's a bit ugly
3620  * to do this below the top level of the plan, however. We might need
3621  * to rethink this later.
3622  */
3623  ResetPerTupleExprContext(estate);
3624 
3625  /*
3626  * Reset per-tuple memory context used for processing on conflict and
3627  * returning clauses, to free any expression evaluation storage
3628  * allocated in the previous cycle.
3629  */
3630  if (pstate->ps_ExprContext)
3631  ResetExprContext(pstate->ps_ExprContext);
3632 
3633  context.planSlot = ExecProcNode(subplanstate);
3634 
3635  /* No more tuples to process? */
3636  if (TupIsNull(context.planSlot))
3637  break;
3638 
3639  /*
3640  * When there are multiple result relations, each tuple contains a
3641  * junk column that gives the OID of the rel from which it came.
3642  * Extract it and select the correct result relation.
3643  */
3644  if (AttributeNumberIsValid(node->mt_resultOidAttno))
3645  {
3646  Datum datum;
3647  bool isNull;
3648  Oid resultoid;
3649 
3650  datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
3651  &isNull);
3652  if (isNull)
3653  {
3654  /*
3655  * for tableoid are errors. For MERGE, such tuples may instead need to
3656  * be handled by the WHEN NOT MATCHED clauses, if any, so do that.
3657  * them as WHEN NOT MATCHED clauses if any, so do that.
3658  *
3659  * Note that we use the node's toplevel resultRelInfo, not any
3660  * specific partition's.
3661  */
3662  if (operation == CMD_MERGE)
3663  {
3664  EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3665 
3666  ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
3667  continue; /* no RETURNING support yet */
3668  }
3669 
3670  elog(ERROR, "tableoid is NULL");
3671  }
3672  resultoid = DatumGetObjectId(datum);
3673 
3674  /* If it's not the same as last time, we need to locate the rel */
3675  if (resultoid != node->mt_lastResultOid)
3676  resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
3677  false, true);
3678  }
3679 
3680  /*
3681  * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
3682  * here is compute the RETURNING expressions.
3683  */
3684  if (resultRelInfo->ri_usesFdwDirectModify)
3685  {
3686  Assert(resultRelInfo->ri_projectReturning);
3687 
3688  /*
3689  * A scan slot containing the data that was actually inserted,
3690  * updated or deleted has already been made available to
3691  * ExecProcessReturning by IterateDirectModify, so no need to
3692  * provide it here.
3693  */
3694  slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);
3695 
3696  return slot;
3697  }
3698 
3699  EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3700  slot = context.planSlot;
3701 
3702  tupleid = NULL;
3703  oldtuple = NULL;
3704 
3705  /*
3706  * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
3707  * to be updated/deleted/merged. For a heap relation, that's a TID;
3708  * otherwise we may have a wholerow junk attr that carries the old
3709  * tuple in toto. Keep this in step with the part of
3710  * ExecInitModifyTable that sets up ri_RowIdAttNo.
3711  */
3712  if (operation == CMD_UPDATE || operation == CMD_DELETE ||
3713  operation == CMD_MERGE)
3714  {
3715  char relkind;
3716  Datum datum;
3717  bool isNull;
3718 
3719  relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
3720  if (relkind == RELKIND_RELATION ||
3721  relkind == RELKIND_MATVIEW ||
3722  relkind == RELKIND_PARTITIONED_TABLE)
3723  {
3724  /* ri_RowIdAttNo refers to a ctid attribute */
3725  Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
3726  datum = ExecGetJunkAttribute(slot,
3727  resultRelInfo->ri_RowIdAttNo,
3728  &isNull);
3729 
3730  /*
3731  * For commands other than MERGE, any tuples having a null row
3732  * identifier are errors. For MERGE, such tuples may instead need to
3733  * be handled by the WHEN NOT MATCHED clauses, if any, so do that.
3734  *
3735  * Note that we use the node's toplevel resultRelInfo, not any
3736  * specific partition's.
3737  */
3738  if (isNull)
3739  {
3740  if (operation == CMD_MERGE)
3741  {
3742  EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3743 
3744  ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
3745  continue; /* no RETURNING support yet */
3746  }
3747 
3748  elog(ERROR, "ctid is NULL");
3749  }
3750 
3751  tupleid = (ItemPointer) DatumGetPointer(datum);
3752  tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
3753  tupleid = &tuple_ctid;
3754  }
3755 
3756  /*
3757  * Use the wholerow attribute, when available, to reconstruct the
3758  * old relation tuple. The old tuple serves one or both of two
3759  * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
3760  * provides values for any unchanged columns for the NEW tuple of
3761  * an UPDATE, because the subplan does not produce all the columns
3762  * of the target table.
3763  *
3764  * Note that the wholerow attribute does not carry system columns,
3765  * so foreign table triggers miss seeing those, except that we
3766  * know enough here to set t_tableOid. Quite separately from
3767  * this, the FDW may fetch its own junk attrs to identify the row.
3768  *
3769  * Other relevant relkinds, currently limited to views, always
3770  * have a wholerow attribute.
3771  */
3772  else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
3773  {
3774  datum = ExecGetJunkAttribute(slot,
3775  resultRelInfo->ri_RowIdAttNo,
3776  &isNull);
3777  /* shouldn't ever get a null result... */
3778  if (isNull)
3779  elog(ERROR, "wholerow is NULL");
3780 
3781  oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
3782  oldtupdata.t_len =
3783  HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
3784  ItemPointerSetInvalid(&(oldtupdata.t_self));
3785  /* Historically, view triggers see invalid t_tableOid. */
3786  oldtupdata.t_tableOid =
3787  (relkind == RELKIND_VIEW) ? InvalidOid :
3788  RelationGetRelid(resultRelInfo->ri_RelationDesc);
3789 
3790  oldtuple = &oldtupdata;
3791  }
3792  else
3793  {
3794  /* Only foreign tables are allowed to omit a row-ID attr */
3795  Assert(relkind == RELKIND_FOREIGN_TABLE);
3796  }
3797  }
3798 
3799  switch (operation)
3800  {
3801  case CMD_INSERT:
3802  /* Initialize projection info if first time for this table */
3803  if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3804  ExecInitInsertProjection(node, resultRelInfo);
3805  slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
3806  slot = ExecInsert(&context, resultRelInfo, slot,
3807  node->canSetTag, NULL, NULL);
3808  break;
3809 
3810  case CMD_UPDATE:
3811  /* Initialize projection info if first time for this table */
3812  if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3813  ExecInitUpdateProjection(node, resultRelInfo);
3814 
3815  /*
3816  * Make the new tuple by combining plan's output tuple with
3817  * the old tuple being updated.
3818  */
3819  oldSlot = resultRelInfo->ri_oldTupleSlot;
3820  if (oldtuple != NULL)
3821  {
3822  /* Use the wholerow junk attr as the old tuple. */
3823  ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
3824  }
3825  else
3826  {
3827  /* Fetch the most recent version of old tuple. */
3828  Relation relation = resultRelInfo->ri_RelationDesc;
3829 
3830  if (!table_tuple_fetch_row_version(relation, tupleid,
3831  SnapshotAny,
3832  oldSlot))
3833  elog(ERROR, "failed to fetch tuple being updated");
3834  }
3835  slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
3836  oldSlot);
3837  context.relaction = NULL;
3838 
3839  /* Now apply the update. */
3840  slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
3841  slot, node->canSetTag);
3842  break;
3843 
3844  case CMD_DELETE:
3845  slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
3846  true, false, node->canSetTag, NULL, NULL);
3847  break;
3848 
3849  case CMD_MERGE:
3850  slot = ExecMerge(&context, resultRelInfo, tupleid, node->canSetTag);
3851  break;
3852 
3853  default:
3854  elog(ERROR, "unknown operation");
3855  break;
3856  }
3857 
3858  /*
3859  * If we got a RETURNING result, return it to caller. We'll continue
3860  * the work on next call.
3861  */
3862  if (slot)
3863  return slot;
3864  }
3865 
3866  /*
3867  * Insert remaining tuples for batch insert.
3868  */
3869  if (estate->es_insert_pending_result_relations != NIL)
3870  ExecPendingInserts(estate);
3871 
3872  /*
3873  * We're done, but fire AFTER STATEMENT triggers before exiting.
3874  */
3875  fireASTriggers(node);
3876 
3877  node->mt_done = true;
3878 
3879  return NULL;
3880 }
3881 
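The main loop of ExecModifyTable above follows the usual demand-pull executor shape: pull one row from the subplan, perform the modification, and return immediately if a RETURNING projection was produced, resuming from the same loop on the next call; without RETURNING it simply keeps looping until the subplan is exhausted. Below is a minimal standalone C sketch of that control flow, with every name invented for illustration.

#include <stdbool.h>
#include <stdio.h>

typedef struct DemoNode
{
    int next_row;   /* current position in the input */
    int nrows;      /* total number of input rows */
    bool done;      /* all work finished? */
} DemoNode;

/* pretend that only even-numbered rows produce a RETURNING tuple */
static const char *
process_row(int row)
{
    static char buf[32];

    if (row % 2 != 0)
        return NULL;
    snprintf(buf, sizeof(buf), "returned row %d", row);
    return buf;
}

/* one call per output tuple; NULL means all work is finished */
static const char *
exec_node(DemoNode *node)
{
    if (node->done)
        return NULL;
    while (node->next_row < node->nrows)
    {
        const char *result = process_row(node->next_row++);

        if (result != NULL)
            return result;  /* hand one RETURNING row to the caller */
    }
    node->done = true;
    return NULL;
}

int
main(void)
{
    DemoNode node = {0, 5, false};
    const char *row;

    while ((row = exec_node(&node)) != NULL)
        printf("%s\n", row);
    return 0;
}
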
3882 /*
3883  * ExecLookupResultRelByOid
3884  * If the table with given OID is among the result relations to be
3885  * updated by the given ModifyTable node, return its ResultRelInfo.
3886  *
3887  * If not found, return NULL if missing_ok, else raise error.
3888  *
3889  * If update_cache is true, then upon successful lookup, update the node's
3890  * one-element cache. ONLY ExecModifyTable may pass true for this.
3891  */
3892 ResultRelInfo *
3894  bool missing_ok, bool update_cache)
3895 {
3896  if (node->mt_resultOidHash)
3897  {
3898  /* Use the pre-built hash table to locate the rel */
3899  MTTargetRelLookup *mtlookup;
3900 
3901  mtlookup = (MTTargetRelLookup *)
3902  hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
3903  if (mtlookup)
3904  {
3905  if (update_cache)
3906  {
3907  node->mt_lastResultOid = resultoid;
3908  node->mt_lastResultIndex = mtlookup->relationIndex;
3909  }
3910  return node->resultRelInfo + mtlookup->relationIndex;
3911  }
3912  }
3913  else
3914  {
3915  /* With few target rels, just search the ResultRelInfo array */
3916  for (int ndx = 0; ndx < node->mt_nrels; ndx++)
3917  {
3918  ResultRelInfo *rInfo = node->resultRelInfo + ndx;
3919 
3920  if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
3921  {
3922  if (update_cache)
3923  {
3924  node->mt_lastResultOid = resultoid;
3925  node->mt_lastResultIndex = ndx;
3926  }
3927  return rInfo;
3928  }
3929  }
3930  }
3931 
3932  if (!missing_ok)
3933  elog(ERROR, "incorrect result relation OID %u", resultoid);
3934  return NULL;
3935 }
3936 
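ExecLookupResultRelByOid above combines a one-element cache (mt_lastResultOid / mt_lastResultIndex) with either a hash probe or a linear scan, so runs of consecutive tuples that target the same result relation pay for at most one real lookup. Below is a minimal standalone C sketch of such a memoized lookup; the names are hypothetical and only the linear-search path is shown.

#include <stdio.h>

#define NRELS 4

static unsigned int rel_oids[NRELS] = {100, 200, 300, 400};

static unsigned int last_oid = 0;   /* 0 never matches a real OID here */
static int last_index = 0;

static int
lookup_rel(unsigned int oid)
{
    /* fast path: same relation as the previous tuple */
    if (oid == last_oid)
        return last_index;

    /* slow path: linear search, then refresh the one-element cache */
    for (int i = 0; i < NRELS; i++)
    {
        if (rel_oids[i] == oid)
        {
            last_oid = oid;
            last_index = i;
            return i;
        }
    }
    return -1;              /* not found */
}

int
main(void)
{
    printf("%d\n", lookup_rel(300));    /* linear search, caches the result */
    printf("%d\n", lookup_rel(300));    /* cache hit, no search */
    printf("%d\n", lookup_rel(999));    /* miss */
    return 0;
}
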
3937 /* ----------------------------------------------------------------
3938  * ExecInitModifyTable
3939  * ----------------------------------------------------------------
3940  */
3941 ModifyTableState *
3942 ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
3943 {
3944  ModifyTableState *mtstate;
3945  Plan *subplan = outerPlan(node);
3946  CmdType operation = node->operation;
3947  int nrels = list_length(node->resultRelations);
3948  ResultRelInfo *resultRelInfo;
3949  List *arowmarks;
3950  ListCell *l;
3951  int i;
3952  Relation rel;
3953 
3954  /* check for unsupported flags */
3955  Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
3956 
3957  /*
3958  * create state structure
3959  */
3960  mtstate = makeNode(ModifyTableState);
3961  mtstate->ps.plan = (Plan *) node;
3962  mtstate->ps.state = estate;
3963  mtstate->ps.ExecProcNode = ExecModifyTable;
3964 
3965  mtstate->operation = operation;
3966  mtstate->canSetTag = node->canSetTag;
3967  mtstate->mt_done = false;
3968 
3969  mtstate->mt_nrels = nrels;
3970  mtstate->resultRelInfo = (ResultRelInfo *)
3971  palloc(nrels * sizeof(ResultRelInfo));
3972 
3973  mtstate->mt_merge_inserted = 0;
3974  mtstate->mt_merge_updated = 0;
3975  mtstate->mt_merge_deleted = 0;
3976 
3977  /*----------
3978  * Resolve the target relation. This is the same as:
3979  *
3980  * - the relation for which we will fire FOR STATEMENT triggers,
3981  * - the relation into whose tuple format all captured transition tuples
3982  * must be converted, and
3983  * - the root partitioned table used for tuple routing.
3984  *
3985  * If it's a partitioned or inherited table, the root partition or
3986  * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
3987  * given explicitly in node->rootRelation. Otherwise, the target relation
3988  * is the sole relation in the node->resultRelations list.
3989  *----------
3990  */
3991  if (node->rootRelation > 0)
3992  {
3993  mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
3994  ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
3995  node->rootRelation);
3996  }
3997  else
3998  {
3999  Assert(list_length(node->resultRelations) == 1);
4000  mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4001  ExecInitResultRelation(estate, mtstate->resultRelInfo,
4002  linitial_int(node->resultRelations));
4003  }
4004 
4005  /* set up epqstate with dummy subplan data for the moment */
4006  EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4007  node->epqParam, node->resultRelations);
4008  mtstate->fireBSTriggers = true;
4009 
4010  /*
4011  * Build state for collecting transition tuples. This requires having a
4012  * valid trigger query context, so skip it in explain-only mode.
4013  */
4014  if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4015  ExecSetupTransitionCaptureState(mtstate, estate);
4016 
4017  /*
4018  * Open all the result relations and initialize the ResultRelInfo structs.
4019  * (But root relation was initialized above, if it's part of the array.)
4020  * We must do this before initializing the subplan, because direct-modify
4021  * FDWs expect their ResultRelInfos to be available.
4022  */
4023  resultRelInfo = mtstate->resultRelInfo;
4024  i = 0;
4025  foreach(l, node->resultRelations)
4026  {
4027  Index resultRelation = lfirst_int(l);
4028 
4029  if (resultRelInfo != mtstate->rootResultRelInfo)
4030  {
4031  ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4032 
4033  /*
4034  * For child result relations, store the root result relation
4035  * pointer. We do so for the convenience of places that want to
4036  * look at the query's original target relation but don't have the
4037  * mtstate handy.
4038  */
4039  resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4040  }
4041 
4042  /* Initialize the usesFdwDirectModify flag */
4043  resultRelInfo->ri_usesFdwDirectModify =
4044  bms_is_member(i, node->fdwDirectModifyPlans);
4045 
4046  /*
4047  * Verify result relation is a valid target for the current operation
4048  */
4049  CheckValidResultRel(resultRelInfo, operation);
4050 
4051  resultRelInfo++;
4052  i++;
4053  }
4054 
4055  /*
4056  * Now we may initialize the subplan.
4057  */
4058  outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4059 
4060  /*
4061  * Do additional per-result-relation initialization.
4062  */
4063  for (i = 0; i < nrels; i++)
4064  {
4065  resultRelInfo = &mtstate->resultRelInfo[i];
4066 
4067  /* Let FDWs init themselves for foreign-table result rels */
4068  if (!resultRelInfo->ri_usesFdwDirectModify &&
4069  resultRelInfo->ri_FdwRoutine != NULL &&
4070  resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4071  {
4072  List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4073 
4074  resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4075  resultRelInfo,
4076  fdw_private,
4077  i,
4078  eflags);
4079  }
4080 
4081  /*
4082  * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4083  * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4084  * tables, the FDW might have created additional junk attr(s), but
4085  * those are no concern of ours.
4086  */
4087  if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4088  operation == CMD_MERGE)
4089  {
4090  char relkind;
4091 
4092  relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4093  if (relkind == RELKIND_RELATION ||
4094  relkind == RELKIND_MATVIEW ||
4095  relkind == RELKIND_PARTITIONED_TABLE)
4096  {
4097  resultRelInfo->ri_RowIdAttNo =
4098  ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4099  if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4100  elog(ERROR, "could not find junk ctid column");
4101  }
4102  else if (relkind == RELKIND_FOREIGN_TABLE)
4103  {
4104  /*
4105  * We don't support MERGE with foreign tables for now. (It's
4106  * problematic because the implementation uses CTID.)
4107  */
4108  Assert(operation != CMD_MERGE);
4109 
4110  /*
4111  * When there is a row-level trigger, there should be a
4112  * wholerow attribute. We also require it to be present in
4113  * UPDATE and MERGE, so we can get the values of unchanged
4114  * columns.
4115  */
4116  resultRelInfo->ri_RowIdAttNo =
4117  ExecFindJunkAttributeInTlist(subplan->targetlist,
4118  "wholerow");
4119  if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4120  !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4121  elog(ERROR, "could not find junk wholerow column");
4122  }
4123  else
4124  {
4125  /* No support for MERGE */
4126  Assert(operation != CMD_MERGE);
4127  /* Other valid target relkinds must provide wholerow */
4128  resultRelInfo->ri_RowIdAttNo =
4129  ExecFindJunkAttributeInTlist(subplan->targetlist,
4130  "wholerow");
4131  if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4132  elog(ERROR, "could not find junk wholerow column");
4133  }
4134  }
4135  }
4136 
4137  /*
4138  * If this is an inherited update/delete/merge, there will be a junk
4139  * attribute named "tableoid" present in the subplan's targetlist. It
4140  * will be used to identify the result relation for a given tuple to be
4141  * updated/deleted/merged.
4142  */
4143  mtstate->mt_resultOidAttno =
4144  ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4145  Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4146  mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4147  mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4148 
4149  /* Get the root target relation */
4150  rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4151 
4152  /*
4153  * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4154  * or MERGE might need this too, but only if it actually moves tuples
4155  * between partitions; in that case setup is done by
4156  * ExecCrossPartitionUpdate.
4157  */
4158  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4159  operation == CMD_INSERT)
4160  mtstate->mt_partition_tuple_routing =
4161  ExecSetupPartitionTupleRouting(estate, rel);
4162 
4163  /*
4164  * Initialize any WITH CHECK OPTION constraints if needed.
4165  */
4166  resultRelInfo = mtstate->resultRelInfo;
4167  foreach(l, node->withCheckOptionLists)
4168  {
4169  List *wcoList = (List *) lfirst(l);
4170  List *wcoExprs = NIL;
4171  ListCell *ll;
4172 
4173  foreach(ll, wcoList)
4174  {
4175  WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4176  ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4177  &mtstate->ps);
4178 
4179  wcoExprs = lappend(wcoExprs, wcoExpr);
4180  }
4181 
4182  resultRelInfo->ri_WithCheckOptions = wcoList;
4183  resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4184  resultRelInfo++;
4185  }
4186 
4187  /*
4188  * Initialize RETURNING projections if needed.
4189  */
4190  if (node->returningLists)
4191  {
4192  TupleTableSlot *slot;
4193  ExprContext *econtext;
4194 
4195  /*
4196  * Initialize result tuple slot and assign its rowtype using the first
4197  * RETURNING list. We assume the rest will look the same.
4198  */
4199  mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4200 
4201  /* Set up a slot for the output of the RETURNING projection(s) */
4202  ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4203  slot = mtstate->ps.ps_ResultTupleSlot;
4204 
4205  /* Need an econtext too */
4206  if (mtstate->ps.ps_ExprContext == NULL)
4207  ExecAssignExprContext(estate, &mtstate->ps);
4208  econtext = mtstate->ps.ps_ExprContext;
4209 
4210  /*
4211  * Build a projection for each result rel.
4212  */
4213  resultRelInfo = mtstate->resultRelInfo;
4214  foreach(l, node->returningLists)
4215  {
4216  List *rlist = (List *) lfirst(l);
4217 
4218  resultRelInfo->ri_returningList = rlist;
4219  resultRelInfo->ri_projectReturning =
4220  ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4221  resultRelInfo->ri_RelationDesc->rd_att);
4222  resultRelInfo++;
4223  }
4224  }
4225  else
4226  {
4227  /*
4228  * We still must construct a dummy result tuple type, because InitPlan
4229  * expects one (maybe should change that?).
4230  */
4231  mtstate->ps.plan->targetlist = NIL;
4232  ExecInitResultTypeTL(&mtstate->ps);
4233 
4234  mtstate->ps.ps_ExprContext = NULL;
4235  }
4236 
4237  /* Set the list of arbiter indexes if needed for ON CONFLICT */
4238  resultRelInfo = mtstate->resultRelInfo;
4239  if (node->onConflictAction != ONCONFLICT_NONE)
4240  {
4241  /* insert may only have one relation, inheritance is not expanded */
4242  Assert(nrels == 1);
4243  resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4244  }
4245 
4246  /*
4247  * If needed, initialize the target list, projection and qual for ON CONFLICT
4248  * DO UPDATE.
4249  */
4250  if (node->onConflictAction == ONCONFLICT_UPDATE)
4251  {
4252  OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4253  ExprContext *econtext;
4254  TupleDesc relationDesc;
4255 
4256  /* already exists if created by RETURNING processing above */
4257  if (mtstate->ps.ps_ExprContext == NULL)
4258  ExecAssignExprContext(estate, &mtstate->ps);
4259 
4260  econtext = mtstate->ps.ps_ExprContext;
4261  relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4262 
4263  /* create state for DO UPDATE SET operation */
4264  resultRelInfo->ri_onConflict = onconfl;
4265 
4266  /* initialize slot for the existing tuple */
4267  onconfl->oc_Existing =
4268  table_slot_create(resultRelInfo->ri_RelationDesc,
4269  &mtstate->ps.state->es_tupleTable);
4270 
4271  /*
4272  * Create the tuple slot for the UPDATE SET projection. We want a slot
4273  * of the table's type here, because the slot will be used to insert
4274  * into the table, and for RETURNING processing - which may access
4275  * system attributes.
4276  */
4277  onconfl->oc_ProjSlot =
4278  table_slot_create(resultRelInfo->ri_RelationDesc,
4279  &mtstate->ps.state->es_tupleTable);
4280 
4281  /* build UPDATE SET projection state */
4282  onconfl->oc_ProjInfo =
4283  ExecBuildUpdateProjection(node->onConflictSet,
4284  true,
4285  node->onConflictCols,
4286  relationDesc,
4287  econtext,
4288  onconfl->oc_ProjSlot,
4289  &mtstate->ps);
4290 
4291  /* initialize state to evaluate the WHERE clause, if any */
4292  if (node->onConflictWhere)
4293  {
4294  ExprState *qualexpr;
4295 
4296  qualexpr = ExecInitQual((List *) node->onConflictWhere,
4297  &mtstate->ps);
4298  onconfl->oc_WhereClause = qualexpr;
4299  }
4300  }
4301 
4302  /*
4303  * If we have any secondary relations in an UPDATE or DELETE, they need to
4304  * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4305  * EvalPlanQual mechanism needs to be told about them. This also goes for
4306  * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4307  */
4308  arowmarks = NIL;
4309  foreach(l, node->rowMarks)
4310  {
4311  PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4312  ExecRowMark *erm;
4313  ExecAuxRowMark *aerm;
4314 
4315  /* ignore "parent" rowmarks; they are irrelevant at runtime */
4316  if (rc->isParent)
4317  continue;
4318 
4319  /* Find ExecRowMark and build ExecAuxRowMark */
4320  erm = ExecFindRowMark(estate, rc->rti, false);
4321  aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4322  arowmarks = lappend(arowmarks, aerm);
4323  }
4324 
4325  /* For a MERGE command, initialize its state */
4326  if (mtstate->operation == CMD_MERGE)
4327  ExecInitMerge(mtstate, estate);
4328 
4329  EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4330 
4331  /*
4332  * If there are a lot of result relations, use a hash table to speed the
4333  * lookups. If there are not a lot, a simple linear search is faster.
4334  *
4335  * It's not clear where the threshold is, but try 64 for starters. In a
4336  * debugging build, use a small threshold so that we get some test
4337  * coverage of both code paths.
4338  */
4339 #ifdef USE_ASSERT_CHECKING
4340 #define MT_NRELS_HASH 4
4341 #else
4342 #define MT_NRELS_HASH 64
4343 #endif
4344  if (nrels >= MT_NRELS_HASH)
4345  {
4346  HASHCTL hash_ctl;
4347 
4348  hash_ctl.keysize = sizeof(Oid);
4349  hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4350  hash_ctl.hcxt = CurrentMemoryContext;
4351  mtstate->mt_resultOidHash =
4352  hash_create("ModifyTable target hash",
4353  nrels, &hash_ctl,
4354  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4355  for (i = 0; i < nrels; i++)
4356  {
4357  Oid hashkey;
4358  MTTargetRelLookup *mtlookup;
4359  bool found;
4360 
4361  resultRelInfo = &mtstate->resultRelInfo[i];
4362  hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4363  mtlookup = (MTTargetRelLookup *)
4364  hash_search(mtstate->mt_resultOidHash, &hashkey,
4365  HASH_ENTER, &found);
4366  Assert(!found);
4367  mtlookup->relationIndex = i;
4368  }
4369  }
4370  else
4371  mtstate->mt_resultOidHash = NULL;
4372 
4373  /*
4374  * Determine whether the FDW supports batch insert and, if so, the batch
4375  * size (an FDW may support batching, but it may be disabled for the
4376  * server/table).
4377  *
4378  * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4379  * remains set to 0.
4380  */
4381  if (operation == CMD_INSERT)
4382  {
4383  /* insert may only have one relation, inheritance is not expanded */
4384  Assert(nrels == 1);
4385  resultRelInfo = mtstate->resultRelInfo;
4386  if (!resultRelInfo->ri_usesFdwDirectModify &&
4387  resultRelInfo->ri_FdwRoutine != NULL &&
4388  resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4389  resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4390  {
4391  resultRelInfo->ri_BatchSize =
4392  resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4393  Assert(resultRelInfo->ri_BatchSize >= 1);
4394  }
4395  else
4396  resultRelInfo->ri_BatchSize = 1;
4397  }
4398 
4399  /*
4400  * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4401  * to estate->es_auxmodifytables so that it will be run to completion by
4402  * ExecPostprocessPlan. (It'd actually work fine to add the primary
4403  * ModifyTable node too, but there's no need.) Note the use of lcons not
4404  * lappend: we need later-initialized ModifyTable nodes to be shut down
4405  * before earlier ones. This ensures that we don't throw away RETURNING
4406  * rows that need to be seen by a later CTE subplan.
4407  */
4408  if (!mtstate->canSetTag)
4409  estate->es_auxmodifytables = lcons(mtstate,
4410  estate->es_auxmodifytables);
4411 
4412  return mtstate;
4413 }
4414 
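The lcons-rather-than-lappend choice near the end of ExecInitModifyTable matters because es_auxmodifytables is later walked from front to back; prepending each newly initialized node therefore makes later-initialized ModifyTable nodes run to completion before earlier ones, preserving RETURNING rows needed by later CTE subplans. Below is a minimal standalone C sketch of that prepend-for-LIFO ordering, using an illustrative singly-linked list rather than PostgreSQL's List type.

#include <stdio.h>
#include <stdlib.h>

typedef struct Cell
{
    int node_id;
    struct Cell *next;
} Cell;

/* prepend, in the spirit of lcons: the newest entry ends up first */
static Cell *
prepend(Cell *head, int node_id)
{
    Cell *cell = malloc(sizeof(Cell));

    cell->node_id = node_id;
    cell->next = head;
    return cell;
}

int
main(void)
{
    Cell *list = NULL;

    /* nodes initialized in order 1, 2, 3 */
    for (int id = 1; id <= 3; id++)
        list = prepend(list, id);

    /* walking front to back now visits 3, 2, 1: later nodes finish first */
    for (Cell *c = list; c != NULL; c = c->next)
        printf("run node %d to completion\n", c->node_id);

    /* free the demo list */
    while (list != NULL)
    {
        Cell *next = list->next;

        free(list);
        list = next;
    }
    return 0;
}
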
4415 /* ----------------------------------------------------------------
4416  * ExecEndModifyTable
4417  *
4418  * Shuts down the plan.
4419  *
4420  * Returns nothing of interest.
4421  * ----------------------------------------------------------------
4422  */
4423 void
4424 ExecEndModifyTable(ModifyTableState *node)
4425 {
4426  int i;
4427 
4428  /*
4429  * Allow any FDWs to shut down
4430  */
4431  for (i = 0; i < node->mt_nrels; i++)
4432  {
4433  int j;
4434  ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4435 
4436  if (!resultRelInfo->ri_usesFdwDirectModify &&
4437  resultRelInfo->ri_FdwRoutine != NULL &&
4438  resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4439  resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4440  resultRelInfo);
4441 
4442  /*
4443  * Cleanup the initialized batch slots. This only matters for FDWs
4444  * with batching, but the other cases will have ri_NumSlotsInitialized
4445  * == 0.
4446  */
4447  for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4448  {
4449  ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4450  ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4451  }
4452  }
4453 
4454  /*
4455  * Close all the partitioned tables, leaf partitions, and their indices
4456  * and release the slot used for tuple routing, if set.
4457  */
4458  if (node->mt_partition_tuple_routing)
4459  {
4460  ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4461 
4462  if (node->mt_root_tuple_slot)
4463  ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4464  }
4465 
4466  /*
4467  * Terminate EPQ execution if active
4468  */
4469  EvalPlanQualEnd(&node->mt_epqstate);
4470 
4471  /*
4472  * shut down subplan
4473  */
4474  ExecEndNode(outerPlanState(node));
4475 }
4476 
4477 void
4478 ExecReScanModifyTable(ModifyTableState *node)
4479 {
4480  /*
4481  * Currently, we don't need to support rescan on ModifyTable nodes. The
4482  * semantics of that would be a bit debatable anyway.
4483  */
4484  elog(ERROR, "ExecReScanModifyTable is not implemented");
4485 }
#define AttributeNumberIsValid(attributeNumber)
Definition: attnum.h:34
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:460
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:753
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:527
static Datum values[MAXATTR]
Definition: bootstrap.c:156
unsigned int uint32
Definition: c.h:495
#define unlikely(x)
Definition: c.h:300
unsigned int Index
Definition: c.h:603
uint32 TransactionId
Definition: c.h:641
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
int errdetail(const char *fmt,...)
Definition: elog.c:1202
int errhint(const char *fmt,...)
Definition: elog.c:1316
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
ExprState * ExecPrepareExpr(Expr *node, EState *estate)
Definition: execExpr.c:736
ProjectionInfo * ExecBuildUpdateProjection(List *targetList, bool evalTargetList, List *targetColnos, TupleDesc relDesc, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent)
Definition: execExpr.c:518
ExprState * ExecInitQual(List *qual, PlanState *parent)
Definition: execExpr.c:214
ProjectionInfo * ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent, TupleDesc inputDesc)
Definition: execExpr.c:358
bool ExecCheckIndexConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, ItemPointer conflictTid, List *arbiterIndexes)
Definition: execIndexing.c:527
List * ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool update, bool noDupErr, bool *specConflict, List *arbiterIndexes, bool onlySummarizing)
Definition: execIndexing.c:298
void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
Definition: execIndexing.c:156
AttrNumber ExecFindJunkAttributeInTlist(List *targetlist, const char *attrName)
Definition: execJunk.c:222
LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
Definition: execMain.c:2375
void EvalPlanQualBegin(EPQState *epqstate)
Definition: execMain.c:2775
TupleTableSlot * EvalPlanQual(EPQState *epqstate, Relation relation, Index rti, TupleTableSlot *inputslot)
Definition: execMain.c:2494
bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool emitError)
Definition: execMain.c:1816
void EvalPlanQualInit(EPQState *epqstate, EState *parentestate, Plan *subplan, List *auxrowmarks, int epqParam, List *resultRelations)
Definition: execMain.c:2563
void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:2075
ExecAuxRowMark * ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
Definition: execMain.c:2424
void EvalPlanQualEnd(EPQState *epqstate)
Definition: execMain.c:3006
void EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
Definition: execMain.c:2605
ExecRowMark * ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
Definition: execMain.c:2401
List * ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
Definition: execMain.c:1396
TupleTableSlot * EvalPlanQualSlot(EPQState *epqstate, Relation relation, Index rti)
Definition: execMain.c:2622
void ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:1869
void ExecConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
Definition: execMain.c:1940
void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
Definition: execMain.c:1024
ResultRelInfo * ExecFindPartition(ModifyTableState *mtstate, ResultRelInfo *rootResultRelInfo, PartitionTupleRouting *proute, TupleTableSlot *slot, EState *estate)
PartitionTupleRouting * ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
void ExecCleanupTupleRouting(ModifyTableState *mtstate, PartitionTupleRouting *proute)
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:557
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:142
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:83
TupleTableSlot * ExecStoreVirtualTuple(TupleTableSlot *slot)
Definition: execTuples.c:1553
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1255
TupleTableSlot * ExecStoreAllNullTuple(TupleTableSlot *slot)
Definition: execTuples.c:1577
void ExecInitResultTypeTL(PlanState *planstate)
Definition: execTuples.c:1756
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1800
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1239
void ExecForceStoreHeapTuple(HeapTuple tuple, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1470
Bitmapset * ExecGetUpdatedCols(ResultRelInfo *relinfo, EState *estate)
Definition: execUtils.c:1293
TupleTableSlot * ExecGetReturningSlot(EState *estate, ResultRelInfo *relInfo)
Definition: execUtils.c:1187
void ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Index rti)
Definition: execUtils.c:819
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:488
TupleConversionMap * ExecGetRootToChildMap(ResultRelInfo *resultRelInfo, EState *estate)
Definition: execUtils.c:1237
TupleConversionMap * ExecGetChildToRootMap(ResultRelInfo *resultRelInfo)
Definition: execUtils.c:1211
#define MERGE_UPDATE
Definition: execnodes.h:1265
#define InstrCountFiltered1(node, delta)
Definition: execnodes.h:1140
#define outerPlanState(node)
Definition: execnodes.h:1132
#define InstrCountTuples2(node, delta)
Definition: execnodes.h:1135
#define MERGE_INSERT
Definition: execnodes.h:1264
#define MERGE_DELETE
Definition: execnodes.h:1266
#define EXEC_FLAG_BACKWARD
Definition: executor.h:68
#define ResetPerTupleExprContext(estate)
Definition: executor.h:558
#define GetPerTupleExprContext(estate)
Definition: executor.h:549
static TupleTableSlot * ExecProject(ProjectionInfo *projInfo)
Definition: executor.h:375
#define ResetExprContext(econtext)
Definition: executor.h:543
#define GetPerTupleMemoryContext(estate)
Definition: executor.h:554
static bool ExecQual(ExprState *state, ExprContext *econtext)
Definition: executor.h:412
#define EvalPlanQualSetSlot(epqstate, slot)
Definition: executor.h:243
static Datum ExecEvalExpr(ExprState *state, ExprContext *econtext, bool *isNull)
Definition: executor.h:332
#define EXEC_FLAG_EXPLAIN_ONLY
Definition: executor.h:65
static Datum ExecGetJunkAttribute(TupleTableSlot *slot, AttrNumber attno, bool *isNull)
Definition: executor.h:190
#define EXEC_FLAG_MARK
Definition: executor.h:69
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:268
#define DatumGetHeapTupleHeader(X)
Definition: fmgr.h:295
char * format_type_be(Oid type_oid)
Definition: format_type.c:343
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
#define HeapTupleHeaderGetDatumLength(tup)
Definition: htup_details.h:450
long val
Definition: informix.c:664
int j
Definition: isn.c:74
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
static void ItemPointerSetInvalid(ItemPointerData *pointer)
Definition: itemptr.h:184
static bool ItemPointerIndicatesMovedPartitions(const ItemPointerData *pointer)
Definition: itemptr.h:197
ItemPointerData * ItemPointer
Definition: itemptr.h:49
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
Definition: itemptr.h:172
Assert(fmt[strlen(fmt) - 1] !='\n')
List * lappend(List *list, void *datum)
Definition: list.c:338
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:681
void list_free(List *list)
Definition: list.c:1545
List * lcons(void *datum, List *list)
Definition: list.c:494
uint32 SpeculativeInsertionLockAcquire(TransactionId xid)
Definition: lmgr.c:783
void SpeculativeInsertionLockRelease(TransactionId xid)
Definition: lmgr.c:809
@ LockWaitBlock
Definition: lockoptions.h:39
LockTupleMode
Definition: lockoptions.h:50
@ LockTupleExclusive
Definition: lockoptions.h:58
void * palloc0(Size size)
Definition: mcxt.c:1257
MemoryContext CurrentMemoryContext
Definition: mcxt.c:135
void * palloc(Size size)
Definition: mcxt.c:1226
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:417
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:43
static void ExecInitInsertProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
static void ExecPendingInserts(EState *estate)
static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
void ExecInitMergeTupleSlots(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
static TupleTableSlot * ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, bool canSetTag)
struct ModifyTableContext ModifyTableContext
static void ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate)
static TupleTableSlot * ExecInsert(ModifyTableContext *context, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, bool canSetTag, TupleTableSlot **inserted_tuple, ResultRelInfo **insert_destrel)
void ExecInitStoredGenerated(ResultRelInfo *resultRelInfo, EState *estate, CmdType cmdtype)
static void ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, bool canSetTag)
static bool ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot **epqreturnslot, TM_Result *result)
static void ExecCheckPlanOutput(Relation resultRel, List *targetList)
static TupleTableSlot * ExecModifyTable(PlanState *pstate)
static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context, ResultRelInfo *sourcePartInfo, ResultRelInfo *destPartInfo, ItemPointer tupleid, TupleTableSlot *oldslot, TupleTableSlot *newslot)
TupleTableSlot * ExecGetUpdateNewTuple(ResultRelInfo *relinfo, TupleTableSlot *planSlot, TupleTableSlot *oldSlot)
static void ExecInitUpdateProjection(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
static void ExecCheckTIDVisible(EState *estate, ResultRelInfo *relinfo, ItemPointer tid, TupleTableSlot *tempSlot)
static TM_Result ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, bool changingPart)
static void ExecCheckTupleVisible(EState *estate, Relation rel, TupleTableSlot *slot)
static TupleTableSlot * ExecGetInsertNewTuple(ResultRelInfo *relinfo, TupleTableSlot *planSlot)
void ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, EState *estate, TupleTableSlot *slot, CmdType cmdtype)
static TupleTableSlot * ExecPrepareTupleRouting(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, ResultRelInfo *targetRelInfo, TupleTableSlot *slot, ResultRelInfo **partRelInfo)
struct MTTargetRelLookup MTTargetRelLookup
static TupleTableSlot * ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, bool canSetTag)
ResultRelInfo * ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache)
struct UpdateContext UpdateContext
#define MT_NRELS_HASH
static TM_Result ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, bool canSetTag, UpdateContext *updateCxt)
static void ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot)
static bool ExecCrossPartitionUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, bool canSetTag, UpdateContext *updateCxt, TupleTableSlot **retry_slot, TupleTableSlot **inserted_tuple, ResultRelInfo **insert_destrel)
static void fireBSTriggers(ModifyTableState *node)
static TupleTableSlot * ExecProcessReturning(ResultRelInfo *resultRelInfo, TupleTableSlot *tupleSlot, TupleTableSlot *planSlot)
void ExecReScanModifyTable(ModifyTableState *node)
void ExecEndModifyTable(ModifyTableState *node)
ModifyTableState * ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
static void fireASTriggers(ModifyTableState *node)
static bool ExecOnConflictUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer conflictTid, TupleTableSlot *excludedSlot, bool canSetTag, TupleTableSlot **returning)
static void ExecBatchInsert(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo, TupleTableSlot **slots, TupleTableSlot **planSlots, int numSlots, EState *estate, bool canSetTag)
static bool ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, TM_Result *result)
static void ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
static bool ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, bool canSetTag)
static void ExecInitMerge(ModifyTableState *mtstate, EState *estate)
static TupleTableSlot * ExecDelete(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, bool processReturning, bool changingPart, bool canSetTag, bool *tupleDeleted, TupleTableSlot **epqreturnslot)
#define IsA(nodeptr, _type_)
Definition: nodes.h:179
OnConflictAction
Definition: nodes.h:427
@ ONCONFLICT_NONE
Definition: nodes.h:428
@ ONCONFLICT_UPDATE
Definition: nodes.h:430
@ ONCONFLICT_NOTHING
Definition: nodes.h:429
CmdType
Definition: nodes.h:274
@ CMD_MERGE
Definition: nodes.h:280
@ CMD_INSERT
Definition: nodes.h:278
@ CMD_DELETE
Definition: nodes.h:279
@ CMD_UPDATE
Definition: nodes.h:277
@ CMD_NOTHING
Definition: nodes.h:283
#define makeNode(_type_)
Definition: nodes.h:176
#define castNode(_type_, nodeptr)
Definition: nodes.h:197
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:138
WCOKind
Definition: parsenodes.h:1305
@ WCO_RLS_MERGE_UPDATE_CHECK
Definition: parsenodes.h:1310
@ WCO_RLS_CONFLICT_CHECK
Definition: parsenodes.h:1309
@ WCO_RLS_INSERT_CHECK
Definition: parsenodes.h:1307
@ WCO_VIEW_CHECK
Definition: parsenodes.h:1306
@ WCO_RLS_UPDATE_CHECK
Definition: parsenodes.h:1308
@ WCO_RLS_MERGE_DELETE_CHECK
Definition: parsenodes.h:1311
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:209
#define lfirst(lc)
Definition: pg_list.h:172
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:467
#define lfirst_int(lc)
Definition: pg_list.h:173
#define linitial_int(l)
Definition: pg_list.h:179
#define linitial(l)
Definition: pg_list.h:178
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299
#define plan(x)
Definition: pg_regress.c:162
#define ERRCODE_T_R_SERIALIZATION_FAILURE
Definition: pgbench.c:76
#define outerPlan(node)
Definition: plannodes.h:182
uintptr_t Datum
Definition: postgres.h:64
static Oid DatumGetObjectId(Datum X)
Definition: postgres.h:242
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
static TransactionId DatumGetTransactionId(Datum X)
Definition: postgres.h:262
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
static void test(void)
#define RelationGetRelid(relation)
Definition: rel.h:504
#define RelationGetDescr(relation)
Definition: rel.h:530
#define RelationGetRelationName(relation)
Definition: rel.h:538
Node * build_column_default(Relation rel, int attrno)
int RI_FKey_trigger_type(Oid tgfoid)
Definition: ri_triggers.c:3010
#define SnapshotAny
Definition: snapmgr.h:33
uint64 es_processed
Definition: execnodes.h:662
List * es_insert_pending_result_relations
Definition: execnodes.h:714
MemoryContext es_query_cxt
Definition: execnodes.h:658
List * es_tupleTable
Definition: execnodes.h:660
struct EPQState * es_epq_active
Definition: execnodes.h:690
CommandId es_output_cid
Definition: execnodes.h:630
List * es_insert_pending_modifytables
Definition: execnodes.h:715
Snapshot es_snapshot
Definition: execnodes.h:615
List * es_auxmodifytables
Definition: execnodes.h:675
Snapshot es_crosscheck_snapshot
Definition: execnodes.h:616
TupleTableSlot * ecxt_innertuple
Definition: execnodes.h:250
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:248
TupleTableSlot * ecxt_outertuple
Definition: execnodes.h:252
BeginForeignModify_function BeginForeignModify
Definition: fdwapi.h:231
EndForeignModify_function EndForeignModify
Definition: fdwapi.h:237
ExecForeignInsert_function ExecForeignInsert
Definition: fdwapi.h:232
ExecForeignUpdate_function ExecForeignUpdate
Definition: fdwapi.h:235
ExecForeignBatchInsert_function ExecForeignBatchInsert
Definition: fdwapi.h:233
GetForeignModifyBatchSize_function GetForeignModifyBatchSize
Definition: fdwapi.h:234
ExecForeignDelete_function ExecForeignDelete
Definition: fdwapi.h:236
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
Definition: pg_list.h:54
MergeAction * mas_action
Definition: execnodes.h:418
ProjectionInfo * mas_proj
Definition: execnodes.h:419
ExprState * mas_whenqual
Definition: execnodes.h:421
bool matched
Definition: primnodes.h:1738
CmdType commandType
Definition: primnodes.h:1739
MergeActionState * relaction
TM_FailureData tmfd
TupleTableSlot * planSlot
TupleTableSlot * cpUpdateReturningSlot
ModifyTableState * mtstate
CmdType operation
Definition: execnodes.h:1275
ResultRelInfo * resultRelInfo
Definition: execnodes.h:1279
double mt_merge_deleted
Definition: execnodes.h:1324
struct PartitionTupleRouting * mt_partition_tuple_routing
Definition: execnodes.h:1310
double mt_merge_inserted
Definition: execnodes.h:1322
TupleTableSlot * mt_root_tuple_slot
Definition: execnodes.h:1307
EPQState mt_epqstate
Definition: execnodes.h:1289
int mt_merge_subcommands
Definition: execnodes.h:1319
double mt_merge_updated
Definition: execnodes.h:1323
PlanState ps
Definition: execnodes.h:1274
HTAB * mt_resultOidHash
Definition: execnodes.h:1301
ResultRelInfo * rootResultRelInfo
Definition: execnodes.h:1287
struct TransitionCaptureState * mt_transition_capture
Definition: execnodes.h:1313
struct TransitionCaptureState * mt_oc_transition_capture
Definition: execnodes.h:1316
List * updateColnosLists
Definition: plannodes.h:238
List * arbiterIndexes
Definition: plannodes.h:246
List * onConflictCols
Definition: plannodes.h:248
CmdType operation
Definition: plannodes.h:232
int epqParam
Definition: plannodes.h:244
List * resultRelations
Definition: plannodes.h:237
Bitmapset * fdwDirectModifyPlans
Definition: plannodes.h:242
List * onConflictSet
Definition: plannodes.h:247
List * mergeActionLists
Definition: plannodes.h:252
bool canSetTag
Definition: plannodes.h:233
List * fdwPrivLists
Definition: plannodes.h:241
List * returningLists
Definition: plannodes.h:240
List * withCheckOptionLists
Definition: plannodes.h:239
Index rootRelation
Definition: plannodes.h:235
Node * onConflictWhere
Definition: plannodes.h:249
List * rowMarks
Definition: plannodes.h:243
OnConflictAction onConflictAction
Definition: plannodes.h:245
Definition: nodes.h:129
TupleTableSlot * oc_ProjSlot
Definition: execnodes.h:403
TupleTableSlot * oc_Existing
Definition: execnodes.h:402
ExprState * oc_WhereClause
Definition: execnodes.h:405
ProjectionInfo * oc_ProjInfo
Definition: execnodes.h:404
bool isParent
Definition: plannodes.h:1387
Plan * plan
Definition: execnodes.h:1036
EState * state
Definition: execnodes.h:1038
ExprContext * ps_ExprContext
Definition: execnodes.h:1075
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:1074
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:1042
List * targetlist
Definition: plannodes.h:152
ExprContext * pi_exprContext
Definition: execnodes.h:357
TriggerDesc * trigdesc
Definition: rel.h:117
TupleDesc rd_att
Definition: rel.h:112
Form_pg_class rd_rel
Definition: rel.h:111
List * ri_matchedMergeAction
Definition: execnodes.h:541
TupleTableSlot * ri_PartitionTupleSlot
Definition: execnodes.h:574
List * ri_notMatchedMergeAction
Definition: execnodes.h:542
bool ri_projectNewInfoValid
Definition: execnodes.h:476
OnConflictSetState * ri_onConflict
Definition: execnodes.h:538
int ri_NumIndices
Definition: execnodes.h:452
List * ri_onConflictArbiterIndexes
Definition: execnodes.h:535
struct ResultRelInfo * ri_RootResultRelInfo
Definition: execnodes.h:573
TupleTableSlot ** ri_Slots
Definition: execnodes.h:508
Relation ri_RelationDesc
Definition: execnodes.h:449
RelationPtr ri_IndexRelationDescs
Definition: execnodes.h:455
int ri_NumSlotsInitialized
Definition: execnodes.h:506
List * ri_WithCheckOptions
Definition: execnodes.h:512
TupleTableSlot * ri_oldTupleSlot
Definition: execnodes.h:474
TriggerDesc * ri_TrigDesc
Definition: execnodes.h:479
Bitmapset * ri_extraUpdatedCols
Definition: execnodes.h:467
Index ri_RangeTableIndex
Definition: execnodes.h:446
ExprState ** ri_GeneratedExprsI
Definition: execnodes.h:521
int ri_NumGeneratedNeededU
Definition: execnodes.h:526
TupleTableSlot * ri_newTupleSlot
Definition: execnodes.h:472
List * ri_WithCheckOptionExprs
Definition: execnodes.h:515
ProjectionInfo * ri_projectNew
Definition: execnodes.h:470
ProjectionInfo * ri_projectReturning
Definition: execnodes.h:532
ExprState ** ri_GeneratedExprsU
Definition: execnodes.h:522
struct FdwRoutine * ri_FdwRoutine
Definition: execnodes.h:496
List * ri_returningList
Definition: execnodes.h:529
TupleTableSlot ** ri_PlanSlots
Definition: execnodes.h:509
bool ri_usesFdwDirectModify
Definition: execnodes.h:502
AttrNumber ri_RowIdAttNo
Definition: execnodes.h:464
int ri_NumGeneratedNeededI
Definition: execnodes.h:525
int ri_BatchSize
Definition: execnodes.h:507
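Several ResultRelInfo fields above exist to locate the old row an UPDATE, DELETE, or MERGE should act on; in particular ri_RowIdAttNo names the junk column in the subplan output that carries the row identifier (a ctid for ordinary tables). A minimal, hedged sketch of pulling that junk ctid out of the plan slot, assuming an ordinary table target; the function name fetch_row_id is illustrative:

#include "postgres.h"

#include "access/attnum.h"
#include "executor/executor.h"
#include "nodes/execnodes.h"
#include "storage/itemptr.h"

/*
 * Hedged sketch: extract the "ctid" junk attribute identifying the row to
 * update or delete.  Assumes the target is an ordinary table, so
 * ri_RowIdAttNo points at a tid column in the subplan's output slot.
 */
static ItemPointer
fetch_row_id(ResultRelInfo *rri, TupleTableSlot *planSlot)
{
    Datum       datum;
    bool        isnull;

    Assert(AttributeNumberIsValid(rri->ri_RowIdAttNo));

    datum = ExecGetJunkAttribute(planSlot, rri->ri_RowIdAttNo, &isnull);
    if (isnull)
        elog(ERROR, "ctid is NULL");    /* shouldn't happen for tables */

    return (ItemPointer) DatumGetPointer(datum);
}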
bool traversed
Definition: tableam.h:145
TransactionId xmax
Definition: tableam.h:143
CommandId cmax
Definition: tableam.h:144
ItemPointerData ctid
Definition: tableam.h:142
Expr * expr
Definition: primnodes.h:1922
TupleTableSlot * tcs_original_insert_tuple
Definition: trigger.h:76
int numtriggers
Definition: reltrigger.h:50
bool trig_delete_before_row
Definition: reltrigger.h:66
bool trig_update_instead_row
Definition: reltrigger.h:63
Trigger * triggers
Definition: reltrigger.h:49
bool trig_delete_instead_row
Definition: reltrigger.h:68
bool trig_update_after_row
Definition: reltrigger.h:62
bool trig_insert_instead_row
Definition: reltrigger.h:58
bool trig_update_before_row
Definition: reltrigger.h:61
bool trig_insert_before_row
Definition: reltrigger.h:56
Oid tgfoid
Definition: reltrigger.h:28
bool tgisclone
Definition: reltrigger.h:32
bool has_generated_stored
Definition: tupdesc.h:45
AttrMap * attrMap
Definition: tupconvert.h:28
TupleConstr * constr
Definition: tupdesc.h:85
Oid tts_tableOid
Definition: tuptable.h:130
TupleDesc tts_tupleDescriptor
Definition: tuptable.h:123
const TupleTableSlotOps *const tts_ops
Definition: tuptable.h:121
bool * tts_isnull
Definition: tuptable.h:127
Datum * tts_values
Definition: tuptable.h:125
TU_UpdateIndexes updateIndexes
LockTupleMode lockmode
#define MinTransactionIdAttributeNumber
Definition: sysattr.h:22
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:91
TU_UpdateIndexes
Definition: tableam.h:110
@ TU_Summarizing
Definition: tableam.h:118
@ TU_None
Definition: tableam.h:112
TM_Result
Definition: tableam.h:72
@ TM_Ok
Definition: tableam.h:77
@ TM_BeingModified
Definition: tableam.h:99
@ TM_Deleted
Definition: tableam.h:92
@ TM_WouldBlock
Definition: tableam.h:102
@ TM_Updated
Definition: tableam.h:89
@ TM_SelfModified
Definition: tableam.h:83
@ TM_Invisible
Definition: tableam.h:80
static TM_Result table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, uint8 flags, TM_FailureData *tmfd)
Definition: tableam.h:1575
static void table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot, uint32 specToken, bool succeeded)
Definition: tableam.h:1430
static TM_Result table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
Definition: tableam.h:1530
static TM_Result table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
Definition: tableam.h:1486
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION
Definition: tableam.h:260
static void table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, int options, struct BulkInsertStateData *bistate)
Definition: tableam.h:1397
static void table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot, CommandId cid, int options, struct BulkInsertStateData *bistate, uint32 specToken)
Definition: tableam.h:1416
static bool table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, Snapshot snapshot)
Definition: tableam.h:1330
static bool table_tuple_fetch_row_version(Relation rel, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot)
Definition: tableam.h:1283
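For orientation, here is a minimal, hedged sketch of how the table AM wrappers above chain together for a plain insert followed by an in-place update, outside the executor proper. The single-column int4 relation, the use of GetTransactionSnapshot(), and the omission of index maintenance, triggers, and concurrency retries are all simplifying assumptions:

#include "postgres.h"

#include "access/table.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "executor/tuptable.h"
#include "utils/snapmgr.h"

/*
 * Hedged sketch: insert one tuple and then overwrite it through the table
 * AM layer.  "relid" is assumed to name a one-column int4 table; index
 * maintenance, triggers, and EPQ handling are intentionally omitted.
 */
static void
insert_then_update_sketch(Oid relid, Datum newval)
{
    Relation    rel = table_open(relid, RowExclusiveLock);
    TupleTableSlot *slot = table_slot_create(rel, NULL);
    CommandId   cid = GetCurrentCommandId(true);
    ItemPointerData otid;
    TM_FailureData tmfd;
    TM_Result   result;
    LockTupleMode lockmode;
    TU_UpdateIndexes update_indexes;

    /* Build a virtual tuple in the slot and insert it. */
    ExecClearTuple(slot);
    slot->tts_values[0] = newval;
    slot->tts_isnull[0] = false;
    ExecStoreVirtualTuple(slot);
    table_tuple_insert(rel, slot, cid, 0, NULL);

    /* The AM recorded the new row's location in the slot. */
    otid = slot->tts_tid;

    /* Make the just-inserted row visible to the next command. */
    CommandCounterIncrement();
    cid = GetCurrentCommandId(true);

    /* Build the replacement tuple and update the row in place. */
    ExecClearTuple(slot);
    slot->tts_values[0] = Int32GetDatum(DatumGetInt32(newval) + 1);
    slot->tts_isnull[0] = false;
    ExecStoreVirtualTuple(slot);
    result = table_tuple_update(rel, &otid, slot, cid,
                                GetTransactionSnapshot(), InvalidSnapshot,
                                true /* wait */ , &tmfd,
                                &lockmode, &update_indexes);
    if (result != TM_Ok)
        elog(ERROR, "concurrent modification during sketch update");

    ExecDropSingleTupleTableSlot(slot);
    table_close(rel, NoLock);   /* keep the lock until commit */
}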
bool ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TupleTableSlot *newslot, TM_Result *tmresult, TM_FailureData *tmfd)
Definition: trigger.c:2940
void ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TransitionCaptureState *transition_capture, bool is_crosspart_update)
Definition: trigger.c:2780
void ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2400
bool ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *slot)
Definition: trigger.c:2464
bool ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo, HeapTuple trigtuple)
Definition: trigger.c:2817
void ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2618
bool ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *slot)
Definition: trigger.c:2557
void ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, ResultRelInfo *src_partinfo, ResultRelInfo *dst_partinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TupleTableSlot *newslot, List *recheckIndexes, TransitionCaptureState *transition_capture, bool is_crosspart_update)
Definition: trigger.c:3085
void ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo, TransitionCaptureState *transition_capture)
Definition: trigger.c:2922
void ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo, TransitionCaptureState *transition_capture)
Definition: trigger.c:2669
bool ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple fdw_trigtuple, TupleTableSlot **epqslot, TM_Result *tmresult, TM_FailureData *tmfd)
Definition: trigger.c:2689
void ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *slot, List *recheckIndexes, TransitionCaptureState *transition_capture)
Definition: trigger.c:2540
TransitionCaptureState * MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
Definition: trigger.c:4870
void ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo, TransitionCaptureState *transition_capture)
Definition: trigger.c:2451
bool ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, HeapTuple trigtuple, TupleTableSlot *newslot)
Definition: trigger.c:3144
void ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2864
#define RI_TRIGGER_PK
Definition: trigger.h:282
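The trigger entry points above fire in a fixed order around each row-level operation. A condensed, hedged sketch of the INSERT ordering (statement-level triggers bracketing a per-row BEFORE/AFTER sequence); everything except the Exec*InsertTriggers calls and the TriggerDesc flag test is illustrative:

#include "postgres.h"

#include "commands/trigger.h"
#include "executor/executor.h"
#include "nodes/pg_list.h"

/*
 * Hedged sketch of the trigger firing order for INSERT.  The actual row
 * insertion (table AM call plus index maintenance) is elided; transition
 * capture and error handling are simplified to NULL/skip.
 */
static void
insert_rows_with_triggers(EState *estate, ResultRelInfo *rri,
                          TupleTableSlot **slots, int nslots)
{
    /* Statement-level BEFORE triggers fire once, up front. */
    ExecBSInsertTriggers(estate, rri);

    for (int i = 0; i < nslots; i++)
    {
        TupleTableSlot *slot = slots[i];

        /*
         * Row-level BEFORE triggers may modify the tuple in the slot or
         * suppress the insert entirely (return value false).
         */
        if (rri->ri_TrigDesc &&
            rri->ri_TrigDesc->trig_insert_before_row &&
            !ExecBRInsertTriggers(estate, rri, slot))
            continue;           /* row skipped by a BEFORE trigger */

        /* ... table_tuple_insert() and index insertion would go here ... */

        /* Row-level AFTER triggers are queued once the row is in place. */
        ExecARInsertTriggers(estate, rri, slot, NIL, NULL);
    }

    /* Statement-level AFTER triggers fire once, at the end. */
    ExecASInsertTriggers(estate, rri, NULL);
}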
TupleTableSlot * execute_attr_map_slot(AttrMap *attrMap, TupleTableSlot *in_slot, TupleTableSlot *out_slot)
Definition: tupconvert.c:192
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:133
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:432
#define TTS_EMPTY(slot)
Definition: tuptable.h:96
static TupleTableSlot * ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
Definition: tuptable.h:482
static Datum slot_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
Definition: tuptable.h:409
#define TupIsNull(slot)
Definition: tuptable.h:299
static void slot_getallattrs(TupleTableSlot *slot)
Definition: tuptable.h:361
static void ExecMaterializeSlot(TupleTableSlot *slot)
Definition: tuptable.h:450
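A brief, hedged illustration of the slot helpers listed above: copying the current row out of a table-backed source slot into a longer-lived destination slot, forcing it to own its storage, and reading a system column. The function and variable names are illustrative:

#include "postgres.h"

#include "access/sysattr.h"
#include "executor/tuptable.h"

/*
 * Hedged sketch: keep a private copy of the current row from "srcslot" in
 * "dstslot" (same tuple descriptor assumed) and inspect its ctid.  srcslot
 * is assumed to be backed by a table scan, so system columns are available.
 */
static void
stash_current_row(TupleTableSlot *srcslot, TupleTableSlot *dstslot)
{
    bool        isnull;
    Datum       ctid;

    if (TupIsNull(srcslot))
        return;                 /* nothing to stash */

    /* Deform all attributes so tts_values/tts_isnull are populated. */
    slot_getallattrs(srcslot);

    /* Copy into dstslot; materializing makes it own its storage. */
    ExecCopySlot(dstslot, srcslot);
    ExecMaterializeSlot(dstslot);   /* no-op if the copy already did this */

    /* System attributes are fetched by (negative) attribute number. */
    ctid = slot_getsysattr(srcslot, SelfItemPointerAttributeNumber, &isnull);
    (void) ctid;
    (void) isnull;
}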
void pull_varattnos(Node *node, Index varno, Bitmapset **varattnos)
Definition: var.c:291
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:926
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:445
#define IsolationUsesXactSnapshot()
Definition: xact.h:51
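Finally, the transaction helpers at the end of this list are what the UPDATE and DELETE paths use to interpret a TM_SelfModified result: if the tuple's xmax belongs to the current transaction but tmfd.cmax differs from the statement's output command ID, the row was changed by something fired from this same command (typically a trigger), which is reported as an error rather than silently skipped. A condensed sketch under those assumptions; the function name and parameters are illustrative:

#include "postgres.h"

#include "access/tableam.h"
#include "access/xact.h"

/*
 * Hedged sketch of the TM_SelfModified classification used by the
 * UPDATE/DELETE paths; "tmfd" and "output_cid" are assumed inputs.
 */
static bool
row_already_handled(TM_Result result, const TM_FailureData *tmfd,
                    CommandId output_cid)
{
    if (result != TM_SelfModified)
        return false;

    /*
     * Modified by a later command of our own transaction: normally a
     * trigger fired by this very statement, which is unsafe to ignore.
     */
    if (TransactionIdIsCurrentTransactionId(tmfd->xmax) &&
        tmfd->cmax != output_cid)
        ereport(ERROR,
                (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
                 errmsg("tuple already modified by an operation triggered by the current command")));

    /* Already updated/deleted by this same command: treat it as done. */
    return true;
}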