1 /*-------------------------------------------------------------------------
2  *
3  * nodeModifyTable.c
4  * routines to handle ModifyTable nodes.
5  *
6  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/executor/nodeModifyTable.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /* INTERFACE ROUTINES
16  * ExecInitModifyTable - initialize the ModifyTable node
17  * ExecModifyTable - retrieve the next tuple from the node
18  * ExecEndModifyTable - shut down the ModifyTable node
19  * ExecReScanModifyTable - rescan the ModifyTable node
20  *
21  * NOTES
22  * Each ModifyTable node contains a list of one or more subplans,
23  * much like an Append node. There is one subplan per result relation.
24  * The key reason for this is that in an inherited UPDATE command, each
25  * result relation could have a different schema (more or different
26  * columns) requiring a different plan tree to produce it. In an
27  * inherited DELETE, all the subplans should produce the same output
28  * rowtype, but we might still find that different plans are appropriate
29  * for different child relations.
30  *
31  * If the query specifies RETURNING, then the ModifyTable returns a
32  * RETURNING tuple after completing each row insert, update, or delete.
33  * It must be called again to continue the operation. Without RETURNING,
34  * we just loop within the node until all the work is done, then
35  * return NULL. This avoids useless call/return overhead.
36  */
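/*
 * Illustration of the RETURNING behavior described above (a sketch, not
 * exercised by this file directly): for a statement such as
 *
 *     INSERT INTO foo VALUES (1), (2) RETURNING *;
 *
 * ExecModifyTable hands back one projected tuple per inserted row and must
 * be called repeatedly, whereas the same statement without RETURNING is
 * driven to completion in a single call that returns NULL.
 */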
37 
38 #include "postgres.h"
39 
40 #include "access/heapam.h"
41 #include "access/htup_details.h"
42 #include "access/tableam.h"
43 #include "access/xact.h"
44 #include "catalog/catalog.h"
45 #include "commands/trigger.h"
46 #include "executor/execPartition.h"
47 #include "executor/executor.h"
48 #include "executor/nodeModifyTable.h"
49 #include "foreign/fdwapi.h"
50 #include "miscadmin.h"
51 #include "nodes/nodeFuncs.h"
52 #include "rewrite/rewriteHandler.h"
53 #include "storage/bufmgr.h"
54 #include "storage/lmgr.h"
55 #include "utils/builtins.h"
56 #include "utils/datum.h"
57 #include "utils/memutils.h"
58 #include "utils/rel.h"
59 
60 
61 static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
62  ResultRelInfo *resultRelInfo,
63  ItemPointer conflictTid,
64  TupleTableSlot *planSlot,
65  TupleTableSlot *excludedSlot,
66  EState *estate,
67  bool canSetTag,
68  TupleTableSlot **returning);
69 static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
70  EState *estate,
71  PartitionTupleRouting *proute,
72  ResultRelInfo *targetRelInfo,
73  TupleTableSlot *slot);
74 static ResultRelInfo *getTargetResultRelInfo(ModifyTableState *node);
75 static void ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate);
76 static TupleConversionMap *tupconv_map_for_subplan(ModifyTableState *node,
77  int whichplan);
78 
79 /*
80  * Verify that the tuples to be produced by INSERT or UPDATE match the
81  * target relation's rowtype
82  *
83  * We do this to guard against stale plans. If plan invalidation is
84  * functioning properly then we should never get a failure here, but better
85  * safe than sorry. Note that this is called after we have obtained lock
86  * on the target rel, so the rowtype can't change underneath us.
87  *
88  * The plan output is represented by its targetlist, because that makes
89  * handling the dropped-column case easier.
90  */
91 static void
92 ExecCheckPlanOutput(Relation resultRel, List *targetList)
93 {
94  TupleDesc resultDesc = RelationGetDescr(resultRel);
95  int attno = 0;
96  ListCell *lc;
97 
98  foreach(lc, targetList)
99  {
100  TargetEntry *tle = (TargetEntry *) lfirst(lc);
101  Form_pg_attribute attr;
102 
103  if (tle->resjunk)
104  continue; /* ignore junk tlist items */
105 
106  if (attno >= resultDesc->natts)
107  ereport(ERROR,
108  (errcode(ERRCODE_DATATYPE_MISMATCH),
109  errmsg("table row type and query-specified row type do not match"),
110  errdetail("Query has too many columns.")));
111  attr = TupleDescAttr(resultDesc, attno);
112  attno++;
113 
114  if (!attr->attisdropped)
115  {
116  /* Normal case: demand type match */
117  if (exprType((Node *) tle->expr) != attr->atttypid)
118  ereport(ERROR,
119  (errcode(ERRCODE_DATATYPE_MISMATCH),
120  errmsg("table row type and query-specified row type do not match"),
121  errdetail("Table has type %s at ordinal position %d, but query expects %s.",
122  format_type_be(attr->atttypid),
123  attno,
124  format_type_be(exprType((Node *) tle->expr)))));
125  }
126  else
127  {
128  /*
129  * For a dropped column, we can't check atttypid (it's likely 0).
130  * In any case the planner has most likely inserted an INT4 null.
131  * What we insist on is just *some* NULL constant.
132  */
133  if (!IsA(tle->expr, Const) ||
134  !((Const *) tle->expr)->constisnull)
135  ereport(ERROR,
136  (errcode(ERRCODE_DATATYPE_MISMATCH),
137  errmsg("table row type and query-specified row type do not match"),
138  errdetail("Query provides a value for a dropped column at ordinal position %d.",
139  attno)));
140  }
141  }
142  if (attno != resultDesc->natts)
143  ereport(ERROR,
144  (errcode(ERRCODE_DATATYPE_MISMATCH),
145  errmsg("table row type and query-specified row type do not match"),
146  errdetail("Query has too few columns.")));
147 }
148 
149 /*
150  * ExecProcessReturning --- evaluate a RETURNING list
151  *
152  * resultRelInfo: current result rel
153  * tupleSlot: slot holding tuple actually inserted/updated/deleted
154  * planSlot: slot holding tuple returned by top subplan node
155  *
156  * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
157  * scan tuple.
158  *
159  * Returns a slot holding the result tuple
160  */
161 static TupleTableSlot *
162 ExecProcessReturning(ResultRelInfo *resultRelInfo,
163  TupleTableSlot *tupleSlot,
164  TupleTableSlot *planSlot)
165 {
166  ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
167  ExprContext *econtext = projectReturning->pi_exprContext;
168 
169  /* Make tuple and any needed join variables available to ExecProject */
170  if (tupleSlot)
171  econtext->ecxt_scantuple = tupleSlot;
172  econtext->ecxt_outertuple = planSlot;
173 
174  /*
175  * RETURNING expressions might reference the tableoid column, so
176  * reinitialize tts_tableOid before evaluating them.
177  */
178  econtext->ecxt_scantuple->tts_tableOid =
179  RelationGetRelid(resultRelInfo->ri_RelationDesc);
180 
181  /* Compute the RETURNING expressions */
182  return ExecProject(projectReturning);
183 }
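/*
 * Example of why tts_tableOid is (re-)set above: a RETURNING list may refer
 * to the tableoid system column, e.g.
 *
 *     DELETE FROM parent RETURNING tableoid::regclass, *;
 *
 * which must report the child relation actually affected, not the parent
 * named in the query.
 */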
184 
185 /*
186  * ExecCheckTupleVisible -- verify tuple is visible
187  *
188  * It would not be consistent with guarantees of the higher isolation levels to
189  * proceed with avoiding insertion (taking speculative insertion's alternative
190  * path) on the basis of another tuple that is not visible to MVCC snapshot.
191  * Check for the need to raise a serialization failure, and do so as necessary.
192  */
193 static void
194 ExecCheckTupleVisible(EState *estate,
195  Relation rel,
196  TupleTableSlot *slot)
197 {
198  if (!IsolationUsesXactSnapshot())
199  return;
200 
201  if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
202  {
203  Datum xminDatum;
204  TransactionId xmin;
205  bool isnull;
206 
207  xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
208  Assert(!isnull);
209  xmin = DatumGetTransactionId(xminDatum);
210 
211  /*
212  * We should not raise a serialization failure if the conflict is
213  * against a tuple inserted by our own transaction, even if it's not
214  * visible to our snapshot. (This would happen, for example, if
215  * conflicting keys are proposed for insertion in a single command.)
216  */
217  if (!TransactionIdIsCurrentTransactionId(xmin))
218  ereport(ERROR,
219  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
220  errmsg("could not serialize access due to concurrent update")));
221  }
222 }
223 
224 /*
225  * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
226  */
227 static void
228 ExecCheckTIDVisible(EState *estate,
229  ResultRelInfo *relinfo,
230  ItemPointer tid,
231  TupleTableSlot *tempSlot)
232 {
233  Relation rel = relinfo->ri_RelationDesc;
234 
235  /* Redundantly check isolation level */
236  if (!IsolationUsesXactSnapshot())
237  return;
238 
239  if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
240  elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
241  ExecCheckTupleVisible(estate, rel, tempSlot);
242  ExecClearTuple(tempSlot);
243 }
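/*
 * Sketch of the scenario these checks guard against: under REPEATABLE READ
 * or SERIALIZABLE, an
 *
 *     INSERT INTO t VALUES (...) ON CONFLICT DO NOTHING;
 *
 * that conflicts with a row committed after the snapshot was taken cannot
 * silently skip the insert; the functions above turn that case into a
 * serialization failure instead.
 */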
244 
245 /*
246  * Compute stored generated columns for a tuple
247  */
248 void
249 ExecComputeStoredGenerated(EState *estate, TupleTableSlot *slot)
250 {
251  ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
252  Relation rel = resultRelInfo->ri_RelationDesc;
253  TupleDesc tupdesc = RelationGetDescr(rel);
254  int natts = tupdesc->natts;
255  MemoryContext oldContext;
256  Datum *values;
257  bool *nulls;
258 
259  Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
260 
261  /*
262  * If first time through for this result relation, build expression
263  * nodetrees for rel's stored generation expressions. Keep them in the
264  * per-query memory context so they'll survive throughout the query.
265  */
266  if (resultRelInfo->ri_GeneratedExprs == NULL)
267  {
268  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
269 
270  resultRelInfo->ri_GeneratedExprs =
271  (ExprState **) palloc(natts * sizeof(ExprState *));
272 
273  for (int i = 0; i < natts; i++)
274  {
275  if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
276  {
277  Expr *expr;
278 
279  expr = (Expr *) build_column_default(rel, i + 1);
280  if (expr == NULL)
281  elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
282  i + 1, RelationGetRelationName(rel));
283 
284  resultRelInfo->ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
285  }
286  }
287 
288  MemoryContextSwitchTo(oldContext);
289  }
290 
291  oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
292 
293  values = palloc(sizeof(*values) * natts);
294  nulls = palloc(sizeof(*nulls) * natts);
295 
296  slot_getallattrs(slot);
297  memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
298 
299  for (int i = 0; i < natts; i++)
300  {
301  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
302 
303  if (attr->attgenerated == ATTRIBUTE_GENERATED_STORED)
304  {
305  ExprContext *econtext;
306  Datum val;
307  bool isnull;
308 
309  econtext = GetPerTupleExprContext(estate);
310  econtext->ecxt_scantuple = slot;
311 
312  val = ExecEvalExpr(resultRelInfo->ri_GeneratedExprs[i], econtext, &isnull);
313 
314  values[i] = val;
315  nulls[i] = isnull;
316  }
317  else
318  {
319  if (!nulls[i])
320  values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
321  }
322  }
323 
324  ExecClearTuple(slot);
325  memcpy(slot->tts_values, values, sizeof(*values) * natts);
326  memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
327  ExecStoreVirtualTuple(slot);
328  ExecMaterializeSlot(slot);
329 
330  MemoryContextSwitchTo(oldContext);
331 }
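/*
 * For instance, given a table defined (roughly) as
 *
 *     CREATE TABLE t (a int, b int GENERATED ALWAYS AS (a * 2) STORED);
 *
 * this function evaluates "a * 2" against the new tuple in the slot and
 * overwrites column b before the tuple is written out.
 */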
332 
333 /* ----------------------------------------------------------------
334  * ExecInsert
335  *
336  * For INSERT, we have to insert the tuple into the target relation
337  * and insert appropriate tuples into the index relations.
338  *
339  * Returns RETURNING result if any, otherwise NULL.
340  * ----------------------------------------------------------------
341  */
342 static TupleTableSlot *
343 ExecInsert(ModifyTableState *mtstate,
344  TupleTableSlot *slot,
345  TupleTableSlot *planSlot,
346  EState *estate,
347  bool canSetTag)
348 {
349  ResultRelInfo *resultRelInfo;
350  Relation resultRelationDesc;
351  List *recheckIndexes = NIL;
352  TupleTableSlot *result = NULL;
353  TransitionCaptureState *ar_insert_trig_tcs;
354  ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
355  OnConflictAction onconflict = node->onConflictAction;
356 
357  ExecMaterializeSlot(slot);
358 
359  /*
360  * get information on the (current) result relation
361  */
362  resultRelInfo = estate->es_result_relation_info;
363  resultRelationDesc = resultRelInfo->ri_RelationDesc;
364 
365  /*
366  * BEFORE ROW INSERT Triggers.
367  *
368  * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
369  * INSERT ... ON CONFLICT statement. We cannot check for constraint
370  * violations before firing these triggers, because they can change the
371  * values to insert. Also, they can run arbitrary user-defined code with
372  * side-effects that we can't cancel by just not inserting the tuple.
373  */
374  if (resultRelInfo->ri_TrigDesc &&
375  resultRelInfo->ri_TrigDesc->trig_insert_before_row)
376  {
377  if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
378  return NULL; /* "do nothing" */
379  }
380 
381  /* INSTEAD OF ROW INSERT Triggers */
382  if (resultRelInfo->ri_TrigDesc &&
383  resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
384  {
385  if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
386  return NULL; /* "do nothing" */
387  }
388  else if (resultRelInfo->ri_FdwRoutine)
389  {
390  /*
391  * Compute stored generated columns
392  */
393  if (resultRelationDesc->rd_att->constr &&
394  resultRelationDesc->rd_att->constr->has_generated_stored)
395  ExecComputeStoredGenerated(estate, slot);
396 
397  /*
398  * insert into foreign table: let the FDW do it
399  */
400  slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
401  resultRelInfo,
402  slot,
403  planSlot);
404 
405  if (slot == NULL) /* "do nothing" */
406  return NULL;
407 
408  /*
409  * AFTER ROW Triggers or RETURNING expressions might reference the
410  * tableoid column, so (re-)initialize tts_tableOid before evaluating
411  * them.
412  */
413  slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
414  }
415  else
416  {
417  WCOKind wco_kind;
418 
419  /*
420  * Constraints might reference the tableoid column, so (re-)initialize
421  * tts_tableOid before evaluating them.
422  */
423  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
424 
425  /*
426  * Compute stored generated columns
427  */
428  if (resultRelationDesc->rd_att->constr &&
429  resultRelationDesc->rd_att->constr->has_generated_stored)
430  ExecComputeStoredGenerated(estate, slot);
431 
432  /*
433  * Check any RLS WITH CHECK policies.
434  *
435  * Normally we should check INSERT policies. But if the insert is the
436  * result of a partition key update that moved the tuple to a new
437  * partition, we should instead check UPDATE policies, because we are
438  * executing policies defined on the target table, and not those
439  * defined on the child partitions.
440  */
441  wco_kind = (mtstate->operation == CMD_UPDATE) ?
442  WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
443 
444  /*
445  * ExecWithCheckOptions() will skip any WCOs which are not of the kind
446  * we are looking for at this point.
447  */
448  if (resultRelInfo->ri_WithCheckOptions != NIL)
449  ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
450 
451  /*
452  * Check the constraints of the tuple.
453  */
454  if (resultRelationDesc->rd_att->constr)
455  ExecConstraints(resultRelInfo, slot, estate);
456 
457  /*
458  * Also check the tuple against the partition constraint, if there is
459  * one; except that if we got here via tuple-routing, we don't need to
460  * if there's no BR trigger defined on the partition.
461  */
462  if (resultRelInfo->ri_PartitionCheck &&
463  (resultRelInfo->ri_PartitionRoot == NULL ||
464  (resultRelInfo->ri_TrigDesc &&
465  resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
466  ExecPartitionCheck(resultRelInfo, slot, estate, true);
467 
468  if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
469  {
470  /* Perform a speculative insertion. */
471  uint32 specToken;
472  ItemPointerData conflictTid;
473  bool specConflict;
474  List *arbiterIndexes;
475 
476  arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
477 
478  /*
479  * Do a non-conclusive check for conflicts first.
480  *
481  * We're not holding any locks yet, so this doesn't guarantee that
482  * the later insert won't conflict. But it avoids leaving behind
483  * a lot of canceled speculative insertions, if you run a lot of
484  * INSERT ON CONFLICT statements that do conflict.
485  *
486  * We loop back here if we find a conflict below, either during
487  * the pre-check, or when we re-check after inserting the tuple
488  * speculatively.
489  */
490  vlock:
491  specConflict = false;
492  if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
493  arbiterIndexes))
494  {
495  /* committed conflict tuple found */
496  if (onconflict == ONCONFLICT_UPDATE)
497  {
498  /*
499  * In case of ON CONFLICT DO UPDATE, execute the UPDATE
500  * part. Be prepared to retry if the UPDATE fails because
501  * of another concurrent UPDATE/DELETE to the conflict
502  * tuple.
503  */
504  TupleTableSlot *returning = NULL;
505 
506  if (ExecOnConflictUpdate(mtstate, resultRelInfo,
507  &conflictTid, planSlot, slot,
508  estate, canSetTag, &returning))
509  {
510  InstrCountTuples2(&mtstate->ps, 1);
511  return returning;
512  }
513  else
514  goto vlock;
515  }
516  else
517  {
518  /*
519  * In case of ON CONFLICT DO NOTHING, do nothing. However,
520  * verify that the tuple is visible to the executor's MVCC
521  * snapshot at higher isolation levels.
522  *
523  * Using ExecGetReturningSlot() to store the tuple for the
524  * recheck isn't that pretty, but we can't trivially use
525  * the input slot, because it might not be of a compatible
526  * type. As there's no conflicting usage of
527  * ExecGetReturningSlot() in the DO NOTHING case...
528  */
529  Assert(onconflict == ONCONFLICT_NOTHING);
530  ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
531  ExecGetReturningSlot(estate, resultRelInfo));
532  InstrCountTuples2(&mtstate->ps, 1);
533  return NULL;
534  }
535  }
536 
537  /*
538  * Before we start insertion proper, acquire our "speculative
539  * insertion lock". Others can use that to wait for us to decide
540  * if we're going to go ahead with the insertion, instead of
541  * waiting for the whole transaction to complete.
542  */
543  specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
544 
545  /* insert the tuple, with the speculative token */
546  table_tuple_insert_speculative(resultRelationDesc, slot,
547  estate->es_output_cid,
548  0,
549  NULL,
550  specToken);
551 
552  /* insert index entries for tuple */
553  recheckIndexes = ExecInsertIndexTuples(slot, estate, true,
554  &specConflict,
555  arbiterIndexes);
556 
557  /* adjust the tuple's state accordingly */
558  table_tuple_complete_speculative(resultRelationDesc, slot,
559  specToken, !specConflict);
560 
561  /*
562  * Wake up anyone waiting for our decision. They will re-check
563  * the tuple, see that it's no longer speculative, and wait on our
564  * XID as if this was a regularly inserted tuple all along. Or if
565  * we killed the tuple, they will see it's dead, and proceed as if
566  * the tuple never existed.
567  */
568  SpeculativeInsertionLockRelease(GetCurrentTransactionId());
569 
570  /*
571  * If there was a conflict, start from the beginning. We'll do
572  * the pre-check again, which will now find the conflicting tuple
573  * (unless it aborts before we get there).
574  */
575  if (specConflict)
576  {
577  list_free(recheckIndexes);
578  goto vlock;
579  }
580 
581  /* Since there was no insertion conflict, we're done */
582  }
583  else
584  {
585  /* insert the tuple normally */
586  table_tuple_insert(resultRelationDesc, slot,
587  estate->es_output_cid,
588  0, NULL);
589 
590  /* insert index entries for tuple */
591  if (resultRelInfo->ri_NumIndices > 0)
592  recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
593  NIL);
594  }
595  }
596 
597  if (canSetTag)
598  {
599  (estate->es_processed)++;
600  setLastTid(&slot->tts_tid);
601  }
602 
603  /*
604  * If this insert is the result of a partition key update that moved the
605  * tuple to a new partition, put this row into the transition NEW TABLE,
606  * if there is one. We need to do this separately for DELETE and INSERT
607  * because they happen on different tables.
608  */
609  ar_insert_trig_tcs = mtstate->mt_transition_capture;
610  if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
611  && mtstate->mt_transition_capture->tcs_update_new_table)
612  {
613  ExecARUpdateTriggers(estate, resultRelInfo, NULL,
614  NULL,
615  slot,
616  NULL,
617  mtstate->mt_transition_capture);
618 
619  /*
620  * We've already captured the NEW TABLE row, so make sure any AR
621  * INSERT trigger fired below doesn't capture it again.
622  */
623  ar_insert_trig_tcs = NULL;
624  }
625 
626  /* AFTER ROW INSERT Triggers */
627  ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
628  ar_insert_trig_tcs);
629 
630  list_free(recheckIndexes);
631 
632  /*
633  * Check any WITH CHECK OPTION constraints from parent views. We are
634  * required to do this after testing all constraints and uniqueness
635  * violations per the SQL spec, so we do it after actually inserting the
636  * record into the heap and all indexes.
637  *
638  * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
639  * tuple will never be seen, if it violates the WITH CHECK OPTION.
640  *
641  * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
642  * are looking for at this point.
643  */
644  if (resultRelInfo->ri_WithCheckOptions != NIL)
645  ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
646 
647  /* Process RETURNING if present */
648  if (resultRelInfo->ri_projectReturning)
649  result = ExecProcessReturning(resultRelInfo, slot, planSlot);
650 
651  return result;
652 }
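/*
 * Rough shape of the speculative-insertion dance above, for
 *
 *     INSERT INTO t VALUES (...) ON CONFLICT (k) DO UPDATE SET ...;
 *
 * 1. pre-check the arbiter indexes without holding any locks;
 * 2. if no conflict is found, insert the tuple marked with a speculative
 *    token;
 * 3. insert index entries, re-checking for conflicts;
 * 4. on conflict, kill the speculative tuple and retry from step 1;
 *    otherwise complete the insertion normally.
 */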
653 
654 /* ----------------------------------------------------------------
655  * ExecDelete
656  *
657  * DELETE is like UPDATE, except that we delete the tuple and no
658  * index modifications are needed.
659  *
660  * When deleting from a table, tupleid identifies the tuple to
661  * delete and oldtuple is NULL. When deleting from a view,
662  * oldtuple is passed to the INSTEAD OF triggers and identifies
663  * what to delete, and tupleid is invalid. When deleting from a
664  * foreign table, tupleid is invalid; the FDW has to figure out
665  * which row to delete using data from the planSlot. oldtuple is
666  * passed to foreign table triggers; it is NULL when the foreign
667  * table has no relevant triggers. We use tupleDeleted to indicate
668  * whether the tuple is actually deleted, callers can use it to
669  * decide whether to continue the operation. When this DELETE is a
670  * part of an UPDATE of partition-key, then the slot returned by
671  * EvalPlanQual() is passed back using output parameter epqslot.
672  *
673  * Returns RETURNING result if any, otherwise NULL.
674  * ----------------------------------------------------------------
675  */
676 static TupleTableSlot *
677 ExecDelete(ModifyTableState *mtstate,
678  ItemPointer tupleid,
679  HeapTuple oldtuple,
680  TupleTableSlot *planSlot,
681  EPQState *epqstate,
682  EState *estate,
683  bool processReturning,
684  bool canSetTag,
685  bool changingPart,
686  bool *tupleDeleted,
687  TupleTableSlot **epqreturnslot)
688 {
689  ResultRelInfo *resultRelInfo;
690  Relation resultRelationDesc;
691  TM_Result result;
692  TM_FailureData tmfd;
693  TupleTableSlot *slot = NULL;
694  TransitionCaptureState *ar_delete_trig_tcs;
695 
696  if (tupleDeleted)
697  *tupleDeleted = false;
698 
699  /*
700  * get information on the (current) result relation
701  */
702  resultRelInfo = estate->es_result_relation_info;
703  resultRelationDesc = resultRelInfo->ri_RelationDesc;
704 
705  /* BEFORE ROW DELETE Triggers */
706  if (resultRelInfo->ri_TrigDesc &&
707  resultRelInfo->ri_TrigDesc->trig_delete_before_row)
708  {
709  bool dodelete;
710 
711  dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
712  tupleid, oldtuple, epqreturnslot);
713 
714  if (!dodelete) /* "do nothing" */
715  return NULL;
716  }
717 
718  /* INSTEAD OF ROW DELETE Triggers */
719  if (resultRelInfo->ri_TrigDesc &&
720  resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
721  {
722  bool dodelete;
723 
724  Assert(oldtuple != NULL);
725  dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
726 
727  if (!dodelete) /* "do nothing" */
728  return NULL;
729  }
730  else if (resultRelInfo->ri_FdwRoutine)
731  {
732  /*
733  * delete from foreign table: let the FDW do it
734  *
735  * We offer the returning slot as a place to store RETURNING data,
736  * although the FDW can return some other slot if it wants.
737  */
738  slot = ExecGetReturningSlot(estate, resultRelInfo);
739  slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
740  resultRelInfo,
741  slot,
742  planSlot);
743 
744  if (slot == NULL) /* "do nothing" */
745  return NULL;
746 
747  /*
748  * RETURNING expressions might reference the tableoid column, so
749  * (re)initialize tts_tableOid before evaluating them.
750  */
751  if (TTS_EMPTY(slot))
752  ExecStoreAllNullTuple(slot);
753 
754  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
755  }
756  else
757  {
758  /*
759  * delete the tuple
760  *
761  * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
762  * that the row to be deleted is visible to that snapshot, and throw a
763  * can't-serialize error if not. This is a special-case behavior
764  * needed for referential integrity updates in transaction-snapshot
765  * mode transactions.
766  */
767 ldelete:;
768  result = table_tuple_delete(resultRelationDesc, tupleid,
769  estate->es_output_cid,
770  estate->es_snapshot,
771  estate->es_crosscheck_snapshot,
772  true /* wait for commit */ ,
773  &tmfd,
774  changingPart);
775 
776  switch (result)
777  {
778  case TM_SelfModified:
779 
780  /*
781  * The target tuple was already updated or deleted by the
782  * current command, or by a later command in the current
783  * transaction. The former case is possible in a join DELETE
784  * where multiple tuples join to the same target tuple. This
785  * is somewhat questionable, but Postgres has always allowed
786  * it: we just ignore additional deletion attempts.
787  *
788  * The latter case arises if the tuple is modified by a
789  * command in a BEFORE trigger, or perhaps by a command in a
790  * volatile function used in the query. In such situations we
791  * should not ignore the deletion, but it is equally unsafe to
792  * proceed. We don't want to discard the original DELETE
793  * while keeping the triggered actions based on its deletion;
794  * and it would be no better to allow the original DELETE
795  * while discarding updates that it triggered. The row update
796  * carries some information that might be important according
797  * to business rules; so throwing an error is the only safe
798  * course.
799  *
800  * If a trigger actually intends this type of interaction, it
801  * can re-execute the DELETE and then return NULL to cancel
802  * the outer delete.
803  */
804  if (tmfd.cmax != estate->es_output_cid)
805  ereport(ERROR,
806  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
807  errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
808  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
809 
810  /* Else, already deleted by self; nothing to do */
811  return NULL;
812 
813  case TM_Ok:
814  break;
815 
816  case TM_Updated:
817  {
818  TupleTableSlot *inputslot;
819  TupleTableSlot *epqslot;
820 
821  if (IsolationUsesXactSnapshot())
822  ereport(ERROR,
823  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
824  errmsg("could not serialize access due to concurrent update")));
825 
826  /*
827  * Already know that we're going to need to do EPQ, so
828  * fetch tuple directly into the right slot.
829  */
830  EvalPlanQualBegin(epqstate);
831  inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
832  resultRelInfo->ri_RangeTableIndex);
833 
834  result = table_tuple_lock(resultRelationDesc, tupleid,
835  estate->es_snapshot,
836  inputslot, estate->es_output_cid,
837  LockTupleExclusive, LockWaitBlock,
838  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
839  &tmfd);
840 
841  switch (result)
842  {
843  case TM_Ok:
844  Assert(tmfd.traversed);
845  epqslot = EvalPlanQual(epqstate,
846  resultRelationDesc,
847  resultRelInfo->ri_RangeTableIndex,
848  inputslot);
849  if (TupIsNull(epqslot))
850  /* Tuple not passing quals anymore, exiting... */
851  return NULL;
852 
853  /*
854  * If requested, skip delete and pass back the
855  * updated row.
856  */
857  if (epqreturnslot)
858  {
859  *epqreturnslot = epqslot;
860  return NULL;
861  }
862  else
863  goto ldelete;
864 
865  case TM_SelfModified:
866 
867  /*
868  * This can be reached when following an update
869  * chain from a tuple updated by another session,
870  * reaching a tuple that was already updated in
871  * this transaction. If previously updated by this
872  * command, ignore the delete, otherwise error
873  * out.
874  *
875  * See also TM_SelfModified response to
876  * table_tuple_delete() above.
877  */
878  if (tmfd.cmax != estate->es_output_cid)
879  ereport(ERROR,
880  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
881  errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
882  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
883  return NULL;
884 
885  case TM_Deleted:
886  /* tuple already deleted; nothing to do */
887  return NULL;
888 
889  default:
890 
891  /*
892  * TM_Invisible should be impossible because we're
893  * waiting for updated row versions, and would
894  * already have errored out if the first version
895  * is invisible.
896  *
897  * TM_Updated should be impossible, because we're
898  * locking the latest version via
899  * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
900  */
901  elog(ERROR, "unexpected table_tuple_lock status: %u",
902  result);
903  return NULL;
904  }
905 
906  Assert(false);
907  break;
908  }
909 
910  case TM_Deleted:
911  if (IsolationUsesXactSnapshot())
912  ereport(ERROR,
913  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
914  errmsg("could not serialize access due to concurrent delete")));
915  /* tuple already deleted; nothing to do */
916  return NULL;
917 
918  default:
919  elog(ERROR, "unrecognized table_tuple_delete status: %u",
920  result);
921  return NULL;
922  }
923 
924  /*
925  * Note: Normally one would think that we have to delete index tuples
926  * associated with the heap tuple now...
927  *
928  * ... but in POSTGRES, we have no need to do this because VACUUM will
929  * take care of it later. We can't delete index tuples immediately
930  * anyway, since the tuple is still visible to other transactions.
931  */
932  }
933 
934  if (canSetTag)
935  (estate->es_processed)++;
936 
937  /* Tell caller that the delete actually happened. */
938  if (tupleDeleted)
939  *tupleDeleted = true;
940 
941  /*
942  * If this delete is the result of a partition key update that moved the
943  * tuple to a new partition, put this row into the transition OLD TABLE,
944  * if there is one. We need to do this separately for DELETE and INSERT
945  * because they happen on different tables.
946  */
947  ar_delete_trig_tcs = mtstate->mt_transition_capture;
948  if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
949  && mtstate->mt_transition_capture->tcs_update_old_table)
950  {
951  ExecARUpdateTriggers(estate, resultRelInfo,
952  tupleid,
953  oldtuple,
954  NULL,
955  NULL,
956  mtstate->mt_transition_capture);
957 
958  /*
959  * We've already captured the NEW TABLE row, so make sure any AR
960  * DELETE trigger fired below doesn't capture it again.
961  */
962  ar_delete_trig_tcs = NULL;
963  }
964 
965  /* AFTER ROW DELETE Triggers */
966  ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
967  ar_delete_trig_tcs);
968 
969  /* Process RETURNING if present and if requested */
970  if (processReturning && resultRelInfo->ri_projectReturning)
971  {
972  /*
973  * We have to put the target tuple into a slot, which means first we
974  * gotta fetch it. We can use the trigger tuple slot.
975  */
976  TupleTableSlot *rslot;
977 
978  if (resultRelInfo->ri_FdwRoutine)
979  {
980  /* FDW must have provided a slot containing the deleted row */
981  Assert(!TupIsNull(slot));
982  }
983  else
984  {
985  slot = ExecGetReturningSlot(estate, resultRelInfo);
986  if (oldtuple != NULL)
987  {
988  ExecForceStoreHeapTuple(oldtuple, slot, false);
989  }
990  else
991  {
992  if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
993  SnapshotAny, slot))
994  elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
995  }
996  }
997 
998  rslot = ExecProcessReturning(resultRelInfo, slot, planSlot);
999 
1000  /*
1001  * Before releasing the target tuple again, make sure rslot has a
1002  * local copy of any pass-by-reference values.
1003  */
1004  ExecMaterializeSlot(rslot);
1005 
1006  ExecClearTuple(slot);
1007 
1008  return rslot;
1009  }
1010 
1011  return NULL;
1012 }
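/*
 * Note on the RETURNING handling above: for a plain
 *
 *     DELETE FROM t WHERE ... RETURNING *;
 *
 * the deleted row is no longer visible to the query snapshot, so it is
 * re-fetched by TID with SnapshotAny purely to compute the RETURNING
 * projection.
 */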
1013 
1014 /* ----------------------------------------------------------------
1015  * ExecUpdate
1016  *
1017  * note: we can't run UPDATE queries with transactions
1018  * off because UPDATEs are actually INSERTs and our
1019  * scan will mistakenly loop forever, updating the tuple
1020  * it just inserted.. This should be fixed but until it
1021  * is, we don't want to get stuck in an infinite loop
1022  * which corrupts your database..
1023  *
1024  * When updating a table, tupleid identifies the tuple to
1025  * update and oldtuple is NULL. When updating a view, oldtuple
1026  * is passed to the INSTEAD OF triggers and identifies what to
1027  * update, and tupleid is invalid. When updating a foreign table,
1028  * tupleid is invalid; the FDW has to figure out which row to
1029  * update using data from the planSlot. oldtuple is passed to
1030  * foreign table triggers; it is NULL when the foreign table has
1031  * no relevant triggers.
1032  *
1033  * Returns RETURNING result if any, otherwise NULL.
1034  * ----------------------------------------------------------------
1035  */
1036 static TupleTableSlot *
1037 ExecUpdate(ModifyTableState *mtstate,
1038  ItemPointer tupleid,
1039  HeapTuple oldtuple,
1040  TupleTableSlot *slot,
1041  TupleTableSlot *planSlot,
1042  EPQState *epqstate,
1043  EState *estate,
1044  bool canSetTag)
1045 {
1046  ResultRelInfo *resultRelInfo;
1047  Relation resultRelationDesc;
1048  TM_Result result;
1049  TM_FailureData tmfd;
1050  List *recheckIndexes = NIL;
1051  TupleConversionMap *saved_tcs_map = NULL;
1052 
1053  /*
1054  * abort the operation if not running transactions
1055  */
1056  if (IsBootstrapProcessingMode())
1057  elog(ERROR, "cannot UPDATE during bootstrap");
1058 
1059  ExecMaterializeSlot(slot);
1060 
1061  /*
1062  * get information on the (current) result relation
1063  */
1064  resultRelInfo = estate->es_result_relation_info;
1065  resultRelationDesc = resultRelInfo->ri_RelationDesc;
1066 
1067  /* BEFORE ROW UPDATE Triggers */
1068  if (resultRelInfo->ri_TrigDesc &&
1069  resultRelInfo->ri_TrigDesc->trig_update_before_row)
1070  {
1071  if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
1072  tupleid, oldtuple, slot))
1073  return NULL; /* "do nothing" */
1074  }
1075 
1076  /* INSTEAD OF ROW UPDATE Triggers */
1077  if (resultRelInfo->ri_TrigDesc &&
1078  resultRelInfo->ri_TrigDesc->trig_update_instead_row)
1079  {
1080  if (!ExecIRUpdateTriggers(estate, resultRelInfo,
1081  oldtuple, slot))
1082  return NULL; /* "do nothing" */
1083  }
1084  else if (resultRelInfo->ri_FdwRoutine)
1085  {
1086  /*
1087  * Compute stored generated columns
1088  */
1089  if (resultRelationDesc->rd_att->constr &&
1090  resultRelationDesc->rd_att->constr->has_generated_stored)
1091  ExecComputeStoredGenerated(estate, slot);
1092 
1093  /*
1094  * update in foreign table: let the FDW do it
1095  */
1096  slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
1097  resultRelInfo,
1098  slot,
1099  planSlot);
1100 
1101  if (slot == NULL) /* "do nothing" */
1102  return NULL;
1103 
1104  /*
1105  * AFTER ROW Triggers or RETURNING expressions might reference the
1106  * tableoid column, so (re-)initialize tts_tableOid before evaluating
1107  * them.
1108  */
1109  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1110  }
1111  else
1112  {
1113  LockTupleMode lockmode;
1114  bool partition_constraint_failed;
1115  bool update_indexes;
1116 
1117  /*
1118  * Constraints might reference the tableoid column, so (re-)initialize
1119  * tts_tableOid before evaluating them.
1120  */
1121  slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1122 
1123  /*
1124  * Compute stored generated columns
1125  */
1126  if (resultRelationDesc->rd_att->constr &&
1127  resultRelationDesc->rd_att->constr->has_generated_stored)
1128  ExecComputeStoredGenerated(estate, slot);
1129 
1130  /*
1131  * Check any RLS UPDATE WITH CHECK policies
1132  *
1133  * If we generate a new candidate tuple after EvalPlanQual testing, we
1134  * must loop back here and recheck any RLS policies and constraints.
1135  * (We don't need to redo triggers, however. If there are any BEFORE
1136  * triggers then trigger.c will have done table_tuple_lock to lock the
1137  * correct tuple, so there's no need to do them again.)
1138  */
1139 lreplace:;
1140 
1141  /* ensure slot is independent, consider e.g. EPQ */
1142  ExecMaterializeSlot(slot);
1143 
1144  /*
1145  * If partition constraint fails, this row might get moved to another
1146  * partition, in which case we should check the RLS CHECK policy just
1147  * before inserting into the new partition, rather than doing it here.
1148  * This is because a trigger on that partition might again change the
1149  * row. So skip the WCO checks if the partition constraint fails.
1150  */
1151  partition_constraint_failed =
1152  resultRelInfo->ri_PartitionCheck &&
1153  !ExecPartitionCheck(resultRelInfo, slot, estate, false);
1154 
1155  if (!partition_constraint_failed &&
1156  resultRelInfo->ri_WithCheckOptions != NIL)
1157  {
1158  /*
1159  * ExecWithCheckOptions() will skip any WCOs which are not of the
1160  * kind we are looking for at this point.
1161  */
1162  ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
1163  resultRelInfo, slot, estate);
1164  }
1165 
1166  /*
1167  * If a partition check failed, try to move the row into the right
1168  * partition.
1169  */
1170  if (partition_constraint_failed)
1171  {
1172  bool tuple_deleted;
1173  TupleTableSlot *ret_slot;
1174  TupleTableSlot *epqslot = NULL;
1175  PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
1176  int map_index;
1177  TupleConversionMap *tupconv_map;
1178 
1179  /*
1180  * Disallow an INSERT ON CONFLICT DO UPDATE that causes the
1181  * original row to migrate to a different partition. Maybe this
1182  * can be implemented some day, but it seems a fringe feature with
1183  * little redeeming value.
1184  */
1185  if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1186  ereport(ERROR,
1187  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1188  errmsg("invalid ON UPDATE specification"),
1189  errdetail("The result tuple would appear in a different partition than the original tuple.")));
1190 
1191  /*
1192  * When an UPDATE is run on a leaf partition, we will not have
1193  * partition tuple routing set up. In that case, fail with
1194  * partition constraint violation error.
1195  */
1196  if (proute == NULL)
1197  ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1198 
1199  /*
1200  * Row movement, part 1. Delete the tuple, but skip RETURNING
1201  * processing. We want to return rows from INSERT.
1202  */
1203  ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate,
1204  estate, false, false /* canSetTag */ ,
1205  true /* changingPart */ , &tuple_deleted, &epqslot);
1206 
1207  /*
1208  * For some reason if DELETE didn't happen (e.g. trigger prevented
1209  * it, or it was already deleted by self, or it was concurrently
1210  * deleted by another transaction), then we should skip the insert
1211  * as well; otherwise, an UPDATE could cause an increase in the
1212  * total number of rows across all partitions, which is clearly
1213  * wrong.
1214  *
1215  * For a normal UPDATE, the case where the tuple has been the
1216  * subject of a concurrent UPDATE or DELETE would be handled by
1217  * the EvalPlanQual machinery, but for an UPDATE that we've
1218  * translated into a DELETE from this partition and an INSERT into
1219  * some other partition, that's not available, because CTID chains
1220  * can't span relation boundaries. We mimic the semantics to a
1221  * limited extent by skipping the INSERT if the DELETE fails to
1222  * find a tuple. This ensures that two concurrent attempts to
1223  * UPDATE the same tuple at the same time can't turn one tuple
1224  * into two, and that an UPDATE of a just-deleted tuple can't
1225  * resurrect it.
1226  */
1227  if (!tuple_deleted)
1228  {
1229  /*
1230  * epqslot will be typically NULL. But when ExecDelete()
1231  * finds that another transaction has concurrently updated the
1232  * same row, it re-fetches the row, skips the delete, and
1233  * epqslot is set to the re-fetched tuple slot. In that case,
1234  * we need to do all the checks again.
1235  */
1236  if (TupIsNull(epqslot))
1237  return NULL;
1238  else
1239  {
1240  slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
1241  goto lreplace;
1242  }
1243  }
1244 
1245  /*
1246  * Updates set the transition capture map only when a new subplan
1247  * is chosen. But for inserts, it is set for each row. So after
1248  * INSERT, we need to revert back to the map created for UPDATE;
1249  * otherwise the next UPDATE will incorrectly use the one created
1250  * for INSERT. So first save the one created for UPDATE.
1251  */
1252  if (mtstate->mt_transition_capture)
1253  saved_tcs_map = mtstate->mt_transition_capture->tcs_map;
1254 
1255  /*
1256  * resultRelInfo is one of the per-subplan resultRelInfos. So we
1257  * should convert the tuple into root's tuple descriptor, since
1258  * ExecInsert() starts the search from root. The tuple conversion
1259  * map list is in the order of mtstate->resultRelInfo[], so to
1260  * retrieve the one for this resultRel, we need to know the
1261  * position of the resultRel in mtstate->resultRelInfo[].
1262  */
1263  map_index = resultRelInfo - mtstate->resultRelInfo;
1264  Assert(map_index >= 0 && map_index < mtstate->mt_nplans);
1265  tupconv_map = tupconv_map_for_subplan(mtstate, map_index);
1266  if (tupconv_map != NULL)
1267  slot = execute_attr_map_slot(tupconv_map->attrMap,
1268  slot,
1269  mtstate->mt_root_tuple_slot);
1270 
1271  /*
1272  * Prepare for tuple routing, making it look like we're inserting
1273  * into the root.
1274  */
1275  Assert(mtstate->rootResultRelInfo != NULL);
1276  slot = ExecPrepareTupleRouting(mtstate, estate, proute,
1277  mtstate->rootResultRelInfo, slot);
1278 
1279  ret_slot = ExecInsert(mtstate, slot, planSlot,
1280  estate, canSetTag);
1281 
1282  /* Revert ExecPrepareTupleRouting's node change. */
1283  estate->es_result_relation_info = resultRelInfo;
1284  if (mtstate->mt_transition_capture)
1285  {
1286  mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1287  mtstate->mt_transition_capture->tcs_map = saved_tcs_map;
1288  }
1289 
1290  return ret_slot;
1291  }
1292 
1293  /*
1294  * Check the constraints of the tuple. We've already checked the
1295  * partition constraint above; however, we must still ensure the tuple
1296  * passes all other constraints, so we will call ExecConstraints() and
1297  * have it validate all remaining checks.
1298  */
1299  if (resultRelationDesc->rd_att->constr)
1300  ExecConstraints(resultRelInfo, slot, estate);
1301 
1302  /*
1303  * replace the heap tuple
1304  *
1305  * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
1306  * that the row to be updated is visible to that snapshot, and throw a
1307  * can't-serialize error if not. This is a special-case behavior
1308  * needed for referential integrity updates in transaction-snapshot
1309  * mode transactions.
1310  */
1311  result = table_tuple_update(resultRelationDesc, tupleid, slot,
1312  estate->es_output_cid,
1313  estate->es_snapshot,
1314  estate->es_crosscheck_snapshot,
1315  true /* wait for commit */ ,
1316  &tmfd, &lockmode, &update_indexes);
1317 
1318  switch (result)
1319  {
1320  case TM_SelfModified:
1321 
1322  /*
1323  * The target tuple was already updated or deleted by the
1324  * current command, or by a later command in the current
1325  * transaction. The former case is possible in a join UPDATE
1326  * where multiple tuples join to the same target tuple. This
1327  * is pretty questionable, but Postgres has always allowed it:
1328  * we just execute the first update action and ignore
1329  * additional update attempts.
1330  *
1331  * The latter case arises if the tuple is modified by a
1332  * command in a BEFORE trigger, or perhaps by a command in a
1333  * volatile function used in the query. In such situations we
1334  * should not ignore the update, but it is equally unsafe to
1335  * proceed. We don't want to discard the original UPDATE
1336  * while keeping the triggered actions based on it; and we
1337  * have no principled way to merge this update with the
1338  * previous ones. So throwing an error is the only safe
1339  * course.
1340  *
1341  * If a trigger actually intends this type of interaction, it
1342  * can re-execute the UPDATE (assuming it can figure out how)
1343  * and then return NULL to cancel the outer update.
1344  */
1345  if (tmfd.cmax != estate->es_output_cid)
1346  ereport(ERROR,
1347  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1348  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
1349  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1350 
1351  /* Else, already updated by self; nothing to do */
1352  return NULL;
1353 
1354  case TM_Ok:
1355  break;
1356 
1357  case TM_Updated:
1358  {
1359  TupleTableSlot *inputslot;
1360  TupleTableSlot *epqslot;
1361 
1362  if (IsolationUsesXactSnapshot())
1363  ereport(ERROR,
1364  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1365  errmsg("could not serialize access due to concurrent update")));
1366 
1367  /*
1368  * Already know that we're going to need to do EPQ, so
1369  * fetch tuple directly into the right slot.
1370  */
1371  inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
1372  resultRelInfo->ri_RangeTableIndex);
1373 
1374  result = table_tuple_lock(resultRelationDesc, tupleid,
1375  estate->es_snapshot,
1376  inputslot, estate->es_output_cid,
1377  lockmode, LockWaitBlock,
1378  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1379  &tmfd);
1380 
1381  switch (result)
1382  {
1383  case TM_Ok:
1384  Assert(tmfd.traversed);
1385 
1386  epqslot = EvalPlanQual(epqstate,
1387  resultRelationDesc,
1388  resultRelInfo->ri_RangeTableIndex,
1389  inputslot);
1390  if (TupIsNull(epqslot))
1391  /* Tuple not passing quals anymore, exiting... */
1392  return NULL;
1393 
1394  slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
1395  goto lreplace;
1396 
1397  case TM_Deleted:
1398  /* tuple already deleted; nothing to do */
1399  return NULL;
1400 
1401  case TM_SelfModified:
1402 
1403  /*
1404  * This can be reached when following an update
1405  * chain from a tuple updated by another session,
1406  * reaching a tuple that was already updated in
1407  * this transaction. If previously modified by
1408  * this command, ignore the redundant update,
1409  * otherwise error out.
1410  *
1411  * See also TM_SelfModified response to
1412  * table_tuple_update() above.
1413  */
1414  if (tmfd.cmax != estate->es_output_cid)
1415  ereport(ERROR,
1416  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1417  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
1418  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1419  return NULL;
1420 
1421  default:
1422  /* see table_tuple_lock call in ExecDelete() */
1423  elog(ERROR, "unexpected table_tuple_lock status: %u",
1424  result);
1425  return NULL;
1426  }
1427  }
1428 
1429  break;
1430 
1431  case TM_Deleted:
1432  if (IsolationUsesXactSnapshot())
1433  ereport(ERROR,
1434  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1435  errmsg("could not serialize access due to concurrent delete")));
1436  /* tuple already deleted; nothing to do */
1437  return NULL;
1438 
1439  default:
1440  elog(ERROR, "unrecognized table_tuple_update status: %u",
1441  result);
1442  return NULL;
1443  }
1444 
1445  /* insert index entries for tuple if necessary */
1446  if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
1447  recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, NIL);
1448  }
1449 
1450  if (canSetTag)
1451  (estate->es_processed)++;
1452 
1453  /* AFTER ROW UPDATE Triggers */
1454  ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, slot,
1455  recheckIndexes,
1456  mtstate->operation == CMD_INSERT ?
1457  mtstate->mt_oc_transition_capture :
1458  mtstate->mt_transition_capture);
1459 
1460  list_free(recheckIndexes);
1461 
1462  /*
1463  * Check any WITH CHECK OPTION constraints from parent views. We are
1464  * required to do this after testing all constraints and uniqueness
1465  * violations per the SQL spec, so we do it after actually updating the
1466  * record in the heap and all indexes.
1467  *
1468  * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1469  * are looking for at this point.
1470  */
1471  if (resultRelInfo->ri_WithCheckOptions != NIL)
1472  ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1473 
1474  /* Process RETURNING if present */
1475  if (resultRelInfo->ri_projectReturning)
1476  return ExecProcessReturning(resultRelInfo, slot, planSlot);
1477 
1478  return NULL;
1479 }
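/*
 * Sketch of the row-movement path above: with a partitioned table such as
 *
 *     CREATE TABLE m (k int, v text) PARTITION BY RANGE (k);
 *
 * an UPDATE that changes k so the row no longer fits its current partition
 * is executed as a DELETE from the old partition followed by an INSERT
 * (routed from the root) into the new one; if the DELETE finds the row
 * already gone, the INSERT is skipped so no row is duplicated.
 */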
1480 
1481 /*
1482  * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
1483  *
1484  * Try to lock tuple for update as part of speculative insertion. If
1485  * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
1486  * (but still lock row, even though it may not satisfy estate's
1487  * snapshot).
1488  *
1489  * Returns true if we're done (with or without an update), or false if
1490  * the caller must retry the INSERT from scratch.
1491  */
1492 static bool
1493 ExecOnConflictUpdate(ModifyTableState *mtstate,
1494  ResultRelInfo *resultRelInfo,
1495  ItemPointer conflictTid,
1496  TupleTableSlot *planSlot,
1497  TupleTableSlot *excludedSlot,
1498  EState *estate,
1499  bool canSetTag,
1500  TupleTableSlot **returning)
1501 {
1502  ExprContext *econtext = mtstate->ps.ps_ExprContext;
1503  Relation relation = resultRelInfo->ri_RelationDesc;
1504  ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
1505  TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
1506  TM_FailureData tmfd;
1507  LockTupleMode lockmode;
1508  TM_Result test;
1509  Datum xminDatum;
1510  TransactionId xmin;
1511  bool isnull;
1512 
1513  /* Determine lock mode to use */
1514  lockmode = ExecUpdateLockMode(estate, resultRelInfo);
1515 
1516  /*
1517  * Lock tuple for update. Don't follow updates when tuple cannot be
1518  * locked without doing so. A row locking conflict here means our
1519  * previous conclusion that the tuple is conclusively committed is not
1520  * true anymore.
1521  */
1522  test = table_tuple_lock(relation, conflictTid,
1523  estate->es_snapshot,
1524  existing, estate->es_output_cid,
1525  lockmode, LockWaitBlock, 0,
1526  &tmfd);
1527  switch (test)
1528  {
1529  case TM_Ok:
1530  /* success! */
1531  break;
1532 
1533  case TM_Invisible:
1534 
1535  /*
1536  * This can occur when a just inserted tuple is updated again in
1537  * the same command. E.g. because multiple rows with the same
1538  * conflicting key values are inserted.
1539  *
1540  * This is somewhat similar to the ExecUpdate() TM_SelfModified
1541  * case. We do not want to proceed because it would lead to the
1542  * same row being updated a second time in some unspecified order,
1543  * and in contrast to plain UPDATEs there's no historical behavior
1544  * to break.
1545  *
1546  * It is the user's responsibility to prevent this situation from
1547  * occurring. These problems are why SQL-2003 similarly specifies
1548  * that for SQL MERGE, an exception must be raised in the event of
1549  * an attempt to update the same row twice.
1550  */
1551  xminDatum = slot_getsysattr(existing,
1552  MinTransactionIdAttributeNumber,
1553  &isnull);
1554  Assert(!isnull);
1555  xmin = DatumGetTransactionId(xminDatum);
1556 
1557  if (TransactionIdIsCurrentTransactionId(xmin))
1558  ereport(ERROR,
1559  (errcode(ERRCODE_CARDINALITY_VIOLATION),
1560  errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
1561  errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
1562 
1563  /* This shouldn't happen */
1564  elog(ERROR, "attempted to lock invisible tuple");
1565  break;
1566 
1567  case TM_SelfModified:
1568 
1569  /*
1570  * This state should never be reached. As a dirty snapshot is used
1571  * to find conflicting tuples, speculative insertion wouldn't have
1572  * seen this row to conflict with.
1573  */
1574  elog(ERROR, "unexpected self-updated tuple");
1575  break;
1576 
1577  case TM_Updated:
1578  if (IsolationUsesXactSnapshot())
1579  ereport(ERROR,
1580  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1581  errmsg("could not serialize access due to concurrent update")));
1582 
1583  /*
1584  * As long as we don't support an UPDATE of INSERT ON CONFLICT for
1585  * a partitioned table we shouldn't reach to a case where tuple to
1586  * be lock is moved to another partition due to concurrent update
1587  * of the partition key.
1588  */
1589  Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
1590 
1591  /*
1592  * Tell caller to try again from the very start.
1593  *
1594  * It does not make sense to use the usual EvalPlanQual() style
1595  * loop here, as the new version of the row might not conflict
1596  * anymore, or the conflicting tuple has actually been deleted.
1597  */
1598  ExecClearTuple(existing);
1599  return false;
1600 
1601  case TM_Deleted:
1602  if (IsolationUsesXactSnapshot())
1603  ereport(ERROR,
1604  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1605  errmsg("could not serialize access due to concurrent delete")));
1606 
1607  /* see TM_Updated case */
1608  Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
1609  ExecClearTuple(existing);
1610  return false;
1611 
1612  default:
1613  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
1614  }
1615 
1616  /* Success, the tuple is locked. */
1617 
1618  /*
1619  * Verify that the tuple is visible to our MVCC snapshot if the current
1620  * isolation level mandates that.
1621  *
1622  * It's not sufficient to rely on the check within ExecUpdate() as e.g.
1623  * CONFLICT ... WHERE clause may prevent us from reaching that.
1624  *
1625  * This means we only ever continue when a new command in the current
1626  * transaction could see the row, even though in READ COMMITTED mode the
1627  * tuple will not be visible according to the current statement's
1628  * snapshot. This is in line with the way UPDATE deals with newer tuple
1629  * versions.
1630  */
1631  ExecCheckTupleVisible(estate, relation, existing);
1632 
1633  /*
1634  * Make tuple and any needed join variables available to ExecQual and
1635  * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
1636  * the target's existing tuple is installed in the scantuple. EXCLUDED
1637  * has been made to reference INNER_VAR in setrefs.c, but there is no
1638  * other redirection.
1639  */
1640  econtext->ecxt_scantuple = existing;
1641  econtext->ecxt_innertuple = excludedSlot;
1642  econtext->ecxt_outertuple = NULL;
1643 
1644  if (!ExecQual(onConflictSetWhere, econtext))
1645  {
1646  ExecClearTuple(existing); /* see return below */
1647  InstrCountFiltered1(&mtstate->ps, 1);
1648  return true; /* done with the tuple */
1649  }
1650 
1651  if (resultRelInfo->ri_WithCheckOptions != NIL)
1652  {
1653  /*
1654  * Check target's existing tuple against UPDATE-applicable USING
1655  * security barrier quals (if any), enforced here as RLS checks/WCOs.
1656  *
1657  * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
1658  * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
1659  * but that's almost the extent of its special handling for ON
1660  * CONFLICT DO UPDATE.
1661  *
1662  * The rewriter will also have associated UPDATE applicable straight
1663  * RLS checks/WCOs for the benefit of the ExecUpdate() call that
1664  * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
1665  * kinds, so there is no danger of spurious over-enforcement in the
1666  * INSERT or UPDATE path.
1667  */
1668  ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
1669  existing,
1670  mtstate->ps.state);
1671  }
1672 
1673  /* Project the new tuple version */
1674  ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
1675 
1676  /*
1677  * Note that it is possible that the target tuple has been modified in
1678  * this session, after the above table_tuple_lock. We choose to not error
1679  * out in that case, in line with ExecUpdate's treatment of similar cases.
1680  * This can happen if an UPDATE is triggered from within ExecQual(),
1681  * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
1682  * wCTE in the ON CONFLICT's SET.
1683  */
1684 
1685  /* Execute UPDATE with projection */
1686  *returning = ExecUpdate(mtstate, conflictTid, NULL,
1687  resultRelInfo->ri_onConflict->oc_ProjSlot,
1688  planSlot,
1689  &mtstate->mt_epqstate, mtstate->ps.state,
1690  canSetTag);
1691 
1692  /*
1693  * Clear out existing tuple, as there might not be another conflict among
1694  * the next input rows. Don't want to hold resources till the end of the
1695  * query.
1696  */
1697  ExecClearTuple(existing);
1698  return true;
1699 }
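/*
 * Illustrative usage sketch (not part of the original file; the table and
 * column names below are hypothetical).  ExecOnConflictUpdate() above runs
 * the alternative action of a statement such as:
 *
 *     CREATE TABLE counters (key text PRIMARY KEY, hits int);
 *
 *     INSERT INTO counters VALUES ('home', 1)
 *     ON CONFLICT (key) DO UPDATE
 *       SET hits = counters.hits + EXCLUDED.hits
 *       WHERE counters.hits < 1000;
 *
 * The conflicting row is locked and placed in the scan tuple (referenced in
 * SQL by the target table name, "counters"), the proposed row goes into the
 * inner tuple ("EXCLUDED"), the WHERE clause is what ExecQual() evaluates as
 * onConflictSetWhere, and the SET list is the projection run through
 * oc_ProjInfo before ExecUpdate() is invoked.
 */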
1700 
1701 
1702 /*
1703  * Process BEFORE EACH STATEMENT triggers
1704  */
1705 static void
1706 fireBSTriggers(ModifyTableState *node)
1707 {
1708  ModifyTable *plan = (ModifyTable *) node->ps.plan;
1709  ResultRelInfo *resultRelInfo = node->resultRelInfo;
1710 
1711  /*
1712  * If the node modifies a partitioned table, we must fire its triggers.
1713  * Note that in that case, node->resultRelInfo points to the first leaf
1714  * partition, not the root table.
1715  */
1716  if (node->rootResultRelInfo != NULL)
1717  resultRelInfo = node->rootResultRelInfo;
1718 
1719  switch (node->operation)
1720  {
1721  case CMD_INSERT:
1722  ExecBSInsertTriggers(node->ps.state, resultRelInfo);
1723  if (plan->onConflictAction == ONCONFLICT_UPDATE)
1724  ExecBSUpdateTriggers(node->ps.state,
1725  resultRelInfo);
1726  break;
1727  case CMD_UPDATE:
1728  ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
1729  break;
1730  case CMD_DELETE:
1731  ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
1732  break;
1733  default:
1734  elog(ERROR, "unknown operation");
1735  break;
1736  }
1737 }
1738 
1739 /*
1740  * Return the target rel ResultRelInfo.
1741  *
1742  * This relation is the same as:
1743  * - the relation for which we will fire AFTER STATEMENT triggers.
1744  * - the relation into whose tuple format all captured transition tuples must
1745  * be converted.
1746  * - the root partitioned table.
1747  */
1748 static ResultRelInfo *
1749 getTargetResultRelInfo(ModifyTableState *node)
1750 {
1751  /*
1752  * Note that if the node modifies a partitioned table, node->resultRelInfo
1753  * points to the first leaf partition, not the root table.
1754  */
1755  if (node->rootResultRelInfo != NULL)
1756  return node->rootResultRelInfo;
1757  else
1758  return node->resultRelInfo;
1759 }
1760 
1761 /*
1762  * Process AFTER EACH STATEMENT triggers
1763  */
1764 static void
1765 fireASTriggers(ModifyTableState *node)
1766 {
1767  ModifyTable *plan = (ModifyTable *) node->ps.plan;
1768  ResultRelInfo *resultRelInfo = getTargetResultRelInfo(node);
1769 
1770  switch (node->operation)
1771  {
1772  case CMD_INSERT:
1773  if (plan->onConflictAction == ONCONFLICT_UPDATE)
1774  ExecASUpdateTriggers(node->ps.state,
1775  resultRelInfo,
1776  node->mt_oc_transition_capture);
1777  ExecASInsertTriggers(node->ps.state, resultRelInfo,
1778  node->mt_transition_capture);
1779  break;
1780  case CMD_UPDATE:
1781  ExecASUpdateTriggers(node->ps.state, resultRelInfo,
1782  node->mt_transition_capture);
1783  break;
1784  case CMD_DELETE:
1785  ExecASDeleteTriggers(node->ps.state, resultRelInfo,
1786  node->mt_transition_capture);
1787  break;
1788  default:
1789  elog(ERROR, "unknown operation");
1790  break;
1791  }
1792 }
1793 
1794 /*
1795  * Set up the state needed for collecting transition tuples for AFTER
1796  * triggers.
1797  */
1798 static void
1799 ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
1800 {
1801  ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
1802  ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate);
1803 
1804  /* Check for transition tables on the directly targeted relation. */
1805  mtstate->mt_transition_capture =
1806  MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
1807  RelationGetRelid(targetRelInfo->ri_RelationDesc),
1808  mtstate->operation);
1809  if (plan->operation == CMD_INSERT &&
1810  plan->onConflictAction == ONCONFLICT_UPDATE)
1811  mtstate->mt_oc_transition_capture =
1812  MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
1813  RelationGetRelid(targetRelInfo->ri_RelationDesc),
1814  CMD_UPDATE);
1815 
1816  /*
1817  * If we found that we need to collect transition tuples then we may also
1818  * need tuple conversion maps for any children that have TupleDescs that
1819  * aren't compatible with the tuplestores. (We can share these maps
1820  * between the regular and ON CONFLICT cases.)
1821  */
1822  if (mtstate->mt_transition_capture != NULL ||
1823  mtstate->mt_oc_transition_capture != NULL)
1824  {
1825  ExecSetupChildParentMapForSubplan(mtstate);
1826 
1827  /*
1828  * Install the conversion map for the first plan for UPDATE and DELETE
1829  * operations. It will be advanced each time we switch to the next
1830  * plan. (INSERT operations set it every time, so we need not update
1831  * mtstate->mt_oc_transition_capture here.)
1832  */
1833  if (mtstate->mt_transition_capture && mtstate->operation != CMD_INSERT)
1834  mtstate->mt_transition_capture->tcs_map =
1835  tupconv_map_for_subplan(mtstate, 0);
1836  }
1837 }
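/*
 * Illustrative usage sketch (not part of the original file; the trigger,
 * table and function names are hypothetical).  Transition capture is needed
 * for statement-level triggers that declare transition tables, e.g.:
 *
 *     CREATE TRIGGER accounts_audit
 *     AFTER UPDATE ON accounts
 *     REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *     FOR EACH STATEMENT EXECUTE FUNCTION accounts_audit_fn();
 *
 * MakeTransitionCaptureState() returns NULL when no such trigger exists, and
 * the per-subplan conversion maps set up above let rows coming from children
 * of an inheritance or partition tree be stored in the transition
 * tuplestores using the target relation's row type.
 */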
1838 
1839 /*
1840  * ExecPrepareTupleRouting --- prepare for routing one tuple
1841  *
1842  * Determine the partition in which the tuple in slot is to be inserted,
1843  * and modify mtstate and estate to prepare for it.
1844  *
1845  * Caller must revert the estate changes after executing the insertion!
1846  * In mtstate, transition capture changes may also need to be reverted.
1847  *
1848  * Returns a slot holding the tuple of the partition rowtype.
1849  */
1850 static TupleTableSlot *
1851 ExecPrepareTupleRouting(ModifyTableState *mtstate,
1852  EState *estate,
1853  PartitionTupleRouting *proute,
1854  ResultRelInfo *targetRelInfo,
1855  TupleTableSlot *slot)
1856 {
1857  ResultRelInfo *partrel;
1858  PartitionRoutingInfo *partrouteinfo;
1859  TupleConversionMap *map;
1860 
1861  /*
1862  * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
1863  * not find a valid partition for the tuple in 'slot' then an error is
1864  * raised. An error may also be raised if the found partition is not a
1865  * valid target for INSERTs. This is required since a partitioned table
1866  * UPDATE to another partition becomes a DELETE+INSERT.
1867  */
1868  partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
1869  partrouteinfo = partrel->ri_PartitionInfo;
1870  Assert(partrouteinfo != NULL);
1871 
1872  /*
1873  * Make it look like we are inserting into the partition.
1874  */
1875  estate->es_result_relation_info = partrel;
1876 
1877  /*
1878  * If we're capturing transition tuples, we might need to convert from the
1879  * partition rowtype to the root partitioned table's rowtype.
1880  */
1881  if (mtstate->mt_transition_capture != NULL)
1882  {
1883  if (partrel->ri_TrigDesc &&
1884  partrel->ri_TrigDesc->trig_insert_before_row)
1885  {
1886  /*
1887  * If there are any BEFORE triggers on the partition, we'll have
1888  * to be ready to convert their result back to tuplestore format.
1889  */
1890  mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1891  mtstate->mt_transition_capture->tcs_map =
1892  partrouteinfo->pi_PartitionToRootMap;
1893  }
1894  else
1895  {
1896  /*
1897  * Otherwise, just remember the original unconverted tuple, to
1898  * avoid a needless round trip conversion.
1899  */
1900  mtstate->mt_transition_capture->tcs_original_insert_tuple = slot;
1901  mtstate->mt_transition_capture->tcs_map = NULL;
1902  }
1903  }
1904  if (mtstate->mt_oc_transition_capture != NULL)
1905  {
1906  mtstate->mt_oc_transition_capture->tcs_map =
1907  partrouteinfo->pi_PartitionToRootMap;
1908  }
1909 
1910  /*
1911  * Convert the tuple, if necessary.
1912  */
1913  map = partrouteinfo->pi_RootToPartitionMap;
1914  if (map != NULL)
1915  {
1916  TupleTableSlot *new_slot = partrouteinfo->pi_PartitionTupleSlot;
1917 
1918  slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
1919  }
1920 
1921  return slot;
1922 }
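/*
 * Illustrative usage sketch (not part of the original file; the table names
 * are hypothetical).  Given a declaratively partitioned table such as:
 *
 *     CREATE TABLE measurements (logdate date, value int)
 *         PARTITION BY RANGE (logdate);
 *     CREATE TABLE measurements_2019 PARTITION OF measurements
 *         FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
 *
 *     INSERT INTO measurements VALUES ('2019-06-01', 42);
 *
 * ExecFindPartition() above resolves measurements_2019 as the insertion
 * target, and pi_RootToPartitionMap converts the tuple only when the
 * partition's column layout differs from the root's (for instance after a
 * DROP COLUMN on the parent); otherwise the incoming slot is returned
 * unchanged.
 */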
1923 
1924 /*
1925  * Initialize the child-to-root tuple conversion map array for UPDATE subplans.
1926  *
1927  * This map array is required to convert the tuple from the subplan result rel
1928  * to the target table descriptor. This requirement arises for two independent
1929  * scenarios:
1930  * 1. For update-tuple-routing.
1931  * 2. For capturing tuples in transition tables.
1932  */
1933 static void
1934 ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate)
1935 {
1936  ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate);
1937  ResultRelInfo *resultRelInfos = mtstate->resultRelInfo;
1938  TupleDesc outdesc;
1939  int numResultRelInfos = mtstate->mt_nplans;
1940  int i;
1941 
1942  /*
1943  * Build array of conversion maps from each child's TupleDesc to the one
1944  * used in the target relation. The map pointers may be NULL when no
1945  * conversion is necessary, which is hopefully a common case.
1946  */
1947 
1948  /* Get tuple descriptor of the target rel. */
1949  outdesc = RelationGetDescr(targetRelInfo->ri_RelationDesc);
1950 
1951  mtstate->mt_per_subplan_tupconv_maps = (TupleConversionMap **)
1952  palloc(sizeof(TupleConversionMap *) * numResultRelInfos);
1953 
1954  for (i = 0; i < numResultRelInfos; ++i)
1955  {
1956  mtstate->mt_per_subplan_tupconv_maps[i] =
1957  convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc),
1958  outdesc);
1959  }
1960 }
1961 
1962 /*
1963  * For a given subplan index, get the tuple conversion map.
1964  */
1965 static TupleConversionMap *
1966 tupconv_map_for_subplan(ModifyTableState *mtstate, int whichplan)
1967 {
1968  /* If nobody else set the per-subplan array of maps, do so ourselves. */
1969  if (mtstate->mt_per_subplan_tupconv_maps == NULL)
1970  ExecSetupChildParentMapForSubplan(mtstate);
1971 
1972  Assert(whichplan >= 0 && whichplan < mtstate->mt_nplans);
1973  return mtstate->mt_per_subplan_tupconv_maps[whichplan];
1974 }
1975 
1976 /* ----------------------------------------------------------------
1977  * ExecModifyTable
1978  *
1979  * Perform table modifications as required, and return RETURNING results
1980  * if needed.
1981  * ----------------------------------------------------------------
1982  */
1983 static TupleTableSlot *
1984 ExecModifyTable(PlanState *pstate)
1985 {
1986  ModifyTableState *node = castNode(ModifyTableState, pstate);
1987  PartitionTupleRouting *proute = node->mt_partition_tuple_routing;
1988  EState *estate = node->ps.state;
1989  CmdType operation = node->operation;
1990  ResultRelInfo *saved_resultRelInfo;
1991  ResultRelInfo *resultRelInfo;
1992  PlanState *subplanstate;
1993  JunkFilter *junkfilter;
1994  TupleTableSlot *slot;
1995  TupleTableSlot *planSlot;
1996  ItemPointer tupleid;
1997  ItemPointerData tuple_ctid;
1998  HeapTupleData oldtupdata;
1999  HeapTuple oldtuple;
2000 
2001  CHECK_FOR_INTERRUPTS();
2002 
2003  /*
2004  * This should NOT get called during EvalPlanQual; we should have passed a
2005  * subplan tree to EvalPlanQual, instead. Use a runtime test not just
2006  * Assert because this condition is easy to miss in testing. (Note:
2007  * although ModifyTable should not get executed within an EvalPlanQual
2008  * operation, we do have to allow it to be initialized and shut down in
2009  * case it is within a CTE subplan. Hence this test must be here, not in
2010  * ExecInitModifyTable.)
2011  */
2012  if (estate->es_epq_active != NULL)
2013  elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
2014 
2015  /*
2016  * If we've already completed processing, don't try to do more. We need
2017  * this test because ExecPostprocessPlan might call us an extra time, and
2018  * our subplan's nodes aren't necessarily robust against being called
2019  * extra times.
2020  */
2021  if (node->mt_done)
2022  return NULL;
2023 
2024  /*
2025  * On first call, fire BEFORE STATEMENT triggers before proceeding.
2026  */
2027  if (node->fireBSTriggers)
2028  {
2029  fireBSTriggers(node);
2030  node->fireBSTriggers = false;
2031  }
2032 
2033  /* Preload local variables */
2034  resultRelInfo = node->resultRelInfo + node->mt_whichplan;
2035  subplanstate = node->mt_plans[node->mt_whichplan];
2036  junkfilter = resultRelInfo->ri_junkFilter;
2037 
2038  /*
2039  * es_result_relation_info must point to the currently active result
2040  * relation while we are within this ModifyTable node. Even though
2041  * ModifyTable nodes can't be nested statically, they can be nested
2042  * dynamically (since our subplan could include a reference to a modifying
2043  * CTE). So we have to save and restore the caller's value.
2044  */
2045  saved_resultRelInfo = estate->es_result_relation_info;
2046 
2047  estate->es_result_relation_info = resultRelInfo;
2048 
2049  /*
2050  * Fetch rows from subplan(s), and execute the required table modification
2051  * for each row.
2052  */
2053  for (;;)
2054  {
2055  /*
2056  * Reset the per-output-tuple exprcontext. This is needed because
2057  * triggers expect to use that context as workspace. It's a bit ugly
2058  * to do this below the top level of the plan, however. We might need
2059  * to rethink this later.
2060  */
2061  ResetPerTupleExprContext(estate);
2062 
2063  /*
2064  * Reset per-tuple memory context used for processing on conflict and
2065  * returning clauses, to free any expression evaluation storage
2066  * allocated in the previous cycle.
2067  */
2068  if (pstate->ps_ExprContext)
2069  ResetExprContext(pstate->ps_ExprContext);
2070 
2071  planSlot = ExecProcNode(subplanstate);
2072 
2073  if (TupIsNull(planSlot))
2074  {
2075  /* advance to next subplan if any */
2076  node->mt_whichplan++;
2077  if (node->mt_whichplan < node->mt_nplans)
2078  {
2079  resultRelInfo++;
2080  subplanstate = node->mt_plans[node->mt_whichplan];
2081  junkfilter = resultRelInfo->ri_junkFilter;
2082  estate->es_result_relation_info = resultRelInfo;
2083  EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
2084  node->mt_arowmarks[node->mt_whichplan]);
2085  /* Prepare to convert transition tuples from this child. */
2086  if (node->mt_transition_capture != NULL)
2087  {
2088  node->mt_transition_capture->tcs_map =
2089  tupconv_map_for_subplan(node, node->mt_whichplan);
2090  }
2091  if (node->mt_oc_transition_capture != NULL)
2092  {
2093  node->mt_oc_transition_capture->tcs_map =
2094  tupconv_map_for_subplan(node, node->mt_whichplan);
2095  }
2096  continue;
2097  }
2098  else
2099  break;
2100  }
2101 
2102  /*
2103  * Ensure input tuple is the right format for the target relation.
2104  */
2105  if (node->mt_scans[node->mt_whichplan]->tts_ops != planSlot->tts_ops)
2106  {
2107  ExecCopySlot(node->mt_scans[node->mt_whichplan], planSlot);
2108  planSlot = node->mt_scans[node->mt_whichplan];
2109  }
2110 
2111  /*
2112  * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
2113  * here is compute the RETURNING expressions.
2114  */
2115  if (resultRelInfo->ri_usesFdwDirectModify)
2116  {
2117  Assert(resultRelInfo->ri_projectReturning);
2118 
2119  /*
2120  * A scan slot containing the data that was actually inserted,
2121  * updated or deleted has already been made available to
2122  * ExecProcessReturning by IterateDirectModify, so no need to
2123  * provide it here.
2124  */
2125  slot = ExecProcessReturning(resultRelInfo, NULL, planSlot);
2126 
2127  estate->es_result_relation_info = saved_resultRelInfo;
2128  return slot;
2129  }
2130 
2131  EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
2132  slot = planSlot;
2133 
2134  tupleid = NULL;
2135  oldtuple = NULL;
2136  if (junkfilter != NULL)
2137  {
2138  /*
2139  * extract the 'ctid' or 'wholerow' junk attribute.
2140  */
2141  if (operation == CMD_UPDATE || operation == CMD_DELETE)
2142  {
2143  char relkind;
2144  Datum datum;
2145  bool isNull;
2146 
2147  relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
2148  if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)
2149  {
2150  datum = ExecGetJunkAttribute(slot,
2151  junkfilter->jf_junkAttNo,
2152  &isNull);
2153  /* shouldn't ever get a null result... */
2154  if (isNull)
2155  elog(ERROR, "ctid is NULL");
2156 
2157  tupleid = (ItemPointer) DatumGetPointer(datum);
2158  tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
2159  tupleid = &tuple_ctid;
2160  }
2161 
2162  /*
2163  * Use the wholerow attribute, when available, to reconstruct
2164  * the old relation tuple.
2165  *
2166  * Foreign table updates have a wholerow attribute when the
2167  * relation has a row-level trigger. Note that the wholerow
2168  * attribute does not carry system columns. Foreign table
2169  * triggers miss seeing those, except that we know enough here
2170  * to set t_tableOid. Quite separately from this, the FDW may
2171  * fetch its own junk attrs to identify the row.
2172  *
2173  * Other relevant relkinds, currently limited to views, always
2174  * have a wholerow attribute.
2175  */
2176  else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
2177  {
2178  datum = ExecGetJunkAttribute(slot,
2179  junkfilter->jf_junkAttNo,
2180  &isNull);
2181  /* shouldn't ever get a null result... */
2182  if (isNull)
2183  elog(ERROR, "wholerow is NULL");
2184 
2185  oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
2186  oldtupdata.t_len =
2187  HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
2188  ItemPointerSetInvalid(&(oldtupdata.t_self));
2189  /* Historically, view triggers see invalid t_tableOid. */
2190  oldtupdata.t_tableOid =
2191  (relkind == RELKIND_VIEW) ? InvalidOid :
2192  RelationGetRelid(resultRelInfo->ri_RelationDesc);
2193 
2194  oldtuple = &oldtupdata;
2195  }
2196  else
2197  Assert(relkind == RELKIND_FOREIGN_TABLE);
2198  }
2199 
2200  /*
2201  * apply the junkfilter if needed.
2202  */
2203  if (operation != CMD_DELETE)
2204  slot = ExecFilterJunk(junkfilter, slot);
2205  }
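/*
 * Illustrative note (not part of the original file; the names below are
 * hypothetical).  For an UPDATE or DELETE on a plain table the subplan
 * emits a resjunk "ctid" column, extracted above to pin down the target
 * row.  A view with an INSTEAD OF trigger, e.g.
 *
 *     CREATE VIEW v AS SELECT id, val FROM base_t;
 *     CREATE TRIGGER v_upd INSTEAD OF UPDATE ON v
 *         FOR EACH ROW EXECUTE FUNCTION v_upd_fn();
 *     UPDATE v SET val = 0 WHERE id = 1;
 *
 * instead emits a resjunk "wholerow" column, which is decoded into
 * oldtupdata above so the trigger can see the old row.
 */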
2206 
2207  switch (operation)
2208  {
2209  case CMD_INSERT:
2210  /* Prepare for tuple routing if needed. */
2211  if (proute)
2212  slot = ExecPrepareTupleRouting(node, estate, proute,
2213  resultRelInfo, slot);
2214  slot = ExecInsert(node, slot, planSlot,
2215  estate, node->canSetTag);
2216  /* Revert ExecPrepareTupleRouting's state change. */
2217  if (proute)
2218  estate->es_result_relation_info = resultRelInfo;
2219  break;
2220  case CMD_UPDATE:
2221  slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot,
2222  &node->mt_epqstate, estate, node->canSetTag);
2223  break;
2224  case CMD_DELETE:
2225  slot = ExecDelete(node, tupleid, oldtuple, planSlot,
2226  &node->mt_epqstate, estate,
2227  true, node->canSetTag,
2228  false /* changingPart */ , NULL, NULL);
2229  break;
2230  default:
2231  elog(ERROR, "unknown operation");
2232  break;
2233  }
2234 
2235  /*
2236  * If we got a RETURNING result, return it to caller. We'll continue
2237  * the work on next call.
2238  */
2239  if (slot)
2240  {
2241  estate->es_result_relation_info = saved_resultRelInfo;
2242  return slot;
2243  }
2244  }
2245 
2246  /* Restore es_result_relation_info before exiting */
2247  estate->es_result_relation_info = saved_resultRelInfo;
2248 
2249  /*
2250  * We're done, but fire AFTER STATEMENT triggers before exiting.
2251  */
2252  fireASTriggers(node);
2253 
2254  node->mt_done = true;
2255 
2256  return NULL;
2257 }
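/*
 * Illustrative usage sketch (not part of the original file; names are
 * hypothetical).  With a RETURNING clause, e.g.
 *
 *     UPDATE orders SET status = 'shipped'
 *     WHERE shipped_at IS NOT NULL
 *     RETURNING id, status;
 *
 * the loop above returns one projected slot per modified row and resumes
 * where it left off on the next call; without RETURNING it runs the
 * subplans to completion in a single call and returns NULL.
 */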
2258 
2259 /* ----------------------------------------------------------------
2260  * ExecInitModifyTable
2261  * ----------------------------------------------------------------
2262  */
2263 ModifyTableState *
2264 ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
2265 {
2266  ModifyTableState *mtstate;
2267  CmdType operation = node->operation;
2268  int nplans = list_length(node->plans);
2269  ResultRelInfo *saved_resultRelInfo;
2270  ResultRelInfo *resultRelInfo;
2271  Plan *subplan;
2272  ListCell *l;
2273  int i;
2274  Relation rel;
2275  bool update_tuple_routing_needed = node->partColsUpdated;
2276 
2277  /* check for unsupported flags */
2278  Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
2279 
2280  /*
2281  * create state structure
2282  */
2283  mtstate = makeNode(ModifyTableState);
2284  mtstate->ps.plan = (Plan *) node;
2285  mtstate->ps.state = estate;
2286  mtstate->ps.ExecProcNode = ExecModifyTable;
2287 
2288  mtstate->operation = operation;
2289  mtstate->canSetTag = node->canSetTag;
2290  mtstate->mt_done = false;
2291 
2292  mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
2293  mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
2294  mtstate->mt_scans = (TupleTableSlot **) palloc0(sizeof(TupleTableSlot *) * nplans);
2295 
2296  /* If modifying a partitioned table, initialize the root table info */
2297  if (node->rootResultRelIndex >= 0)
2298  mtstate->rootResultRelInfo = estate->es_root_result_relations +
2299  node->rootResultRelIndex;
2300 
2301  mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
2302  mtstate->mt_nplans = nplans;
2303 
2304  /* set up epqstate with dummy subplan data for the moment */
2305  EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
2306  mtstate->fireBSTriggers = true;
2307 
2308  /*
2309  * call ExecInitNode on each of the plans to be executed and save the
2310  * results into the array "mt_plans". This is also a convenient place to
2311  * verify that the proposed target relations are valid and open their
2312  * indexes for insertion of new index entries. Note we *must* set
2313  * estate->es_result_relation_info correctly while we initialize each
2314  * sub-plan; external modules such as FDWs may depend on that (see
2315  * contrib/postgres_fdw/postgres_fdw.c: postgresBeginDirectModify() as one
2316  * example).
2317  */
2318  saved_resultRelInfo = estate->es_result_relation_info;
2319 
2320  resultRelInfo = mtstate->resultRelInfo;
2321  i = 0;
2322  foreach(l, node->plans)
2323  {
2324  subplan = (Plan *) lfirst(l);
2325 
2326  /* Initialize the usesFdwDirectModify flag */
2327  resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
2328  node->fdwDirectModifyPlans);
2329 
2330  /*
2331  * Verify result relation is a valid target for the current operation
2332  */
2333  CheckValidResultRel(resultRelInfo, operation);
2334 
2335  /*
2336  * If there are indices on the result relation, open them and save
2337  * descriptors in the result relation info, so that we can add new
2338  * index entries for the tuples we add/update. We need not do this
2339  * for a DELETE, however, since deletion doesn't affect indexes. Also,
2340  * inside an EvalPlanQual operation, the indexes might be open
2341  * already, since we share the resultrel state with the original
2342  * query.
2343  */
2344  if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
2345  operation != CMD_DELETE &&
2346  resultRelInfo->ri_IndexRelationDescs == NULL)
2347  ExecOpenIndices(resultRelInfo,
2348  node->onConflictAction != ONCONFLICT_NONE);
2349 
2350  /*
2351  * If this is an UPDATE and a BEFORE UPDATE trigger is present, the
2352  * trigger itself might modify the partition-key values. So arrange
2353  * for tuple routing.
2354  */
2355  if (resultRelInfo->ri_TrigDesc &&
2356  resultRelInfo->ri_TrigDesc->trig_update_before_row &&
2357  operation == CMD_UPDATE)
2358  update_tuple_routing_needed = true;
2359 
2360  /* Now init the plan for this result rel */
2361  estate->es_result_relation_info = resultRelInfo;
2362  mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
2363  mtstate->mt_scans[i] =
2364  ExecInitExtraTupleSlot(mtstate->ps.state, ExecGetResultType(mtstate->mt_plans[i]),
2365  table_slot_callbacks(resultRelInfo->ri_RelationDesc));
2366 
2367  /* Also let FDWs init themselves for foreign-table result rels */
2368  if (!resultRelInfo->ri_usesFdwDirectModify &&
2369  resultRelInfo->ri_FdwRoutine != NULL &&
2370  resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
2371  {
2372  List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
2373 
2374  resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
2375  resultRelInfo,
2376  fdw_private,
2377  i,
2378  eflags);
2379  }
2380 
2381  resultRelInfo++;
2382  i++;
2383  }
2384 
2385  estate->es_result_relation_info = saved_resultRelInfo;
2386 
2387  /* Get the target relation */
2388  rel = (getTargetResultRelInfo(mtstate))->ri_RelationDesc;
2389 
2390  /*
2391  * If it's not a partitioned table after all, UPDATE tuple routing should
2392  * not be attempted.
2393  */
2394  if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
2395  update_tuple_routing_needed = false;
2396 
2397  /*
2398  * Build state for tuple routing if it's an INSERT or if it's an UPDATE of
2399  * partition key.
2400  */
2401  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
2402  (operation == CMD_INSERT || update_tuple_routing_needed))
2403  mtstate->mt_partition_tuple_routing =
2404  ExecSetupPartitionTupleRouting(estate, mtstate, rel);
2405 
2406  /*
2407  * Build state for collecting transition tuples. This requires having a
2408  * valid trigger query context, so skip it in explain-only mode.
2409  */
2410  if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
2411  ExecSetupTransitionCaptureState(mtstate, estate);
2412 
2413  /*
2414  * Construct mapping from each of the per-subplan partition attnos to the
2415  * root attno. This is required when during update row movement the tuple
2416  * descriptor of a source partition does not match the root partitioned
2417  * table descriptor. In such a case we need to convert tuples to the root
2418  * tuple descriptor, because the search for destination partition starts
2419  * from the root. We'll also need a slot to store these converted tuples.
2420  * We can skip this setup if it's not a partition key update.
2421  */
2422  if (update_tuple_routing_needed)
2423  {
2424  ExecSetupChildParentMapForSubplan(mtstate);
2425  mtstate->mt_root_tuple_slot = table_slot_create(rel, NULL);
2426  }
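/*
 * Illustrative usage sketch (not part of the original file; it reuses the
 * hypothetical "measurements" table from the routing example above).  The
 * root tuple slot and child-to-root maps are exercised when an UPDATE
 * changes the partition key, e.g.
 *
 *     UPDATE measurements SET logdate = logdate + 365
 *     WHERE logdate < '2020-01-01';
 *
 * A row whose new key no longer fits its current partition is deleted from
 * that partition, converted to the root row type (see
 * tupconv_map_for_subplan and mt_root_tuple_slot), and re-routed to the
 * correct partition as an insert.
 */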
2427 
2428  /*
2429  * Initialize any WITH CHECK OPTION constraints if needed.
2430  */
2431  resultRelInfo = mtstate->resultRelInfo;
2432  i = 0;
2433  foreach(l, node->withCheckOptionLists)
2434  {
2435  List *wcoList = (List *) lfirst(l);
2436  List *wcoExprs = NIL;
2437  ListCell *ll;
2438 
2439  foreach(ll, wcoList)
2440  {
2441  WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
2442  ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
2443  &mtstate->ps);
2444 
2445  wcoExprs = lappend(wcoExprs, wcoExpr);
2446  }
2447 
2448  resultRelInfo->ri_WithCheckOptions = wcoList;
2449  resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
2450  resultRelInfo++;
2451  i++;
2452  }
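/*
 * Illustrative usage sketch (not part of the original file; names are
 * hypothetical).  The WITH CHECK OPTION expressions initialized above come
 * from auto-updatable views such as
 *
 *     CREATE VIEW active_users AS
 *         SELECT * FROM users WHERE active
 *         WITH CHECK OPTION;
 *
 * and from row-level security policies; ExecWithCheckOptions() evaluates
 * them later against each inserted or updated row and raises an error when
 * a row would escape the view or violate a policy.
 */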
2453 
2454  /*
2455  * Initialize RETURNING projections if needed.
2456  */
2457  if (node->returningLists)
2458  {
2459  TupleTableSlot *slot;
2460  ExprContext *econtext;
2461 
2462  /*
2463  * Initialize result tuple slot and assign its rowtype using the first
2464  * RETURNING list. We assume the rest will look the same.
2465  */
2466  mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
2467 
2468  /* Set up a slot for the output of the RETURNING projection(s) */
2469  ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
2470  slot = mtstate->ps.ps_ResultTupleSlot;
2471 
2472  /* Need an econtext too */
2473  if (mtstate->ps.ps_ExprContext == NULL)
2474  ExecAssignExprContext(estate, &mtstate->ps);
2475  econtext = mtstate->ps.ps_ExprContext;
2476 
2477  /*
2478  * Build a projection for each result rel.
2479  */
2480  resultRelInfo = mtstate->resultRelInfo;
2481  foreach(l, node->returningLists)
2482  {
2483  List *rlist = (List *) lfirst(l);
2484 
2485  resultRelInfo->ri_returningList = rlist;
2486  resultRelInfo->ri_projectReturning =
2487  ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
2488  resultRelInfo->ri_RelationDesc->rd_att);
2489  resultRelInfo++;
2490  }
2491  }
2492  else
2493  {
2494  /*
2495  * We still must construct a dummy result tuple type, because InitPlan
2496  * expects one (maybe should change that?).
2497  */
2498  mtstate->ps.plan->targetlist = NIL;
2499  ExecInitResultTypeTL(&mtstate->ps);
2500 
2501  mtstate->ps.ps_ExprContext = NULL;
2502  }
2503 
2504  /* Set the list of arbiter indexes if needed for ON CONFLICT */
2505  resultRelInfo = mtstate->resultRelInfo;
2506  if (node->onConflictAction != ONCONFLICT_NONE)
2507  resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
2508 
2509  /*
2510  * If needed, initialize the target list, projection and qual for ON CONFLICT
2511  * DO UPDATE.
2512  */
2513  if (node->onConflictAction == ONCONFLICT_UPDATE)
2514  {
2515  ExprContext *econtext;
2516  TupleDesc relationDesc;
2517  TupleDesc tupDesc;
2518 
2519  /* insert may only have one plan, inheritance is not expanded */
2520  Assert(nplans == 1);
2521 
2522  /* already exists if created by RETURNING processing above */
2523  if (mtstate->ps.ps_ExprContext == NULL)
2524  ExecAssignExprContext(estate, &mtstate->ps);
2525 
2526  econtext = mtstate->ps.ps_ExprContext;
2527  relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
2528 
2529  /* create state for DO UPDATE SET operation */
2530  resultRelInfo->ri_onConflict = makeNode(OnConflictSetState);
2531 
2532  /* initialize slot for the existing tuple */
2533  resultRelInfo->ri_onConflict->oc_Existing =
2534  table_slot_create(resultRelInfo->ri_RelationDesc,
2535  &mtstate->ps.state->es_tupleTable);
2536 
2537  /*
2538  * Create the tuple slot for the UPDATE SET projection. We want a slot
2539  * of the table's type here, because the slot will be used to insert
2540  * into the table, and for RETURNING processing - which may access
2541  * system attributes.
2542  */
2543  tupDesc = ExecTypeFromTL((List *) node->onConflictSet);
2544  resultRelInfo->ri_onConflict->oc_ProjSlot =
2545  ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc,
2546  table_slot_callbacks(resultRelInfo->ri_RelationDesc));
2547 
2548  /* build UPDATE SET projection state */
2549  resultRelInfo->ri_onConflict->oc_ProjInfo =
2550  ExecBuildProjectionInfo(node->onConflictSet, econtext,
2551  resultRelInfo->ri_onConflict->oc_ProjSlot,
2552  &mtstate->ps,
2553  relationDesc);
2554 
2555  /* initialize state to evaluate the WHERE clause, if any */
2556  if (node->onConflictWhere)
2557  {
2558  ExprState *qualexpr;
2559 
2560  qualexpr = ExecInitQual((List *) node->onConflictWhere,
2561  &mtstate->ps);
2562  resultRelInfo->ri_onConflict->oc_WhereClause = qualexpr;
2563  }
2564  }
2565 
2566  /*
2567  * If we have any secondary relations in an UPDATE or DELETE, they need to
2568  * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
2569  * EvalPlanQual mechanism needs to be told about them. Locate the
2570  * relevant ExecRowMarks.
2571  */
2572  foreach(l, node->rowMarks)
2573  {
2574  PlanRowMark *rc = lfirst_node(PlanRowMark, l);
2575  ExecRowMark *erm;
2576 
2577  /* ignore "parent" rowmarks; they are irrelevant at runtime */
2578  if (rc->isParent)
2579  continue;
2580 
2581  /* find ExecRowMark (same for all subplans) */
2582  erm = ExecFindRowMark(estate, rc->rti, false);
2583 
2584  /* build ExecAuxRowMark for each subplan */
2585  for (i = 0; i < nplans; i++)
2586  {
2587  ExecAuxRowMark *aerm;
2588 
2589  subplan = mtstate->mt_plans[i]->plan;
2590  aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
2591  mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm);
2592  }
2593  }
2594 
2595  /* select first subplan */
2596  mtstate->mt_whichplan = 0;
2597  subplan = (Plan *) linitial(node->plans);
2598  EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan,
2599  mtstate->mt_arowmarks[0]);
2600 
2601  /*
2602  * Initialize the junk filter(s) if needed. INSERT queries need a filter
2603  * if there are any junk attrs in the tlist. UPDATE and DELETE always
2604  * need a filter, since there's always at least one junk attribute present
2605  * --- no need to look first. Typically, this will be a 'ctid' or
2606  * 'wholerow' attribute, but in the case of a foreign data wrapper it
2607  * might be a set of junk attributes sufficient to identify the remote
2608  * row.
2609  *
2610  * If there are multiple result relations, each one needs its own junk
2611  * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
2612  * can't be fooled by some needing a filter and some not.
2613  *
2614  * This section of code is also a convenient place to verify that the
2615  * output of an INSERT or UPDATE matches the target table(s).
2616  */
2617  {
2618  bool junk_filter_needed = false;
2619 
2620  switch (operation)
2621  {
2622  case CMD_INSERT:
2623  foreach(l, subplan->targetlist)
2624  {
2625  TargetEntry *tle = (TargetEntry *) lfirst(l);
2626 
2627  if (tle->resjunk)
2628  {
2629  junk_filter_needed = true;
2630  break;
2631  }
2632  }
2633  break;
2634  case CMD_UPDATE:
2635  case CMD_DELETE:
2636  junk_filter_needed = true;
2637  break;
2638  default:
2639  elog(ERROR, "unknown operation");
2640  break;
2641  }
2642 
2643  if (junk_filter_needed)
2644  {
2645  resultRelInfo = mtstate->resultRelInfo;
2646  for (i = 0; i < nplans; i++)
2647  {
2648  JunkFilter *j;
2649  TupleTableSlot *junkresslot;
2650 
2651  subplan = mtstate->mt_plans[i]->plan;
2652  if (operation == CMD_INSERT || operation == CMD_UPDATE)
2653  ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
2654  subplan->targetlist);
2655 
2656  junkresslot =
2657  ExecInitExtraTupleSlot(estate, NULL,
2658  table_slot_callbacks(resultRelInfo->ri_RelationDesc));
2659  j = ExecInitJunkFilter(subplan->targetlist,
2660  junkresslot);
2661 
2662  if (operation == CMD_UPDATE || operation == CMD_DELETE)
2663  {
2664  /* For UPDATE/DELETE, find the appropriate junk attr now */
2665  char relkind;
2666 
2667  relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
2668  if (relkind == RELKIND_RELATION ||
2669  relkind == RELKIND_MATVIEW ||
2670  relkind == RELKIND_PARTITIONED_TABLE)
2671  {
2672  j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
2673  if (!AttributeNumberIsValid(j->jf_junkAttNo))
2674  elog(ERROR, "could not find junk ctid column");
2675  }
2676  else if (relkind == RELKIND_FOREIGN_TABLE)
2677  {
2678  /*
2679  * When there is a row-level trigger, there should be
2680  * a wholerow attribute.
2681  */
2682  j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
2683  }
2684  else
2685  {
2686  j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
2687  if (!AttributeNumberIsValid(j->jf_junkAttNo))
2688  elog(ERROR, "could not find junk wholerow column");
2689  }
2690  }
2691 
2692  resultRelInfo->ri_junkFilter = j;
2693  resultRelInfo++;
2694  }
2695  }
2696  else
2697  {
2698  if (operation == CMD_INSERT)
2699  ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc,
2700  subplan->targetlist);
2701  }
2702  }
2703 
2704  /*
2705  * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
2706  * to estate->es_auxmodifytables so that it will be run to completion by
2707  * ExecPostprocessPlan. (It'd actually work fine to add the primary
2708  * ModifyTable node too, but there's no need.) Note the use of lcons not
2709  * lappend: we need later-initialized ModifyTable nodes to be shut down
2710  * before earlier ones. This ensures that we don't throw away RETURNING
2711  * rows that need to be seen by a later CTE subplan.
2712  */
2713  if (!mtstate->canSetTag)
2714  estate->es_auxmodifytables = lcons(mtstate,
2715  estate->es_auxmodifytables);
2716 
2717  return mtstate;
2718 }
2719 
2720 /* ----------------------------------------------------------------
2721  * ExecEndModifyTable
2722  *
2723  * Shuts down the plan.
2724  *
2725  * Returns nothing of interest.
2726  * ----------------------------------------------------------------
2727  */
2728 void
2729 ExecEndModifyTable(ModifyTableState *node)
2730 {
2731  int i;
2732 
2733  /*
2734  * Allow any FDWs to shut down
2735  */
2736  for (i = 0; i < node->mt_nplans; i++)
2737  {
2738  ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
2739 
2740  if (!resultRelInfo->ri_usesFdwDirectModify &&
2741  resultRelInfo->ri_FdwRoutine != NULL &&
2742  resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
2743  resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
2744  resultRelInfo);
2745  }
2746 
2747  /*
2748  * Close all the partitioned tables, leaf partitions, and their indices
2749  * and release the slot used for tuple routing, if set.
2750  */
2751  if (node->mt_partition_tuple_routing)
2752  {
2753  ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
2754 
2755  if (node->mt_root_tuple_slot)
2756  ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
2757  }
2758 
2759  /*
2760  * Free the exprcontext
2761  */
2762  ExecFreeExprContext(&node->ps);
2763 
2764  /*
2765  * clean out the tuple table
2766  */
2767  if (node->ps.ps_ResultTupleSlot)
2768  ExecClearTuple(node->ps.ps_ResultTupleSlot);
2769 
2770  /*
2771  * Terminate EPQ execution if active
2772  */
2773  EvalPlanQualEnd(&node->mt_epqstate);
2774 
2775  /*
2776  * shut down subplans
2777  */
2778  for (i = 0; i < node->mt_nplans; i++)
2779  ExecEndNode(node->mt_plans[i]);
2780 }
2781 
2782 void
2783 ExecReScanModifyTable(ModifyTableState *node)
2784 {
2785  /*
2786  * Currently, we don't need to support rescan on ModifyTable nodes. The
2787  * semantics of that would be a bit debatable anyway.
2788  */
2789  elog(ERROR, "ExecReScanModifyTable is not implemented");
2790 }