nodeModifyTable.c
1 /*-------------------------------------------------------------------------
2  *
3  * nodeModifyTable.c
4  * routines to handle ModifyTable nodes.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/executor/nodeModifyTable.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /* INTERFACE ROUTINES
16  * ExecInitModifyTable - initialize the ModifyTable node
17  * ExecModifyTable - retrieve the next tuple from the node
18  * ExecEndModifyTable - shut down the ModifyTable node
19  * ExecReScanModifyTable - rescan the ModifyTable node
20  *
21  * NOTES
22  * Each ModifyTable node contains a list of one or more subplans,
23  * much like an Append node. There is one subplan per result relation.
24  * The key reason for this is that in an inherited UPDATE command, each
25  * result relation could have a different schema (more or different
26  * columns) requiring a different plan tree to produce it. In an
27  * inherited DELETE, all the subplans should produce the same output
28  * rowtype, but we might still find that different plans are appropriate
29  * for different child relations.
30  *
31  * If the query specifies RETURNING, then the ModifyTable returns a
32  * RETURNING tuple after completing each row insert, update, or delete.
33  * It must be called again to continue the operation. Without RETURNING,
34  * we just loop within the node until all the work is done, then
35  * return NULL. This avoids useless call/return overhead.
36  */
37 
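/*
 * Illustrative sketch, not part of this file: the RETURNING behavior
 * described in the NOTES above, as seen from a libpq client.  The table
 * demo(id serial PRIMARY KEY, val text) and the connection string are
 * hypothetical, and error handling is abbreviated.  One RETURNING row
 * comes back for every row the ModifyTable node actually inserts.
 */
#include <stdio.h>
#include "libpq-fe.h"

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");
    PGresult   *res;
    int         i;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    res = PQexec(conn,
                 "INSERT INTO demo (val) VALUES ('a'), ('b'), ('c') "
                 "RETURNING id, val");
    if (PQresultStatus(res) == PGRES_TUPLES_OK)
    {
        for (i = 0; i < PQntuples(res); i++)
            printf("inserted id=%s val=%s\n",
                   PQgetvalue(res, i, 0), PQgetvalue(res, i, 1));
    }
    else
        fprintf(stderr, "INSERT failed: %s", PQerrorMessage(conn));

    PQclear(res);
    PQfinish(conn);
    return 0;
}
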
38 #include "postgres.h"
39 
40 #include "access/htup_details.h"
41 #include "access/xact.h"
42 #include "commands/trigger.h"
43 #include "executor/executor.h"
 44 #include "executor/nodeModifyTable.h"
45 #include "foreign/fdwapi.h"
46 #include "miscadmin.h"
47 #include "nodes/nodeFuncs.h"
48 #include "parser/parsetree.h"
49 #include "storage/bufmgr.h"
50 #include "storage/lmgr.h"
51 #include "utils/builtins.h"
52 #include "utils/memutils.h"
53 #include "utils/rel.h"
54 #include "utils/tqual.h"
55 
56 
57 static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
58  ResultRelInfo *resultRelInfo,
59  ItemPointer conflictTid,
60  TupleTableSlot *planSlot,
61  TupleTableSlot *excludedSlot,
62  EState *estate,
63  bool canSetTag,
64  TupleTableSlot **returning);
65 
66 /*
67  * Verify that the tuples to be produced by INSERT or UPDATE match the
68  * target relation's rowtype
69  *
70  * We do this to guard against stale plans. If plan invalidation is
71  * functioning properly then we should never get a failure here, but better
72  * safe than sorry. Note that this is called after we have obtained lock
73  * on the target rel, so the rowtype can't change underneath us.
74  *
75  * The plan output is represented by its targetlist, because that makes
76  * handling the dropped-column case easier.
77  */
78 static void
79 ExecCheckPlanOutput(Relation resultRel, List *targetList)
80 {
81  TupleDesc resultDesc = RelationGetDescr(resultRel);
82  int attno = 0;
83  ListCell *lc;
84 
85  foreach(lc, targetList)
86  {
87  TargetEntry *tle = (TargetEntry *) lfirst(lc);
88  Form_pg_attribute attr;
89 
90  if (tle->resjunk)
91  continue; /* ignore junk tlist items */
92 
93  if (attno >= resultDesc->natts)
94  ereport(ERROR,
95  (errcode(ERRCODE_DATATYPE_MISMATCH),
96  errmsg("table row type and query-specified row type do not match"),
97  errdetail("Query has too many columns.")));
98  attr = resultDesc->attrs[attno++];
99 
100  if (!attr->attisdropped)
101  {
102  /* Normal case: demand type match */
103  if (exprType((Node *) tle->expr) != attr->atttypid)
104  ereport(ERROR,
105  (errcode(ERRCODE_DATATYPE_MISMATCH),
106  errmsg("table row type and query-specified row type do not match"),
107  errdetail("Table has type %s at ordinal position %d, but query expects %s.",
108  format_type_be(attr->atttypid),
109  attno,
110  format_type_be(exprType((Node *) tle->expr)))));
111  }
112  else
113  {
114  /*
115  * For a dropped column, we can't check atttypid (it's likely 0).
116  * In any case the planner has most likely inserted an INT4 null.
117  * What we insist on is just *some* NULL constant.
118  */
119  if (!IsA(tle->expr, Const) ||
120  !((Const *) tle->expr)->constisnull)
121  ereport(ERROR,
122  (errcode(ERRCODE_DATATYPE_MISMATCH),
123  errmsg("table row type and query-specified row type do not match"),
124  errdetail("Query provides a value for a dropped column at ordinal position %d.",
125  attno)));
126  }
127  }
128  if (attno != resultDesc->natts)
129  ereport(ERROR,
130  (errcode(ERRCODE_DATATYPE_MISMATCH),
131  errmsg("table row type and query-specified row type do not match"),
132  errdetail("Query has too few columns.")));
133 }
134 
135 /*
136  * ExecProcessReturning --- evaluate a RETURNING list
137  *
138  * projectReturning: RETURNING projection info for current result rel
139  * tupleSlot: slot holding tuple actually inserted/updated/deleted
140  * planSlot: slot holding tuple returned by top subplan node
141  *
142  * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
143  * scan tuple.
144  *
145  * Returns a slot holding the result tuple
146  */
147 static TupleTableSlot *
 148 ExecProcessReturning(ResultRelInfo *resultRelInfo,
149  TupleTableSlot *tupleSlot,
150  TupleTableSlot *planSlot)
151 {
152  ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
153  ExprContext *econtext = projectReturning->pi_exprContext;
154 
155  /*
156  * Reset per-tuple memory context to free any expression evaluation
157  * storage allocated in the previous cycle.
158  */
159  ResetExprContext(econtext);
160 
161  /* Make tuple and any needed join variables available to ExecProject */
162  if (tupleSlot)
163  econtext->ecxt_scantuple = tupleSlot;
164  else
165  {
166  HeapTuple tuple;
167 
168  /*
169  * RETURNING expressions might reference the tableoid column, so
170  * initialize t_tableOid before evaluating them.
171  */
172  Assert(!TupIsNull(econtext->ecxt_scantuple));
173  tuple = ExecMaterializeSlot(econtext->ecxt_scantuple);
174  tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
175  }
176  econtext->ecxt_outertuple = planSlot;
177 
178  /* Compute the RETURNING expressions */
179  return ExecProject(projectReturning);
180 }
181 
182 /*
183  * ExecCheckHeapTupleVisible -- verify heap tuple is visible
184  *
185  * It would not be consistent with guarantees of the higher isolation levels to
186  * proceed with avoiding insertion (taking speculative insertion's alternative
 187  * path) on the basis of another tuple that is not visible to the MVCC snapshot.
188  * Check for the need to raise a serialization failure, and do so as necessary.
189  */
190 static void
 191 ExecCheckHeapTupleVisible(EState *estate,
192  HeapTuple tuple,
193  Buffer buffer)
194 {
 195  if (!IsolationUsesXactSnapshot())
196  return;
197 
198  /*
199  * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
200  * Caller should be holding pin, but not lock.
201  */
202  LockBuffer(buffer, BUFFER_LOCK_SHARE);
203  if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
204  {
205  /*
206  * We should not raise a serialization failure if the conflict is
207  * against a tuple inserted by our own transaction, even if it's not
208  * visible to our snapshot. (This would happen, for example, if
209  * conflicting keys are proposed for insertion in a single command.)
210  */
 211  if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
212  ereport(ERROR,
213  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
214  errmsg("could not serialize access due to concurrent update")));
215  }
 216  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
217 }
218 
219 /*
220  * ExecCheckTIDVisible -- convenience variant of ExecCheckHeapTupleVisible()
221  */
222 static void
 223 ExecCheckTIDVisible(EState *estate,
224  ResultRelInfo *relinfo,
225  ItemPointer tid)
226 {
227  Relation rel = relinfo->ri_RelationDesc;
228  Buffer buffer;
229  HeapTupleData tuple;
230 
231  /* Redundantly check isolation level */
 232  if (!IsolationUsesXactSnapshot())
233  return;
234 
235  tuple.t_self = *tid;
236  if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL))
237  elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
238  ExecCheckHeapTupleVisible(estate, &tuple, buffer);
239  ReleaseBuffer(buffer);
240 }
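
/*
 * Illustrative sketch, not part of this file: the serialization failure
 * that ExecCheckHeapTupleVisible/ExecCheckTIDVisible enforce, observed
 * from libpq.  Assumes the same hypothetical demo(id serial PRIMARY KEY,
 * val text) table; error handling is abbreviated.  Session 1 takes its
 * REPEATABLE READ snapshot first, session 2 then commits a conflicting
 * row, so session 1's ON CONFLICT DO NOTHING may not silently skip a row
 * its snapshot cannot see and instead raises SQLSTATE 40001.
 */
#include <stdio.h>
#include "libpq-fe.h"

int
main(void)
{
    PGconn     *s1 = PQconnectdb("dbname=postgres");
    PGconn     *s2 = PQconnectdb("dbname=postgres");
    PGresult   *res;

    PQclear(PQexec(s1, "BEGIN ISOLATION LEVEL REPEATABLE READ"));
    PQclear(PQexec(s1, "SELECT count(*) FROM demo"));  /* acquires snapshot */

    PQclear(PQexec(s2, "INSERT INTO demo (id, val) VALUES (42, 'x')"));

    res = PQexec(s1,
                 "INSERT INTO demo (id, val) VALUES (42, 'y') "
                 "ON CONFLICT (id) DO NOTHING");
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
        fprintf(stderr, "SQLSTATE %s: %s",
                PQresultErrorField(res, PG_DIAG_SQLSTATE),
                PQresultErrorMessage(res));
    PQclear(res);

    PQclear(PQexec(s1, "ROLLBACK"));
    PQfinish(s1);
    PQfinish(s2);
    return 0;
}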
241 
242 /* ----------------------------------------------------------------
243  * ExecInsert
244  *
245  * For INSERT, we have to insert the tuple into the target relation
246  * and insert appropriate tuples into the index relations.
247  *
248  * Returns RETURNING result if any, otherwise NULL.
249  * ----------------------------------------------------------------
250  */
251 static TupleTableSlot *
 252 ExecInsert(ModifyTableState *mtstate,
253  TupleTableSlot *slot,
254  TupleTableSlot *planSlot,
255  List *arbiterIndexes,
256  OnConflictAction onconflict,
257  EState *estate,
258  bool canSetTag)
259 {
260  HeapTuple tuple;
261  ResultRelInfo *resultRelInfo;
262  ResultRelInfo *saved_resultRelInfo = NULL;
263  Relation resultRelationDesc;
264  Oid newId;
265  List *recheckIndexes = NIL;
266  TupleTableSlot *oldslot = slot,
267  *result = NULL;
268 
269  /*
270  * get the heap tuple out of the tuple table slot, making sure we have a
271  * writable copy
272  */
273  tuple = ExecMaterializeSlot(slot);
274 
275  /*
276  * get information on the (current) result relation
277  */
278  resultRelInfo = estate->es_result_relation_info;
279 
280  /* Determine the partition to heap_insert the tuple into */
281  if (mtstate->mt_partition_dispatch_info)
282  {
283  int leaf_part_index;
284  TupleConversionMap *map;
285 
286  /*
287  * Away we go ... If we end up not finding a partition after all,
288  * ExecFindPartition() does not return and errors out instead.
289  * Otherwise, the returned value is to be used as an index into arrays
290  * mt_partitions[] and mt_partition_tupconv_maps[] that will get us
291  * the ResultRelInfo and TupleConversionMap for the partition,
292  * respectively.
293  */
294  leaf_part_index = ExecFindPartition(resultRelInfo,
 295  mtstate->mt_partition_dispatch_info,
296  slot,
297  estate);
298  Assert(leaf_part_index >= 0 &&
299  leaf_part_index < mtstate->mt_num_partitions);
300 
301  /*
302  * Save the old ResultRelInfo and switch to the one corresponding to
303  * the selected partition.
304  */
305  saved_resultRelInfo = resultRelInfo;
306  resultRelInfo = mtstate->mt_partitions + leaf_part_index;
307 
308  /* We do not yet have a way to insert into a foreign partition */
309  if (resultRelInfo->ri_FdwRoutine)
310  ereport(ERROR,
311  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
312  errmsg("cannot route inserted tuples to a foreign table")));
313 
314  /* For ExecInsertIndexTuples() to work on the partition's indexes */
315  estate->es_result_relation_info = resultRelInfo;
316 
317  /*
318  * We might need to convert from the parent rowtype to the partition
319  * rowtype.
320  */
321  map = mtstate->mt_partition_tupconv_maps[leaf_part_index];
322  if (map)
323  {
324  Relation partrel = resultRelInfo->ri_RelationDesc;
325 
326  tuple = do_convert_tuple(tuple, map);
327 
328  /*
329  * We must use the partition's tuple descriptor from this point
330  * on, until we're finished dealing with the partition. Use the
331  * dedicated slot for that.
332  */
333  slot = mtstate->mt_partition_tuple_slot;
334  Assert(slot != NULL);
335  ExecSetSlotDescriptor(slot, RelationGetDescr(partrel));
336  ExecStoreTuple(tuple, slot, InvalidBuffer, true);
337  }
338  }
339 
340  resultRelationDesc = resultRelInfo->ri_RelationDesc;
341 
342  /*
343  * If the result relation has OIDs, force the tuple's OID to zero so that
344  * heap_insert will assign a fresh OID. Usually the OID already will be
345  * zero at this point, but there are corner cases where the plan tree can
346  * return a tuple extracted literally from some table with the same
347  * rowtype.
348  *
349  * XXX if we ever wanted to allow users to assign their own OIDs to new
350  * rows, this'd be the place to do it. For the moment, we make a point of
351  * doing this before calling triggers, so that a user-supplied trigger
352  * could hack the OID if desired.
353  */
354  if (resultRelationDesc->rd_rel->relhasoids)
355  HeapTupleSetOid(tuple, InvalidOid);
356 
357  /*
358  * BEFORE ROW INSERT Triggers.
359  *
360  * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
361  * INSERT ... ON CONFLICT statement. We cannot check for constraint
362  * violations before firing these triggers, because they can change the
363  * values to insert. Also, they can run arbitrary user-defined code with
364  * side-effects that we can't cancel by just not inserting the tuple.
365  */
366  if (resultRelInfo->ri_TrigDesc &&
367  resultRelInfo->ri_TrigDesc->trig_insert_before_row)
368  {
369  slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
370 
371  if (slot == NULL) /* "do nothing" */
372  return NULL;
373 
374  /* trigger might have changed tuple */
375  tuple = ExecMaterializeSlot(slot);
376  }
377 
378  /* INSTEAD OF ROW INSERT Triggers */
379  if (resultRelInfo->ri_TrigDesc &&
380  resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
381  {
382  slot = ExecIRInsertTriggers(estate, resultRelInfo, slot);
383 
384  if (slot == NULL) /* "do nothing" */
385  return NULL;
386 
387  /* trigger might have changed tuple */
388  tuple = ExecMaterializeSlot(slot);
389 
390  newId = InvalidOid;
391  }
392  else if (resultRelInfo->ri_FdwRoutine)
393  {
394  /*
395  * insert into foreign table: let the FDW do it
396  */
397  slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
398  resultRelInfo,
399  slot,
400  planSlot);
401 
402  if (slot == NULL) /* "do nothing" */
403  return NULL;
404 
405  /* FDW might have changed tuple */
406  tuple = ExecMaterializeSlot(slot);
407 
408  /*
409  * AFTER ROW Triggers or RETURNING expressions might reference the
410  * tableoid column, so initialize t_tableOid before evaluating them.
411  */
412  tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
413 
414  newId = InvalidOid;
415  }
416  else
417  {
418  /*
419  * Constraints might reference the tableoid column, so initialize
420  * t_tableOid before evaluating them.
421  */
422  tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
423 
424  /*
425  * Check any RLS INSERT WITH CHECK policies
426  *
427  * ExecWithCheckOptions() will skip any WCOs which are not of the kind
428  * we are looking for at this point.
429  */
430  if (resultRelInfo->ri_WithCheckOptions != NIL)
 431  ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
432  resultRelInfo, slot, estate);
433 
434  /*
435  * Check the constraints of the tuple
436  */
437  if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
438  ExecConstraints(resultRelInfo, slot, oldslot, estate);
439 
440  if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
441  {
442  /* Perform a speculative insertion. */
443  uint32 specToken;
444  ItemPointerData conflictTid;
445  bool specConflict;
446 
447  /*
448  * Do a non-conclusive check for conflicts first.
449  *
450  * We're not holding any locks yet, so this doesn't guarantee that
451  * the later insert won't conflict. But it avoids leaving behind
452  * a lot of canceled speculative insertions, if you run a lot of
453  * INSERT ON CONFLICT statements that do conflict.
454  *
455  * We loop back here if we find a conflict below, either during
456  * the pre-check, or when we re-check after inserting the tuple
457  * speculatively.
458  */
459  vlock:
460  specConflict = false;
461  if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
462  arbiterIndexes))
463  {
464  /* committed conflict tuple found */
465  if (onconflict == ONCONFLICT_UPDATE)
466  {
467  /*
468  * In case of ON CONFLICT DO UPDATE, execute the UPDATE
469  * part. Be prepared to retry if the UPDATE fails because
470  * of another concurrent UPDATE/DELETE to the conflict
471  * tuple.
472  */
473  TupleTableSlot *returning = NULL;
474 
475  if (ExecOnConflictUpdate(mtstate, resultRelInfo,
476  &conflictTid, planSlot, slot,
477  estate, canSetTag, &returning))
478  {
479  InstrCountFiltered2(&mtstate->ps, 1);
480  return returning;
481  }
482  else
483  goto vlock;
484  }
485  else
486  {
487  /*
488  * In case of ON CONFLICT DO NOTHING, do nothing. However,
489  * verify that the tuple is visible to the executor's MVCC
490  * snapshot at higher isolation levels.
491  */
492  Assert(onconflict == ONCONFLICT_NOTHING);
493  ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
494  InstrCountFiltered2(&mtstate->ps, 1);
495  return NULL;
496  }
497  }
498 
499  /*
500  * Before we start insertion proper, acquire our "speculative
501  * insertion lock". Others can use that to wait for us to decide
502  * if we're going to go ahead with the insertion, instead of
503  * waiting for the whole transaction to complete.
504  */
 505  specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
506  HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
507 
508  /* insert the tuple, with the speculative token */
509  newId = heap_insert(resultRelationDesc, tuple,
510  estate->es_output_cid,
 511  HEAP_INSERT_SPECULATIVE,
512  NULL);
513 
514  /* insert index entries for tuple */
515  recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
516  estate, true, &specConflict,
517  arbiterIndexes);
518 
519  /* adjust the tuple's state accordingly */
520  if (!specConflict)
521  heap_finish_speculative(resultRelationDesc, tuple);
522  else
523  heap_abort_speculative(resultRelationDesc, tuple);
524 
525  /*
526  * Wake up anyone waiting for our decision. They will re-check
527  * the tuple, see that it's no longer speculative, and wait on our
528  * XID as if this was a regularly inserted tuple all along. Or if
529  * we killed the tuple, they will see it's dead, and proceed as if
530  * the tuple never existed.
531  */
 532  SpeculativeInsertionLockRelease(GetCurrentTransactionId());
533 
534  /*
535  * If there was a conflict, start from the beginning. We'll do
536  * the pre-check again, which will now find the conflicting tuple
537  * (unless it aborts before we get there).
538  */
539  if (specConflict)
540  {
541  list_free(recheckIndexes);
542  goto vlock;
543  }
544 
545  /* Since there was no insertion conflict, we're done */
546  }
547  else
548  {
549  /*
550  * insert the tuple normally.
551  *
552  * Note: heap_insert returns the tid (location) of the new tuple
553  * in the t_self field.
554  */
555  newId = heap_insert(resultRelationDesc, tuple,
556  estate->es_output_cid,
557  0, NULL);
558 
559  /* insert index entries for tuple */
560  if (resultRelInfo->ri_NumIndices > 0)
561  recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
562  estate, false, NULL,
563  arbiterIndexes);
564  }
565  }
566 
567  if (canSetTag)
568  {
569  (estate->es_processed)++;
570  estate->es_lastoid = newId;
571  setLastTid(&(tuple->t_self));
572  }
573 
574  /* AFTER ROW INSERT Triggers */
575  ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes);
576 
577  list_free(recheckIndexes);
578 
579  /*
580  * Check any WITH CHECK OPTION constraints from parent views. We are
581  * required to do this after testing all constraints and uniqueness
582  * violations per the SQL spec, so we do it after actually inserting the
583  * record into the heap and all indexes.
584  *
585  * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
586  * tuple will never be seen, if it violates the WITH CHECK OPTION.
587  *
588  * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
589  * are looking for at this point.
590  */
591  if (resultRelInfo->ri_WithCheckOptions != NIL)
592  ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
593 
594  /* Process RETURNING if present */
595  if (resultRelInfo->ri_projectReturning)
596  result = ExecProcessReturning(resultRelInfo, slot, planSlot);
597 
598  if (saved_resultRelInfo)
599  estate->es_result_relation_info = saved_resultRelInfo;
600 
601  return result;
602 }
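
/*
 * Illustrative sketch, not part of this file: a C trigger that exercises
 * the "do nothing" convention used by ExecInsert above, where a BEFORE ROW
 * trigger returning NULL causes the row to be skipped entirely.  The
 * function and table names are hypothetical; the function would be built
 * as an extension and attached with
 * CREATE TRIGGER ... BEFORE INSERT ON demo FOR EACH ROW ...
 */
#include "postgres.h"
#include "fmgr.h"
#include "commands/trigger.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(demo_skip_insert);

Datum
demo_skip_insert(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;

    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "demo_skip_insert: not called by trigger manager");

    /* Returning NULL from a BEFORE ROW trigger suppresses the operation */
    if (TRIGGER_FIRED_BEFORE(trigdata->tg_event) &&
        TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
        return PointerGetDatum(NULL);

    /* Otherwise pass the original row through unchanged */
    return PointerGetDatum(trigdata->tg_trigtuple);
}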
603 
604 /* ----------------------------------------------------------------
605  * ExecDelete
606  *
607  * DELETE is like UPDATE, except that we delete the tuple and no
608  * index modifications are needed.
609  *
610  * When deleting from a table, tupleid identifies the tuple to
611  * delete and oldtuple is NULL. When deleting from a view,
612  * oldtuple is passed to the INSTEAD OF triggers and identifies
613  * what to delete, and tupleid is invalid. When deleting from a
614  * foreign table, tupleid is invalid; the FDW has to figure out
615  * which row to delete using data from the planSlot. oldtuple is
616  * passed to foreign table triggers; it is NULL when the foreign
617  * table has no relevant triggers.
618  *
619  * Returns RETURNING result if any, otherwise NULL.
620  * ----------------------------------------------------------------
621  */
622 static TupleTableSlot *
 623 ExecDelete(ItemPointer tupleid,
624  HeapTuple oldtuple,
625  TupleTableSlot *planSlot,
626  EPQState *epqstate,
627  EState *estate,
628  bool canSetTag)
629 {
630  ResultRelInfo *resultRelInfo;
631  Relation resultRelationDesc;
632  HTSU_Result result;
 633  HeapUpdateFailureData hufd;
634  TupleTableSlot *slot = NULL;
635 
636  /*
637  * get information on the (current) result relation
638  */
639  resultRelInfo = estate->es_result_relation_info;
640  resultRelationDesc = resultRelInfo->ri_RelationDesc;
641 
642  /* BEFORE ROW DELETE Triggers */
643  if (resultRelInfo->ri_TrigDesc &&
644  resultRelInfo->ri_TrigDesc->trig_delete_before_row)
645  {
646  bool dodelete;
647 
648  dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
649  tupleid, oldtuple);
650 
651  if (!dodelete) /* "do nothing" */
652  return NULL;
653  }
654 
655  /* INSTEAD OF ROW DELETE Triggers */
656  if (resultRelInfo->ri_TrigDesc &&
657  resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
658  {
659  bool dodelete;
660 
661  Assert(oldtuple != NULL);
662  dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
663 
664  if (!dodelete) /* "do nothing" */
665  return NULL;
666  }
667  else if (resultRelInfo->ri_FdwRoutine)
668  {
669  HeapTuple tuple;
670 
671  /*
672  * delete from foreign table: let the FDW do it
673  *
674  * We offer the trigger tuple slot as a place to store RETURNING data,
675  * although the FDW can return some other slot if it wants. Set up
676  * the slot's tupdesc so the FDW doesn't need to do that for itself.
677  */
678  slot = estate->es_trig_tuple_slot;
679  if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
680  ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
681 
682  slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
683  resultRelInfo,
684  slot,
685  planSlot);
686 
687  if (slot == NULL) /* "do nothing" */
688  return NULL;
689 
690  /*
691  * RETURNING expressions might reference the tableoid column, so
692  * initialize t_tableOid before evaluating them.
693  */
694  if (slot->tts_isempty)
695  ExecStoreAllNullTuple(slot);
696  tuple = ExecMaterializeSlot(slot);
697  tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
698  }
699  else
700  {
701  /*
702  * delete the tuple
703  *
704  * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
705  * that the row to be deleted is visible to that snapshot, and throw a
706  * can't-serialize error if not. This is a special-case behavior
707  * needed for referential integrity updates in transaction-snapshot
708  * mode transactions.
709  */
710 ldelete:;
711  result = heap_delete(resultRelationDesc, tupleid,
712  estate->es_output_cid,
713  estate->es_crosscheck_snapshot,
714  true /* wait for commit */ ,
715  &hufd);
716  switch (result)
717  {
 718  case HeapTupleSelfUpdated:
719 
720  /*
721  * The target tuple was already updated or deleted by the
722  * current command, or by a later command in the current
723  * transaction. The former case is possible in a join DELETE
724  * where multiple tuples join to the same target tuple. This
725  * is somewhat questionable, but Postgres has always allowed
726  * it: we just ignore additional deletion attempts.
727  *
728  * The latter case arises if the tuple is modified by a
729  * command in a BEFORE trigger, or perhaps by a command in a
730  * volatile function used in the query. In such situations we
731  * should not ignore the deletion, but it is equally unsafe to
732  * proceed. We don't want to discard the original DELETE
733  * while keeping the triggered actions based on its deletion;
734  * and it would be no better to allow the original DELETE
735  * while discarding updates that it triggered. The row update
736  * carries some information that might be important according
737  * to business rules; so throwing an error is the only safe
738  * course.
739  *
740  * If a trigger actually intends this type of interaction, it
741  * can re-execute the DELETE and then return NULL to cancel
742  * the outer delete.
743  */
744  if (hufd.cmax != estate->es_output_cid)
745  ereport(ERROR,
746  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
747  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
748  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
749 
750  /* Else, already deleted by self; nothing to do */
751  return NULL;
752 
 753  case HeapTupleMayBeUpdated:
754  break;
755 
756  case HeapTupleUpdated:
 757  if (IsolationUsesXactSnapshot())
758  ereport(ERROR,
759  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
760  errmsg("could not serialize access due to concurrent update")));
761  if (!ItemPointerEquals(tupleid, &hufd.ctid))
762  {
763  TupleTableSlot *epqslot;
764 
765  epqslot = EvalPlanQual(estate,
766  epqstate,
767  resultRelationDesc,
768  resultRelInfo->ri_RangeTableIndex,
 769  LockTupleExclusive,
770  &hufd.ctid,
771  hufd.xmax);
772  if (!TupIsNull(epqslot))
773  {
774  *tupleid = hufd.ctid;
775  goto ldelete;
776  }
777  }
778  /* tuple already deleted; nothing to do */
779  return NULL;
780 
781  default:
782  elog(ERROR, "unrecognized heap_delete status: %u", result);
783  return NULL;
784  }
785 
786  /*
787  * Note: Normally one would think that we have to delete index tuples
788  * associated with the heap tuple now...
789  *
790  * ... but in POSTGRES, we have no need to do this because VACUUM will
791  * take care of it later. We can't delete index tuples immediately
792  * anyway, since the tuple is still visible to other transactions.
793  */
794  }
795 
796  if (canSetTag)
797  (estate->es_processed)++;
798 
799  /* AFTER ROW DELETE Triggers */
800  ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple);
801 
802  /* Process RETURNING if present */
803  if (resultRelInfo->ri_projectReturning)
804  {
805  /*
806  * We have to put the target tuple into a slot, which means first we
807  * gotta fetch it. We can use the trigger tuple slot.
808  */
809  TupleTableSlot *rslot;
810  HeapTupleData deltuple;
811  Buffer delbuffer;
812 
813  if (resultRelInfo->ri_FdwRoutine)
814  {
815  /* FDW must have provided a slot containing the deleted row */
816  Assert(!TupIsNull(slot));
817  delbuffer = InvalidBuffer;
818  }
819  else
820  {
821  slot = estate->es_trig_tuple_slot;
822  if (oldtuple != NULL)
823  {
824  deltuple = *oldtuple;
825  delbuffer = InvalidBuffer;
826  }
827  else
828  {
829  deltuple.t_self = *tupleid;
830  if (!heap_fetch(resultRelationDesc, SnapshotAny,
831  &deltuple, &delbuffer, false, NULL))
832  elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
833  }
834 
835  if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
836  ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
837  ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
838  }
839 
840  rslot = ExecProcessReturning(resultRelInfo, slot, planSlot);
841 
842  /*
843  * Before releasing the target tuple again, make sure rslot has a
844  * local copy of any pass-by-reference values.
845  */
846  ExecMaterializeSlot(rslot);
847 
848  ExecClearTuple(slot);
849  if (BufferIsValid(delbuffer))
850  ReleaseBuffer(delbuffer);
851 
852  return rslot;
853  }
854 
855  return NULL;
856 }
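
/*
 * Illustrative sketch, not part of this file: the DELETE RETURNING path
 * above from the client side, using the hypothetical demo table and an
 * already-open libpq connection.  RETURNING delivers the deleted rows that
 * ExecDelete re-fetches into the trigger tuple slot, and PQcmdTuples
 * reports the count accumulated in estate->es_processed when canSetTag is
 * true.
 */
#include <stdio.h>
#include "libpq-fe.h"

static void
delete_demo_rows(PGconn *conn)
{
    PGresult   *res;
    int         i;

    res = PQexec(conn, "DELETE FROM demo WHERE val = 'b' RETURNING id, val");
    if (PQresultStatus(res) == PGRES_TUPLES_OK)
    {
        printf("%s row(s) deleted\n", PQcmdTuples(res));
        for (i = 0; i < PQntuples(res); i++)
            printf("deleted id=%s val=%s\n",
                   PQgetvalue(res, i, 0), PQgetvalue(res, i, 1));
    }
    else
        fprintf(stderr, "DELETE failed: %s", PQresultErrorMessage(res));
    PQclear(res);
}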
857 
858 /* ----------------------------------------------------------------
859  * ExecUpdate
860  *
861  * note: we can't run UPDATE queries with transactions
862  * off because UPDATEs are actually INSERTs and our
863  * scan will mistakenly loop forever, updating the tuple
 864  * it just inserted.  This should be fixed but until it
 865  * is, we don't want to get stuck in an infinite loop
 866  * which corrupts your database.
867  *
868  * When updating a table, tupleid identifies the tuple to
869  * update and oldtuple is NULL. When updating a view, oldtuple
870  * is passed to the INSTEAD OF triggers and identifies what to
871  * update, and tupleid is invalid. When updating a foreign table,
872  * tupleid is invalid; the FDW has to figure out which row to
873  * update using data from the planSlot. oldtuple is passed to
874  * foreign table triggers; it is NULL when the foreign table has
875  * no relevant triggers.
876  *
877  * Returns RETURNING result if any, otherwise NULL.
878  * ----------------------------------------------------------------
879  */
880 static TupleTableSlot *
 881 ExecUpdate(ItemPointer tupleid,
882  HeapTuple oldtuple,
883  TupleTableSlot *slot,
884  TupleTableSlot *planSlot,
885  EPQState *epqstate,
886  EState *estate,
887  bool canSetTag)
888 {
889  HeapTuple tuple;
890  ResultRelInfo *resultRelInfo;
891  Relation resultRelationDesc;
892  HTSU_Result result;
 893  HeapUpdateFailureData hufd;
894  List *recheckIndexes = NIL;
895 
896  /*
897  * abort the operation if not running transactions
898  */
 899  if (IsBootstrapProcessingMode())
900  elog(ERROR, "cannot UPDATE during bootstrap");
901 
902  /*
903  * get the heap tuple out of the tuple table slot, making sure we have a
904  * writable copy
905  */
906  tuple = ExecMaterializeSlot(slot);
907 
908  /*
909  * get information on the (current) result relation
910  */
911  resultRelInfo = estate->es_result_relation_info;
912  resultRelationDesc = resultRelInfo->ri_RelationDesc;
913 
914  /* BEFORE ROW UPDATE Triggers */
915  if (resultRelInfo->ri_TrigDesc &&
916  resultRelInfo->ri_TrigDesc->trig_update_before_row)
917  {
918  slot = ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
919  tupleid, oldtuple, slot);
920 
921  if (slot == NULL) /* "do nothing" */
922  return NULL;
923 
924  /* trigger might have changed tuple */
925  tuple = ExecMaterializeSlot(slot);
926  }
927 
928  /* INSTEAD OF ROW UPDATE Triggers */
929  if (resultRelInfo->ri_TrigDesc &&
930  resultRelInfo->ri_TrigDesc->trig_update_instead_row)
931  {
932  slot = ExecIRUpdateTriggers(estate, resultRelInfo,
933  oldtuple, slot);
934 
935  if (slot == NULL) /* "do nothing" */
936  return NULL;
937 
938  /* trigger might have changed tuple */
939  tuple = ExecMaterializeSlot(slot);
940  }
941  else if (resultRelInfo->ri_FdwRoutine)
942  {
943  /*
944  * update in foreign table: let the FDW do it
945  */
946  slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
947  resultRelInfo,
948  slot,
949  planSlot);
950 
951  if (slot == NULL) /* "do nothing" */
952  return NULL;
953 
954  /* FDW might have changed tuple */
955  tuple = ExecMaterializeSlot(slot);
956 
957  /*
958  * AFTER ROW Triggers or RETURNING expressions might reference the
959  * tableoid column, so initialize t_tableOid before evaluating them.
960  */
961  tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
962  }
963  else
964  {
965  LockTupleMode lockmode;
966 
967  /*
968  * Constraints might reference the tableoid column, so initialize
969  * t_tableOid before evaluating them.
970  */
971  tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
972 
973  /*
974  * Check any RLS UPDATE WITH CHECK policies
975  *
976  * If we generate a new candidate tuple after EvalPlanQual testing, we
977  * must loop back here and recheck any RLS policies and constraints.
978  * (We don't need to redo triggers, however. If there are any BEFORE
979  * triggers then trigger.c will have done heap_lock_tuple to lock the
980  * correct tuple, so there's no need to do them again.)
981  *
982  * ExecWithCheckOptions() will skip any WCOs which are not of the kind
983  * we are looking for at this point.
984  */
985 lreplace:;
986  if (resultRelInfo->ri_WithCheckOptions != NIL)
 987  ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
988  resultRelInfo, slot, estate);
989 
990  /*
991  * Check the constraints of the tuple. Note that we pass the same
992  * slot for the orig_slot argument, because unlike ExecInsert(), no
993  * tuple-routing is performed here, hence the slot remains unchanged.
994  */
995  if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
996  ExecConstraints(resultRelInfo, slot, slot, estate);
997 
998  /*
999  * replace the heap tuple
1000  *
1001  * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
1002  * that the row to be updated is visible to that snapshot, and throw a
1003  * can't-serialize error if not. This is a special-case behavior
1004  * needed for referential integrity updates in transaction-snapshot
1005  * mode transactions.
1006  */
1007  result = heap_update(resultRelationDesc, tupleid, tuple,
1008  estate->es_output_cid,
1009  estate->es_crosscheck_snapshot,
1010  true /* wait for commit */ ,
1011  &hufd, &lockmode);
1012  switch (result)
1013  {
1014  case HeapTupleSelfUpdated:
1015 
1016  /*
1017  * The target tuple was already updated or deleted by the
1018  * current command, or by a later command in the current
1019  * transaction. The former case is possible in a join UPDATE
1020  * where multiple tuples join to the same target tuple. This
1021  * is pretty questionable, but Postgres has always allowed it:
1022  * we just execute the first update action and ignore
1023  * additional update attempts.
1024  *
1025  * The latter case arises if the tuple is modified by a
1026  * command in a BEFORE trigger, or perhaps by a command in a
1027  * volatile function used in the query. In such situations we
1028  * should not ignore the update, but it is equally unsafe to
1029  * proceed. We don't want to discard the original UPDATE
1030  * while keeping the triggered actions based on it; and we
1031  * have no principled way to merge this update with the
1032  * previous ones. So throwing an error is the only safe
1033  * course.
1034  *
1035  * If a trigger actually intends this type of interaction, it
1036  * can re-execute the UPDATE (assuming it can figure out how)
1037  * and then return NULL to cancel the outer update.
1038  */
1039  if (hufd.cmax != estate->es_output_cid)
1040  ereport(ERROR,
1041  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1042  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
1043  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1044 
1045  /* Else, already updated by self; nothing to do */
1046  return NULL;
1047 
1048  case HeapTupleMayBeUpdated:
1049  break;
1050 
1051  case HeapTupleUpdated:
 1052  if (IsolationUsesXactSnapshot())
1053  ereport(ERROR,
1054  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1055  errmsg("could not serialize access due to concurrent update")));
1056  if (!ItemPointerEquals(tupleid, &hufd.ctid))
1057  {
1058  TupleTableSlot *epqslot;
1059 
1060  epqslot = EvalPlanQual(estate,
1061  epqstate,
1062  resultRelationDesc,
1063  resultRelInfo->ri_RangeTableIndex,
1064  lockmode,
1065  &hufd.ctid,
1066  hufd.xmax);
1067  if (!TupIsNull(epqslot))
1068  {
1069  *tupleid = hufd.ctid;
1070  slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
1071  tuple = ExecMaterializeSlot(slot);
1072  goto lreplace;
1073  }
1074  }
1075  /* tuple already deleted; nothing to do */
1076  return NULL;
1077 
1078  default:
1079  elog(ERROR, "unrecognized heap_update status: %u", result);
1080  return NULL;
1081  }
1082 
1083  /*
1084  * Note: instead of having to update the old index tuples associated
1085  * with the heap tuple, all we do is form and insert new index tuples.
1086  * This is because UPDATEs are actually DELETEs and INSERTs, and index
1087  * tuple deletion is done later by VACUUM (see notes in ExecDelete).
1088  * All we do here is insert new index tuples. -cim 9/27/89
1089  */
1090 
1091  /*
1092  * insert index entries for tuple
1093  *
1094  * Note: heap_update returns the tid (location) of the new tuple in
1095  * the t_self field.
1096  *
1097  * If it's a HOT update, we mustn't insert new index entries.
1098  */
1099  if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
1100  recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
1101  estate, false, NULL, NIL);
1102  }
1103 
1104  if (canSetTag)
1105  (estate->es_processed)++;
1106 
1107  /* AFTER ROW UPDATE Triggers */
1108  ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, tuple,
1109  recheckIndexes);
1110 
1111  list_free(recheckIndexes);
1112 
1113  /*
1114  * Check any WITH CHECK OPTION constraints from parent views. We are
1115  * required to do this after testing all constraints and uniqueness
1116  * violations per the SQL spec, so we do it after actually updating the
1117  * record in the heap and all indexes.
1118  *
1119  * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1120  * are looking for at this point.
1121  */
1122  if (resultRelInfo->ri_WithCheckOptions != NIL)
1123  ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1124 
1125  /* Process RETURNING if present */
1126  if (resultRelInfo->ri_projectReturning)
1127  return ExecProcessReturning(resultRelInfo, slot, planSlot);
1128 
1129  return NULL;
1130 }
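
/*
 * Illustrative sketch, not part of this file: the shape of the FDW write
 * callbacks that ExecInsert, ExecUpdate and ExecDelete call above.  This
 * hypothetical "blackhole"-style handler accepts every row; a real FDW
 * must also supply the scan-side callbacks (GetForeignRelSize,
 * GetForeignPaths, GetForeignPlan, IterateForeignScan, ...) and would
 * normally set BeginForeignModify/EndForeignModify as well.  Returning
 * NULL from any of these callbacks means "do nothing" for that row.
 */
#include "postgres.h"
#include "fmgr.h"
#include "foreign/fdwapi.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(blackhole_fdw_handler);

static TupleTableSlot *
blackholeExecForeignInsert(EState *estate, ResultRelInfo *rinfo,
                           TupleTableSlot *slot, TupleTableSlot *planSlot)
{
    return slot;                /* pretend the row was stored remotely */
}

static TupleTableSlot *
blackholeExecForeignUpdate(EState *estate, ResultRelInfo *rinfo,
                           TupleTableSlot *slot, TupleTableSlot *planSlot)
{
    return slot;
}

static TupleTableSlot *
blackholeExecForeignDelete(EState *estate, ResultRelInfo *rinfo,
                           TupleTableSlot *slot, TupleTableSlot *planSlot)
{
    return slot;                /* a real FDW would fill in the deleted row */
}

Datum
blackhole_fdw_handler(PG_FUNCTION_ARGS)
{
    FdwRoutine *routine = makeNode(FdwRoutine);

    routine->ExecForeignInsert = blackholeExecForeignInsert;
    routine->ExecForeignUpdate = blackholeExecForeignUpdate;
    routine->ExecForeignDelete = blackholeExecForeignDelete;

    PG_RETURN_POINTER(routine);
}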
1131 
1132 /*
1133  * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
1134  *
1135  * Try to lock tuple for update as part of speculative insertion. If
1136  * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
1137  * (but still lock row, even though it may not satisfy estate's
1138  * snapshot).
1139  *
 1140  * Returns true if we're done (with or without an update), or false if
1141  * the caller must retry the INSERT from scratch.
1142  */
1143 static bool
 1144 ExecOnConflictUpdate(ModifyTableState *mtstate,
1145  ResultRelInfo *resultRelInfo,
1146  ItemPointer conflictTid,
1147  TupleTableSlot *planSlot,
1148  TupleTableSlot *excludedSlot,
1149  EState *estate,
1150  bool canSetTag,
1151  TupleTableSlot **returning)
1152 {
1153  ExprContext *econtext = mtstate->ps.ps_ExprContext;
1154  Relation relation = resultRelInfo->ri_RelationDesc;
1155  List *onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere;
1156  HeapTupleData tuple;
1157  HeapUpdateFailureData hufd;
1158  LockTupleMode lockmode;
1159  HTSU_Result test;
1160  Buffer buffer;
1161 
1162  /* Determine lock mode to use */
1163  lockmode = ExecUpdateLockMode(estate, resultRelInfo);
1164 
1165  /*
1166  * Lock tuple for update. Don't follow updates when tuple cannot be
1167  * locked without doing so. A row locking conflict here means our
1168  * previous conclusion that the tuple is conclusively committed is not
1169  * true anymore.
1170  */
1171  tuple.t_self = *conflictTid;
1172  test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
1173  lockmode, LockWaitBlock, false, &buffer,
1174  &hufd);
1175  switch (test)
1176  {
1177  case HeapTupleMayBeUpdated:
1178  /* success! */
1179  break;
1180 
1181  case HeapTupleInvisible:
1182 
1183  /*
1184  * This can occur when a just inserted tuple is updated again in
1185  * the same command. E.g. because multiple rows with the same
1186  * conflicting key values are inserted.
1187  *
1188  * This is somewhat similar to the ExecUpdate()
1189  * HeapTupleSelfUpdated case. We do not want to proceed because
1190  * it would lead to the same row being updated a second time in
1191  * some unspecified order, and in contrast to plain UPDATEs
1192  * there's no historical behavior to break.
1193  *
1194  * It is the user's responsibility to prevent this situation from
1195  * occurring. These problems are why SQL-2003 similarly specifies
1196  * that for SQL MERGE, an exception must be raised in the event of
1197  * an attempt to update the same row twice.
1198  */
 1199  if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data)))
1200  ereport(ERROR,
1201  (errcode(ERRCODE_CARDINALITY_VIOLATION),
1202  errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
1203  errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
1204 
1205  /* This shouldn't happen */
1206  elog(ERROR, "attempted to lock invisible tuple");
1207 
1208  case HeapTupleSelfUpdated:
1209 
1210  /*
1211  * This state should never be reached. As a dirty snapshot is used
1212  * to find conflicting tuples, speculative insertion wouldn't have
1213  * seen this row to conflict with.
1214  */
1215  elog(ERROR, "unexpected self-updated tuple");
1216 
1217  case HeapTupleUpdated:
 1218  if (IsolationUsesXactSnapshot())
1219  ereport(ERROR,
1220  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1221  errmsg("could not serialize access due to concurrent update")));
1222 
1223  /*
1224  * Tell caller to try again from the very start.
1225  *
1226  * It does not make sense to use the usual EvalPlanQual() style
1227  * loop here, as the new version of the row might not conflict
1228  * anymore, or the conflicting tuple has actually been deleted.
1229  */
1230  ReleaseBuffer(buffer);
1231  return false;
1232 
1233  default:
1234  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
1235  }
1236 
1237  /*
1238  * Success, the tuple is locked.
1239  *
1240  * Reset per-tuple memory context to free any expression evaluation
1241  * storage allocated in the previous cycle.
1242  */
1243  ResetExprContext(econtext);
1244 
1245  /*
1246  * Verify that the tuple is visible to our MVCC snapshot if the current
1247  * isolation level mandates that.
1248  *
1249  * It's not sufficient to rely on the check within ExecUpdate() as e.g.
1250  * CONFLICT ... WHERE clause may prevent us from reaching that.
1251  *
1252  * This means we only ever continue when a new command in the current
1253  * transaction could see the row, even though in READ COMMITTED mode the
1254  * tuple will not be visible according to the current statement's
1255  * snapshot. This is in line with the way UPDATE deals with newer tuple
1256  * versions.
1257  */
1258  ExecCheckHeapTupleVisible(estate, &tuple, buffer);
1259 
1260  /* Store target's existing tuple in the state's dedicated slot */
1261  ExecStoreTuple(&tuple, mtstate->mt_existing, buffer, false);
1262 
1263  /*
1264  * Make tuple and any needed join variables available to ExecQual and
1265  * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
1266  * the target's existing tuple is installed in the scantuple. EXCLUDED
1267  * has been made to reference INNER_VAR in setrefs.c, but there is no
1268  * other redirection.
1269  */
1270  econtext->ecxt_scantuple = mtstate->mt_existing;
1271  econtext->ecxt_innertuple = excludedSlot;
1272  econtext->ecxt_outertuple = NULL;
1273 
1274  if (!ExecQual(onConflictSetWhere, econtext, false))
1275  {
1276  ReleaseBuffer(buffer);
1277  InstrCountFiltered1(&mtstate->ps, 1);
1278  return true; /* done with the tuple */
1279  }
1280 
1281  if (resultRelInfo->ri_WithCheckOptions != NIL)
1282  {
1283  /*
1284  * Check target's existing tuple against UPDATE-applicable USING
1285  * security barrier quals (if any), enforced here as RLS checks/WCOs.
1286  *
1287  * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
1288  * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
1289  * but that's almost the extent of its special handling for ON
1290  * CONFLICT DO UPDATE.
1291  *
1292  * The rewriter will also have associated UPDATE applicable straight
1293  * RLS checks/WCOs for the benefit of the ExecUpdate() call that
1294  * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
1295  * kinds, so there is no danger of spurious over-enforcement in the
1296  * INSERT or UPDATE path.
1297  */
 1298  ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
1299  mtstate->mt_existing,
1300  mtstate->ps.state);
1301  }
1302 
1303  /* Project the new tuple version */
1304  ExecProject(resultRelInfo->ri_onConflictSetProj);
1305 
1306  /*
1307  * Note that it is possible that the target tuple has been modified in
1308  * this session, after the above heap_lock_tuple. We choose to not error
1309  * out in that case, in line with ExecUpdate's treatment of similar cases.
1310  * This can happen if an UPDATE is triggered from within ExecQual(),
1311  * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
1312  * wCTE in the ON CONFLICT's SET.
1313  */
1314 
1315  /* Execute UPDATE with projection */
1316  *returning = ExecUpdate(&tuple.t_self, NULL,
1317  mtstate->mt_conflproj, planSlot,
1318  &mtstate->mt_epqstate, mtstate->ps.state,
1319  canSetTag);
1320 
1321  ReleaseBuffer(buffer);
1322  return true;
1323 }
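
/*
 * Illustrative sketch, not part of this file: a statement that reaches
 * ExecOnConflictUpdate, again using the hypothetical demo table and an
 * open libpq connection.  EXCLUDED is the tuple installed in
 * ecxt_innertuple above, and the WHERE clause becomes the
 * onConflictSetWhere qual; when it evaluates to false the existing row is
 * locked but left unchanged, and no RETURNING row is produced.
 */
#include <stdio.h>
#include "libpq-fe.h"

static void
upsert_demo(PGconn *conn)
{
    PGresult   *res;

    res = PQexec(conn,
                 "INSERT INTO demo (id, val) VALUES (1, 'fresh') "
                 "ON CONFLICT (id) DO UPDATE "
                 "SET val = EXCLUDED.val "
                 "WHERE demo.val IS DISTINCT FROM EXCLUDED.val "
                 "RETURNING id, val");
    if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) > 0)
        printf("upserted id=%s val=%s\n",
               PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 1));
    else if (PQresultStatus(res) == PGRES_TUPLES_OK)
        printf("existing row already matched; WHERE clause skipped it\n");
    else
        fprintf(stderr, "upsert failed: %s", PQresultErrorMessage(res));
    PQclear(res);
}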
1324 
1325 
1326 /*
1327  * Process BEFORE EACH STATEMENT triggers
1328  */
1329 static void
 1330 fireBSTriggers(ModifyTableState *node)
1331 {
1332  switch (node->operation)
1333  {
1334  case CMD_INSERT:
 1335  ExecBSInsertTriggers(node->ps.state, node->resultRelInfo);
1336  if (node->mt_onconflict == ONCONFLICT_UPDATE)
 1337  ExecBSUpdateTriggers(node->ps.state,
1338  node->resultRelInfo);
1339  break;
1340  case CMD_UPDATE:
 1341  ExecBSUpdateTriggers(node->ps.state, node->resultRelInfo);
1342  break;
1343  case CMD_DELETE:
 1344  ExecBSDeleteTriggers(node->ps.state, node->resultRelInfo);
1345  break;
1346  default:
1347  elog(ERROR, "unknown operation");
1348  break;
1349  }
1350 }
1351 
1352 /*
1353  * Process AFTER EACH STATEMENT triggers
1354  */
1355 static void
 1356 fireASTriggers(ModifyTableState *node)
1357 {
1358  switch (node->operation)
1359  {
1360  case CMD_INSERT:
1361  if (node->mt_onconflict == ONCONFLICT_UPDATE)
 1362  ExecASUpdateTriggers(node->ps.state,
1363  node->resultRelInfo);
 1364  ExecASInsertTriggers(node->ps.state, node->resultRelInfo);
1365  break;
1366  case CMD_UPDATE:
 1367  ExecASUpdateTriggers(node->ps.state, node->resultRelInfo);
1368  break;
1369  case CMD_DELETE:
 1370  ExecASDeleteTriggers(node->ps.state, node->resultRelInfo);
1371  break;
1372  default:
1373  elog(ERROR, "unknown operation");
1374  break;
1375  }
1376 }
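
/*
 * Illustrative sketch, not part of this file: a statement-level trigger in
 * C that makes the behavior of fireBSTriggers/fireASTriggers visible.
 * With hypothetical triggers created BEFORE and AFTER INSERT OR UPDATE OR
 * DELETE ON demo FOR EACH STATEMENT, an INSERT ... ON CONFLICT DO UPDATE
 * raises NOTICEs for both the INSERT and the UPDATE statement triggers,
 * matching the ONCONFLICT_UPDATE branches above.
 */
#include "postgres.h"
#include "fmgr.h"
#include "commands/trigger.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(demo_log_statement);

Datum
demo_log_statement(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;

    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "demo_log_statement: not called by trigger manager");

    elog(NOTICE, "%s %s statement trigger \"%s\" fired",
         TRIGGER_FIRED_BEFORE(trigdata->tg_event) ? "BEFORE" : "AFTER",
         TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ? "INSERT" :
         TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ? "UPDATE" :
         TRIGGER_FIRED_BY_DELETE(trigdata->tg_event) ? "DELETE" : "other",
         trigdata->tg_trigger->tgname);

    /* The return value of a statement-level trigger is ignored */
    return PointerGetDatum(NULL);
}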
1377 
1378 
1379 /* ----------------------------------------------------------------
1380  * ExecModifyTable
1381  *
1382  * Perform table modifications as required, and return RETURNING results
1383  * if needed.
1384  * ----------------------------------------------------------------
1385  */
 1386 TupleTableSlot *
 1387 ExecModifyTable(ModifyTableState *node)
1388 {
1389  EState *estate = node->ps.state;
1390  CmdType operation = node->operation;
1391  ResultRelInfo *saved_resultRelInfo;
1392  ResultRelInfo *resultRelInfo;
1393  PlanState *subplanstate;
1394  JunkFilter *junkfilter;
1395  TupleTableSlot *slot;
1396  TupleTableSlot *planSlot;
1397  ItemPointer tupleid = NULL;
1398  ItemPointerData tuple_ctid;
1399  HeapTupleData oldtupdata;
1400  HeapTuple oldtuple;
1401 
1402  /*
1403  * This should NOT get called during EvalPlanQual; we should have passed a
1404  * subplan tree to EvalPlanQual, instead. Use a runtime test not just
1405  * Assert because this condition is easy to miss in testing. (Note:
1406  * although ModifyTable should not get executed within an EvalPlanQual
1407  * operation, we do have to allow it to be initialized and shut down in
1408  * case it is within a CTE subplan. Hence this test must be here, not in
1409  * ExecInitModifyTable.)
1410  */
1411  if (estate->es_epqTuple != NULL)
1412  elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
1413 
1414  /*
1415  * If we've already completed processing, don't try to do more. We need
1416  * this test because ExecPostprocessPlan might call us an extra time, and
1417  * our subplan's nodes aren't necessarily robust against being called
1418  * extra times.
1419  */
1420  if (node->mt_done)
1421  return NULL;
1422 
1423  /*
1424  * On first call, fire BEFORE STATEMENT triggers before proceeding.
1425  */
1426  if (node->fireBSTriggers)
1427  {
1428  fireBSTriggers(node);
1429  node->fireBSTriggers = false;
1430  }
1431 
1432  /* Preload local variables */
1433  resultRelInfo = node->resultRelInfo + node->mt_whichplan;
1434  subplanstate = node->mt_plans[node->mt_whichplan];
1435  junkfilter = resultRelInfo->ri_junkFilter;
1436 
1437  /*
1438  * es_result_relation_info must point to the currently active result
1439  * relation while we are within this ModifyTable node. Even though
1440  * ModifyTable nodes can't be nested statically, they can be nested
1441  * dynamically (since our subplan could include a reference to a modifying
1442  * CTE). So we have to save and restore the caller's value.
1443  */
1444  saved_resultRelInfo = estate->es_result_relation_info;
1445 
1446  estate->es_result_relation_info = resultRelInfo;
1447 
1448  /*
1449  * Fetch rows from subplan(s), and execute the required table modification
1450  * for each row.
1451  */
1452  for (;;)
1453  {
1454  /*
1455  * Reset the per-output-tuple exprcontext. This is needed because
1456  * triggers expect to use that context as workspace. It's a bit ugly
1457  * to do this below the top level of the plan, however. We might need
1458  * to rethink this later.
1459  */
1460  ResetPerTupleExprContext(estate);
1461 
1462  planSlot = ExecProcNode(subplanstate);
1463 
1464  if (TupIsNull(planSlot))
1465  {
1466  /* advance to next subplan if any */
1467  node->mt_whichplan++;
1468  if (node->mt_whichplan < node->mt_nplans)
1469  {
1470  resultRelInfo++;
1471  subplanstate = node->mt_plans[node->mt_whichplan];
1472  junkfilter = resultRelInfo->ri_junkFilter;
1473  estate->es_result_relation_info = resultRelInfo;
1474  EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
1475  node->mt_arowmarks[node->mt_whichplan]);
1476  continue;
1477  }
1478  else
1479  break;
1480  }
1481 
1482  /*
1483  * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
1484  * here is compute the RETURNING expressions.
1485  */
1486  if (resultRelInfo->ri_usesFdwDirectModify)
1487  {
1488  Assert(resultRelInfo->ri_projectReturning);
1489 
1490  /*
1491  * A scan slot containing the data that was actually inserted,
1492  * updated or deleted has already been made available to
1493  * ExecProcessReturning by IterateDirectModify, so no need to
1494  * provide it here.
1495  */
1496  slot = ExecProcessReturning(resultRelInfo, NULL, planSlot);
1497 
1498  estate->es_result_relation_info = saved_resultRelInfo;
1499  return slot;
1500  }
1501 
1502  EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
1503  slot = planSlot;
1504 
1505  oldtuple = NULL;
1506  if (junkfilter != NULL)
1507  {
1508  /*
1509  * extract the 'ctid' or 'wholerow' junk attribute.
1510  */
1511  if (operation == CMD_UPDATE || operation == CMD_DELETE)
1512  {
1513  char relkind;
1514  Datum datum;
1515  bool isNull;
1516 
1517  relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
1518  if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)
1519  {
1520  datum = ExecGetJunkAttribute(slot,
1521  junkfilter->jf_junkAttNo,
1522  &isNull);
1523  /* shouldn't ever get a null result... */
1524  if (isNull)
1525  elog(ERROR, "ctid is NULL");
1526 
1527  tupleid = (ItemPointer) DatumGetPointer(datum);
1528  tuple_ctid = *tupleid; /* be sure we don't free
1529  * ctid!! */
1530  tupleid = &tuple_ctid;
1531  }
1532 
1533  /*
1534  * Use the wholerow attribute, when available, to reconstruct
1535  * the old relation tuple.
1536  *
1537  * Foreign table updates have a wholerow attribute when the
1538  * relation has an AFTER ROW trigger. Note that the wholerow
1539  * attribute does not carry system columns. Foreign table
1540  * triggers miss seeing those, except that we know enough here
1541  * to set t_tableOid. Quite separately from this, the FDW may
1542  * fetch its own junk attrs to identify the row.
1543  *
1544  * Other relevant relkinds, currently limited to views, always
1545  * have a wholerow attribute.
1546  */
1547  else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
1548  {
1549  datum = ExecGetJunkAttribute(slot,
1550  junkfilter->jf_junkAttNo,
1551  &isNull);
1552  /* shouldn't ever get a null result... */
1553  if (isNull)
1554  elog(ERROR, "wholerow is NULL");
1555 
1556  oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
1557  oldtupdata.t_len =
 1558  HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
1559  ItemPointerSetInvalid(&(oldtupdata.t_self));
1560  /* Historically, view triggers see invalid t_tableOid. */
1561  oldtupdata.t_tableOid =
1562  (relkind == RELKIND_VIEW) ? InvalidOid :
1563  RelationGetRelid(resultRelInfo->ri_RelationDesc);
1564 
1565  oldtuple = &oldtupdata;
1566  }
1567  else
1568  Assert(relkind == RELKIND_FOREIGN_TABLE);
1569  }
1570 
1571  /*
1572  * apply the junkfilter if needed.
1573  */
1574  if (operation != CMD_DELETE)
1575  slot = ExecFilterJunk(junkfilter, slot);
1576  }
1577 
1578  switch (operation)
1579  {
1580  case CMD_INSERT:
1581  slot = ExecInsert(node, slot, planSlot,
1582  node->mt_arbiterindexes, node->mt_onconflict,
1583  estate, node->canSetTag);
1584  break;
1585  case CMD_UPDATE:
1586  slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
1587  &node->mt_epqstate, estate, node->canSetTag);
1588  break;
1589  case CMD_DELETE:
1590  slot = ExecDelete(tupleid, oldtuple, planSlot,
1591  &node->mt_epqstate, estate, node->canSetTag);
1592  break;
1593  default:
1594  elog(ERROR, "unknown operation");
1595  break;
1596  }
1597 
1598  /*
1599  * If we got a RETURNING result, return it to caller. We'll continue
1600  * the work on next call.
1601  */
1602  if (slot)
1603  {
1604  estate->es_result_relation_info = saved_resultRelInfo;
1605  return slot;
1606  }
1607  }
1608 
1609  /* Restore es_result_relation_info before exiting */
1610  estate->es_result_relation_info = saved_resultRelInfo;
1611 
1612  /*
1613  * We're done, but fire AFTER STATEMENT triggers before exiting.
1614  */
1615  fireASTriggers(node);
1616 
1617  node->mt_done = true;
1618 
1619  return NULL;
1620 }
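
/*
 * Illustrative sketch, not part of this file: observing ExecModifyTable's
 * row-at-a-time RETURNING protocol through libpq's single-row mode, again
 * with the hypothetical demo table and an open connection.  Each
 * PGRES_SINGLE_TUPLE result corresponds to one RETURNING tuple the node
 * hands back before resuming work on the next row.
 */
#include <stdio.h>
#include "libpq-fe.h"

static void
stream_update_returning(PGconn *conn)
{
    PGresult   *res;

    if (!PQsendQuery(conn,
                     "UPDATE demo SET val = upper(val) RETURNING id, val"))
    {
        fprintf(stderr, "dispatch failed: %s", PQerrorMessage(conn));
        return;
    }
    if (!PQsetSingleRowMode(conn))
        fprintf(stderr, "could not enter single-row mode\n");

    while ((res = PQgetResult(conn)) != NULL)
    {
        if (PQresultStatus(res) == PGRES_SINGLE_TUPLE)
            printf("updated id=%s val=%s\n",
                   PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 1));
        PQclear(res);
    }
}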
1621 
1622 /* ----------------------------------------------------------------
1623  * ExecInitModifyTable
1624  * ----------------------------------------------------------------
1625  */
 1626 ModifyTableState *
1627 ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
1628 {
1629  ModifyTableState *mtstate;
1630  CmdType operation = node->operation;
1631  int nplans = list_length(node->plans);
1632  ResultRelInfo *saved_resultRelInfo;
1633  ResultRelInfo *resultRelInfo;
1634  TupleDesc tupDesc;
1635  Plan *subplan;
1636  ListCell *l;
1637  int i;
1638  Relation rel;
1639 
1640  /* check for unsupported flags */
1641  Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
1642 
1643  /*
1644  * create state structure
1645  */
1646  mtstate = makeNode(ModifyTableState);
1647  mtstate->ps.plan = (Plan *) node;
1648  mtstate->ps.state = estate;
1649  mtstate->ps.targetlist = NIL; /* not actually used */
1650 
1651  mtstate->operation = operation;
1652  mtstate->canSetTag = node->canSetTag;
1653  mtstate->mt_done = false;
1654 
1655  mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
1656  mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
1657  mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
1658  mtstate->mt_nplans = nplans;
1659  mtstate->mt_onconflict = node->onConflictAction;
1660  mtstate->mt_arbiterindexes = node->arbiterIndexes;
1661 
1662  /* set up epqstate with dummy subplan data for the moment */
1663  EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
1664  mtstate->fireBSTriggers = true;
1665 
1666  /*
1667  * call ExecInitNode on each of the plans to be executed and save the
1668  * results into the array "mt_plans". This is also a convenient place to
1669  * verify that the proposed target relations are valid and open their
1670  * indexes for insertion of new index entries. Note we *must* set
1671  * estate->es_result_relation_info correctly while we initialize each
1672  * sub-plan; ExecContextForcesOids depends on that!
1673  */
1674  saved_resultRelInfo = estate->es_result_relation_info;
1675 
1676  resultRelInfo = mtstate->resultRelInfo;
1677  i = 0;
1678  foreach(l, node->plans)
1679  {
1680  subplan = (Plan *) lfirst(l);
1681 
1682  /* Initialize the usesFdwDirectModify flag */
1683  resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
1684  node->fdwDirectModifyPlans);
1685 
1686  /*
1687  * Verify result relation is a valid target for the current operation
1688  */
1689  CheckValidResultRel(resultRelInfo->ri_RelationDesc, operation);
1690 
1691  /*
1692  * If there are indices on the result relation, open them and save
1693  * descriptors in the result relation info, so that we can add new
1694  * index entries for the tuples we add/update. We need not do this
1695  * for a DELETE, however, since deletion doesn't affect indexes. Also,
1696  * inside an EvalPlanQual operation, the indexes might be open
1697  * already, since we share the resultrel state with the original
1698  * query.
1699  */
1700  if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
1701  operation != CMD_DELETE &&
1702  resultRelInfo->ri_IndexRelationDescs == NULL)
1703  ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE);
1704 
1705  /* Now init the plan for this result rel */
1706  estate->es_result_relation_info = resultRelInfo;
1707  mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
1708 
1709  /* Also let FDWs init themselves for foreign-table result rels */
1710  if (!resultRelInfo->ri_usesFdwDirectModify &&
1711  resultRelInfo->ri_FdwRoutine != NULL &&
1712  resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
1713  {
1714  List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
1715 
1716  resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
1717  resultRelInfo,
1718  fdw_private,
1719  i,
1720  eflags);
1721  }
1722 
1723  resultRelInfo++;
1724  i++;
1725  }
1726 
1727  estate->es_result_relation_info = saved_resultRelInfo;
1728 
1729  /* The root table RT index is at the head of the partitioned_rels list */
1730  if (node->partitioned_rels)
1731  {
1732  Index root_rti;
1733  Oid root_oid;
1734 
1735  root_rti = linitial_int(node->partitioned_rels);
1736  root_oid = getrelid(root_rti, estate->es_range_table);
1737  rel = heap_open(root_oid, NoLock); /* locked by InitPlan */
1738  }
1739  else
1740  rel = mtstate->resultRelInfo->ri_RelationDesc;
1741 
1742  /* Build state for INSERT tuple routing */
1743  if (operation == CMD_INSERT &&
1744  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1745  {
1746  PartitionDispatch *partition_dispatch_info;
1747  ResultRelInfo *partitions;
1748  TupleConversionMap **partition_tupconv_maps;
1749  TupleTableSlot *partition_tuple_slot;
1750  int num_parted,
1751  num_partitions;
1752 
1753  ExecSetupPartitionTupleRouting(rel,
1754  &partition_dispatch_info,
1755  &partitions,
1756  &partition_tupconv_maps,
1757  &partition_tuple_slot,
1758  &num_parted, &num_partitions);
1759  mtstate->mt_partition_dispatch_info = partition_dispatch_info;
1760  mtstate->mt_num_dispatch = num_parted;
1761  mtstate->mt_partitions = partitions;
1762  mtstate->mt_num_partitions = num_partitions;
1763  mtstate->mt_partition_tupconv_maps = partition_tupconv_maps;
1764  mtstate->mt_partition_tuple_slot = partition_tuple_slot;
1765  }
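 /*
  * Note: at run time, ExecInsert (earlier in this file) uses
  * mt_partition_dispatch_info with ExecFindPartition() to choose a leaf
  * partition for each routed tuple, and mt_partition_tupconv_maps together
  * with mt_partition_tuple_slot to convert the tuple to that leaf's rowtype
  * when its attribute numbers differ from the root table's.
  */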
1766 
1767  /*
1768  * Initialize any WITH CHECK OPTION constraints if needed.
1769  */
1770  resultRelInfo = mtstate->resultRelInfo;
1771  i = 0;
1772  foreach(l, node->withCheckOptionLists)
1773  {
1774  List *wcoList = (List *) lfirst(l);
1775  List *wcoExprs = NIL;
1776  ListCell *ll;
1777 
1778  foreach(ll, wcoList)
1779  {
1780  WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
1781  ExprState *wcoExpr = ExecInitExpr((Expr *) wco->qual,
1782  mtstate->mt_plans[i]);
1783 
1784  wcoExprs = lappend(wcoExprs, wcoExpr);
1785  }
1786 
1787  resultRelInfo->ri_WithCheckOptions = wcoList;
1788  resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
1789  resultRelInfo++;
1790  i++;
1791  }
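 /*
  * The WITH CHECK OPTION expressions initialized here (and, for leaf
  * partitions, just below) are evaluated later by ExecWithCheckOptions()
  * as ExecInsert/ExecUpdate process each row.
  */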
1792 
1793  /*
1794  * Build WITH CHECK OPTION constraints for each leaf partition rel.
1795  * Note that we didn't build the withCheckOptionList for each partition
1796  * within the planner, but simple translation of the varattnos for each
1797  * partition will suffice. This only occurs for the INSERT case;
1798  * UPDATE/DELETE cases are handled above.
1799  */
1800  if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
1801  {
1802  List *wcoList;
1803 
1804  Assert(operation == CMD_INSERT);
1805  resultRelInfo = mtstate->mt_partitions;
1806  wcoList = linitial(node->withCheckOptionLists);
1807  for (i = 0; i < mtstate->mt_num_partitions; i++)
1808  {
1809  Relation partrel = resultRelInfo->ri_RelationDesc;
1810  List *mapped_wcoList;
1811  List *wcoExprs = NIL;
1812  ListCell *ll;
1813 
1814  /* varno = node->nominalRelation */
1815  mapped_wcoList = map_partition_varattnos(wcoList,
1816  node->nominalRelation,
1817  partrel, rel);
1818  foreach(ll, mapped_wcoList)
1819  {
1820  WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
1821  ExprState *wcoExpr = ExecInitExpr((Expr *) wco->qual,
1822  mtstate->mt_plans[i]);
1823 
1824  wcoExprs = lappend(wcoExprs, wcoExpr);
1825  }
1826 
1827  resultRelInfo->ri_WithCheckOptions = mapped_wcoList;
1828  resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
1829  resultRelInfo++;
1830  }
1831  }
1832 
1833  /*
1834  * Initialize RETURNING projections if needed.
1835  */
1836  if (node->returningLists)
1837  {
1838  TupleTableSlot *slot;
1839  ExprContext *econtext;
1840  List *returningList;
1841 
1842  /*
1843  * Initialize result tuple slot and assign its rowtype using the first
1844  * RETURNING list. We assume the rest will look the same.
1845  */
1846  tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
1847  false);
1848 
1849  /* Set up a slot for the output of the RETURNING projection(s) */
1850  ExecInitResultTupleSlot(estate, &mtstate->ps);
1851  ExecAssignResultType(&mtstate->ps, tupDesc);
1852  slot = mtstate->ps.ps_ResultTupleSlot;
1853 
1854  /* Need an econtext too */
1855  econtext = CreateExprContext(estate);
1856  mtstate->ps.ps_ExprContext = econtext;
1857 
1858  /*
1859  * Build a projection for each result rel.
1860  */
1861  resultRelInfo = mtstate->resultRelInfo;
1862  foreach(l, node->returningLists)
1863  {
1864  List *rlist = (List *) lfirst(l);
1865  List *rliststate;
1866 
1867  rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps);
1868  resultRelInfo->ri_projectReturning =
1869  ExecBuildProjectionInfo(rliststate, econtext, slot,
1870  resultRelInfo->ri_RelationDesc->rd_att);
1871  resultRelInfo++;
1872  }
1873 
1874  /*
1875  * Build a projection for each leaf partition rel. Note that we
1876  * didn't build the returningList for each partition within the
1877  * planner, but simple translation of the varattnos for each partition
1878  * will suffice. This only occurs for the INSERT case; UPDATE/DELETE
1879  * are handled above.
1880  */
1881  resultRelInfo = mtstate->mt_partitions;
1882  returningList = linitial(node->returningLists);
1883  for (i = 0; i < mtstate->mt_num_partitions; i++)
1884  {
1885  Relation partrel = resultRelInfo->ri_RelationDesc;
1886  List *rlist,
1887  *rliststate;
1888 
1889  /* varno = node->nominalRelation */
1890  rlist = map_partition_varattnos(returningList,
1891  node->nominalRelation,
1892  partrel, rel);
1893  rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps);
1894  resultRelInfo->ri_projectReturning =
1895  ExecBuildProjectionInfo(rliststate, econtext, slot,
1896  resultRelInfo->ri_RelationDesc->rd_att);
1897  resultRelInfo++;
1898  }
1899  }
1900  else
1901  {
1902  /*
1903  * We still must construct a dummy result tuple type, because InitPlan
1904  * expects one (maybe should change that?).
1905  */
1906  tupDesc = ExecTypeFromTL(NIL, false);
1907  ExecInitResultTupleSlot(estate, &mtstate->ps);
1908  ExecAssignResultType(&mtstate->ps, tupDesc);
1909 
1910  mtstate->ps.ps_ExprContext = NULL;
1911  }
1912 
1913  /* Close the root partitioned rel if we opened it above. */
1914  if (rel != mtstate->resultRelInfo->ri_RelationDesc)
1915  heap_close(rel, NoLock);
1916 
1917  /*
1918  * If needed, initialize target list, projection and qual for ON CONFLICT
1919  * DO UPDATE.
1920  */
1921  resultRelInfo = mtstate->resultRelInfo;
1922  if (node->onConflictAction == ONCONFLICT_UPDATE)
1923  {
1924  ExprContext *econtext;
1925  ExprState *setexpr;
1926  TupleDesc tupDesc;
1927 
1928  /* insert may only have one plan, inheritance is not expanded */
1929  Assert(nplans == 1);
1930 
1931  /* already exists if created by RETURNING processing above */
1932  if (mtstate->ps.ps_ExprContext == NULL)
1933  ExecAssignExprContext(estate, &mtstate->ps);
1934 
1935  econtext = mtstate->ps.ps_ExprContext;
1936 
1937  /* initialize slot for the existing tuple */
1938  mtstate->mt_existing = ExecInitExtraTupleSlot(mtstate->ps.state);
1939  ExecSetSlotDescriptor(mtstate->mt_existing,
1940  resultRelInfo->ri_RelationDesc->rd_att);
1941 
1942  /* carried forward solely for the benefit of explain */
1943  mtstate->mt_excludedtlist = node->exclRelTlist;
1944 
1945  /* create target slot for UPDATE SET projection */
1946  tupDesc = ExecTypeFromTL((List *) node->onConflictSet,
1947  resultRelInfo->ri_RelationDesc->rd_rel->relhasoids);
1948  mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state);
1949  ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc);
1950 
1951  /* build UPDATE SET expression and projection state */
1952  setexpr = ExecInitExpr((Expr *) node->onConflictSet, &mtstate->ps);
1953  resultRelInfo->ri_onConflictSetProj =
1954  ExecBuildProjectionInfo((List *) setexpr, econtext,
1955  mtstate->mt_conflproj,
1956  resultRelInfo->ri_RelationDesc->rd_att);
1957 
1958  /* build DO UPDATE WHERE clause expression */
1959  if (node->onConflictWhere)
1960  {
1961  ExprState *qualexpr;
1962 
1963  qualexpr = ExecInitExpr((Expr *) node->onConflictWhere,
1964  &mtstate->ps);
1965 
1966  resultRelInfo->ri_onConflictSetWhere = (List *) qualexpr;
1967  }
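 /*
  * As an illustration (example query, not taken from the source): a
  * statement such as
  *
  *    INSERT INTO t VALUES (1, 'a')
  *      ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val
  *      WHERE t.val IS DISTINCT FROM EXCLUDED.val
  *
  * reaches this path with the SET list in node->onConflictSet and the
  * WHERE clause in node->onConflictWhere, both initialized above.
  */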
1968  }
1969 
1970  /*
1971  * If we have any secondary relations in an UPDATE or DELETE, they need to
1972  * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
1973  * EvalPlanQual mechanism needs to be told about them. Locate the
1974  * relevant ExecRowMarks.
1975  */
1976  foreach(l, node->rowMarks)
1977  {
1978  PlanRowMark *rc = castNode(PlanRowMark, lfirst(l));
1979  ExecRowMark *erm;
1980 
1981  /* ignore "parent" rowmarks; they are irrelevant at runtime */
1982  if (rc->isParent)
1983  continue;
1984 
1985  /* find ExecRowMark (same for all subplans) */
1986  erm = ExecFindRowMark(estate, rc->rti, false);
1987 
1988  /* build ExecAuxRowMark for each subplan */
1989  for (i = 0; i < nplans; i++)
1990  {
1991  ExecAuxRowMark *aerm;
1992 
1993  subplan = mtstate->mt_plans[i]->plan;
1994  aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
1995  mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm);
1996  }
1997  }
1998 
1999  /* select first subplan */
2000  mtstate->mt_whichplan = 0;
2001  subplan = (Plan *) linitial(node->plans);
2002  EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan,
2003  mtstate->mt_arowmarks[0]);
2004 
2005  /*
2006  * Initialize the junk filter(s) if needed. INSERT queries need a filter
2007  * if there are any junk attrs in the tlist. UPDATE and DELETE always
2008  * need a filter, since there's always a junk 'ctid' or 'wholerow'
2009  * attribute present --- no need to look first.
2010  *
2011  * If there are multiple result relations, each one needs its own junk
2012  * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
2013  * can't be fooled by some needing a filter and some not.
2014  *
2015  * This section of code is also a convenient place to verify that the
2016  * output of an INSERT or UPDATE matches the target table(s).
2017  */
2018  {
2019  bool junk_filter_needed = false;
2020 
2021  switch (operation)
2022  {
2023  case CMD_INSERT:
2024  foreach(l, subplan->targetlist)
2025  {
2026  TargetEntry *tle = (TargetEntry *) lfirst(l);
2027 
2028  if (tle->resjunk)
2029  {
2030  junk_filter_needed = true;
2031  break;
2032  }
2033  }
2034  break;
2035  case CMD_UPDATE:
2036  case CMD_DELETE:
2037  junk_filter_needed = true;
2038  break;
2039  default:
2040  elog(ERROR, "unknown operation");
2041  break;
2042  }
2043 
2044  if (junk_filter_needed)
2045  {
2046  resultRelInfo = mtstate->resultRelInfo;
2047  for (i = 0; i < nplans; i++)
2048  {
2049  JunkFilter *j;
2050 
2051  subplan = mtstate->mt_plans[i]->plan;
2052  if (operation == CMD_INSERT || operation == CMD_UPDATE)
2053  ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
2054  subplan->targetlist);
2055 
2056  j = ExecInitJunkFilter(subplan->targetlist,
2057  resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
2058  ExecInitExtraTupleSlot(estate));
2059 
2060  if (operation == CMD_UPDATE || operation == CMD_DELETE)
2061  {
2062  /* For UPDATE/DELETE, find the appropriate junk attr now */
2063  char relkind;
2064 
2065  relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
2066  if (relkind == RELKIND_RELATION ||
2067  relkind == RELKIND_MATVIEW ||
2068  relkind == RELKIND_PARTITIONED_TABLE)
2069  {
2070  j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
2071  if (!AttributeNumberIsValid(j->jf_junkAttNo))
2072  elog(ERROR, "could not find junk ctid column");
2073  }
2074  else if (relkind == RELKIND_FOREIGN_TABLE)
2075  {
2076  /*
2077  * When there is an AFTER trigger, there should be a
2078  * wholerow attribute.
2079  */
2080  j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
2081  }
2082  else
2083  {
2084  j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
2085  if (!AttributeNumberIsValid(j->jf_junkAttNo))
2086  elog(ERROR, "could not find junk wholerow column");
2087  }
2088  }
2089 
2090  resultRelInfo->ri_junkFilter = j;
2091  resultRelInfo++;
2092  }
2093  }
2094  else
2095  {
2096  if (operation == CMD_INSERT)
2097  ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc,
2098  subplan->targetlist);
2099  }
2100  }
2101 
2102  /*
2103  * Set up a tuple table slot for use for trigger output tuples. In a plan
2104  * containing multiple ModifyTable nodes, all can share one such slot, so
2105  * we keep it in the estate.
2106  */
2107  if (estate->es_trig_tuple_slot == NULL)
2108  estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);
2109 
2110  /*
2111  * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
2112  * to estate->es_auxmodifytables so that it will be run to completion by
2113  * ExecPostprocessPlan. (It'd actually work fine to add the primary
2114  * ModifyTable node too, but there's no need.) Note the use of lcons not
2115  * lappend: we need later-initialized ModifyTable nodes to be shut down
2116  * before earlier ones. This ensures that we don't throw away RETURNING
2117  * rows that need to be seen by a later CTE subplan.
2118  */
2119  if (!mtstate->canSetTag)
2120  estate->es_auxmodifytables = lcons(mtstate,
2121  estate->es_auxmodifytables);
2122 
2123  return mtstate;
2124 }
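/*
 * Illustrative sketch, not part of nodeModifyTable.c: a minimal
 * BeginForeignModify callback of the shape that the per-subplan loop in
 * ExecInitModifyTable calls for foreign-table result relations.  The names
 * DummyFdwModifyState and dummyBeginForeignModify are hypothetical; only the
 * callback signature and the ri_FdwState field come from fdwapi.h and
 * execnodes.h.
 */
#include "postgres.h"

#include "executor/executor.h"
#include "foreign/fdwapi.h"

typedef struct DummyFdwModifyState
{
    Relation    rel;            /* target foreign table */
    int         subplan_index;  /* which ModifyTable subplan feeds it */
} DummyFdwModifyState;

static void
dummyBeginForeignModify(ModifyTableState *mtstate,
                        ResultRelInfo *rinfo,
                        List *fdw_private,
                        int subplan_index,
                        int eflags)
{
    DummyFdwModifyState *fmstate;

    /* nothing to set up when running only for EXPLAIN (without ANALYZE) */
    if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
        return;

    fmstate = (DummyFdwModifyState *) palloc0(sizeof(DummyFdwModifyState));
    fmstate->rel = rinfo->ri_RelationDesc;
    fmstate->subplan_index = subplan_index;

    /* stash per-relation state where ExecForeignInsert/Update/Delete can find it */
    rinfo->ri_FdwState = fmstate;
}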
2125 
2126 /* ----------------------------------------------------------------
2127  * ExecEndModifyTable
2128  *
2129  * Shuts down the plan.
2130  *
2131  * Returns nothing of interest.
2132  * ----------------------------------------------------------------
2133  */
2134 void
2135 ExecEndModifyTable(ModifyTableState *node)
2136 {
2137  int i;
2138 
2139  /*
2140  * Allow any FDWs to shut down
2141  */
2142  for (i = 0; i < node->mt_nplans; i++)
2143  {
2144  ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
2145 
2146  if (!resultRelInfo->ri_usesFdwDirectModify &&
2147  resultRelInfo->ri_FdwRoutine != NULL &&
2148  resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
2149  resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
2150  resultRelInfo);
2151  }
2152 
2153  /*
2154  * Close all the partitioned tables, leaf partitions, and their indices
2155  *
2156  * Remember node->mt_partition_dispatch_info[0] corresponds to the root
2157  * partitioned table, which we must not try to close, because it is the
2158  * main target table of the query that will be closed by ExecEndPlan().
2159  * Also, tupslot is NULL for the root partitioned table.
2160  */
2161  for (i = 1; i < node->mt_num_dispatch; i++)
2162  {
2163  PartitionDispatch pd = node->mt_partition_dispatch_info[i];
2164 
2165  heap_close(pd->reldesc, NoLock);
2166  ExecDropSingleTupleTableSlot(pd->tupslot);
2167  }
2168  for (i = 0; i < node->mt_num_partitions; i++)
2169  {
2170  ResultRelInfo *resultRelInfo = node->mt_partitions + i;
2171 
2172  ExecCloseIndices(resultRelInfo);
2173  heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2174  }
2175 
2176  /* Release the standalone partition tuple descriptor, if any */
2177  if (node->mt_partition_tuple_slot)
2178  ExecDropSingleTupleTableSlot(node->mt_partition_tuple_slot);
2179 
2180  /*
2181  * Free the exprcontext
2182  */
2183  ExecFreeExprContext(&node->ps);
2184 
2185  /*
2186  * clean out the tuple table
2187  */
2188  ExecClearTuple(node->ps.ps_ResultTupleSlot);
2189 
2190  /*
2191  * Terminate EPQ execution if active
2192  */
2193  EvalPlanQualEnd(&node->mt_epqstate);
2194 
2195  /*
2196  * shut down subplans
2197  */
2198  for (i = 0; i < node->mt_nplans; i++)
2199  ExecEndNode(node->mt_plans[i]);
2200 }
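/*
 * Illustrative sketch, not part of nodeModifyTable.c: the matching
 * EndForeignModify callback that the loop at the top of ExecEndModifyTable
 * calls for each foreign-table result relation.  dummyEndForeignModify is a
 * hypothetical name; only the callback signature comes from fdwapi.h.
 */
static void
dummyEndForeignModify(EState *estate, ResultRelInfo *rinfo)
{
    /* release whatever BeginForeignModify stashed in ri_FdwState */
    if (rinfo->ri_FdwState != NULL)
        pfree(rinfo->ri_FdwState);
}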
2201 
2202 void
2203 ExecReScanModifyTable(ModifyTableState *node)
2204 {
2205  /*
2206  * Currently, we don't need to support rescan on ModifyTable nodes. The
2207  * semantics of that would be a bit debatable anyway.
2208  */
2209  elog(ERROR, "ExecReScanModifyTable is not implemented");
2210 }
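/*
 * Illustrative sketch, not part of nodeModifyTable.c: how the junk filter
 * built in ExecInitModifyTable is typically consumed once per row when an
 * UPDATE or DELETE on a plain table executes (ExecModifyTable, earlier in
 * this file, does essentially this).  example_fetch_ctid and its variable
 * names are hypothetical; ExecGetJunkAttribute and ExecFilterJunk are the
 * real executor helpers.
 */
static TupleTableSlot *
example_fetch_ctid(ResultRelInfo *resultRelInfo,
                   TupleTableSlot *planSlot,
                   ItemPointerData *tupleid_out)
{
    JunkFilter *junkfilter = resultRelInfo->ri_junkFilter;
    Datum       datum;
    bool        isNull;

    /* jf_junkAttNo was resolved to the "ctid" junk column at init time */
    datum = ExecGetJunkAttribute(planSlot, junkfilter->jf_junkAttNo, &isNull);
    if (isNull)
        elog(ERROR, "ctid is NULL");

    *tupleid_out = *((ItemPointer) DatumGetPointer(datum));

    /* strip the junk columns, leaving only the target table's own columns */
    return ExecFilterJunk(junkfilter, planSlot);
}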