PostgreSQL Source Code (git master)
execPartition.c
1 /*-------------------------------------------------------------------------
2  *
3  * execPartition.c
4  * Support routines for partitioning.
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/executor/execPartition.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/table.h"
17 #include "access/tableam.h"
18 #include "catalog/partition.h"
19 #include "executor/execPartition.h"
20 #include "executor/executor.h"
21 #include "executor/nodeModifyTable.h"
22 #include "foreign/fdwapi.h"
23 #include "mb/pg_wchar.h"
24 #include "miscadmin.h"
25 #include "partitioning/partbounds.h"
26 #include "partitioning/partdesc.h"
27 #include "partitioning/partprune.h"
28 #include "rewrite/rewriteManip.h"
29 #include "utils/acl.h"
30 #include "utils/lsyscache.h"
31 #include "utils/partcache.h"
32 #include "utils/rls.h"
33 #include "utils/ruleutils.h"
34 
35 
36 /*-----------------------
37  * PartitionTupleRouting - Encapsulates all information required to
38  * route a tuple inserted into a partitioned table to one of its leaf
39  * partitions.
40  *
41  * partition_root
42  * The partitioned table that's the target of the command.
43  *
44  * partition_dispatch_info
45  * Array of 'max_dispatch' elements containing a pointer to a
46  * PartitionDispatch object for every partitioned table touched by tuple
47  * routing. The entry for the target partitioned table is *always*
48  * present in the 0th element of this array. See comment for
49  * PartitionDispatchData->indexes for details on how this array is
50  * indexed.
51  *
52  * nonleaf_partitions
53  * Array of 'max_dispatch' elements containing pointers to fake
54  * ResultRelInfo objects for nonleaf partitions, useful for checking
55  * the partition constraint.
56  *
57  * num_dispatch
58  * The current number of items stored in the 'partition_dispatch_info'
59  * array. Also serves as the index of the next free array element for
60  * new PartitionDispatch objects that need to be stored.
61  *
62  * max_dispatch
63  * The current allocated size of the 'partition_dispatch_info' array.
64  *
65  * partitions
66  * Array of 'max_partitions' elements containing a pointer to a
67  * ResultRelInfo for every leaf partition touched by tuple routing.
68  * Some of these are pointers to ResultRelInfos which are borrowed out of
69  * the owning ModifyTableState node. The remainder have been built
70  * especially for tuple routing. See comment for
71  * PartitionDispatchData->indexes for details on how this array is
72  * indexed.
73  *
74  * is_borrowed_rel
75  * Array of 'max_partitions' booleans recording whether a given entry
76  * in 'partitions' is a ResultRelInfo pointer borrowed from the owning
77  * ModifyTableState node, rather than being built here.
78  *
79  * num_partitions
80  * The current number of items stored in the 'partitions' array. Also
81  * serves as the index of the next free array element for new
82  * ResultRelInfo objects that need to be stored.
83  *
84  * max_partitions
85  * The current allocated size of the 'partitions' array.
86  *
87  * memcxt
88  * Memory context used to allocate subsidiary structs.
89  *-----------------------
90  */
91 struct PartitionTupleRouting
92 {
93  Relation partition_root;
94  PartitionDispatch *partition_dispatch_info;
95  ResultRelInfo **nonleaf_partitions;
96  int num_dispatch;
97  int max_dispatch;
98  ResultRelInfo **partitions;
99  bool *is_borrowed_rel;
100  int num_partitions;
101  int max_partitions;
102  MemoryContext memcxt;
103 };
104 
105 /*-----------------------
106  * PartitionDispatch - information about one partitioned table in a partition
107  * hierarchy required to route a tuple to any of its partitions. A
108  * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
109  * struct and stored inside its 'partition_dispatch_info' array.
110  *
111  * reldesc
112  * Relation descriptor of the table
113  *
114  * key
115  * Partition key information of the table
116  *
117  * keystate
118  * Execution state required for expressions in the partition key
119  *
120  * partdesc
121  * Partition descriptor of the table
122  *
123  * tupslot
124  * A standalone TupleTableSlot initialized with this table's tuple
125  * descriptor, or NULL if no tuple conversion from the parent is
126  * required.
127  *
128  * tupmap
129  * TupleConversionMap to convert from the parent's rowtype to this table's
130  * rowtype (when extracting the partition key of a tuple just before
131  * routing it through this table). A NULL value is stored if no tuple
132  * conversion is required.
133  *
134  * indexes
135  * Array of partdesc->nparts elements. For leaf partitions the index
136  * corresponds to the partition's ResultRelInfo in the encapsulating
137  * PartitionTupleRouting's partitions array. For partitioned partitions,
138  * the index corresponds to the PartitionDispatch for it in its
139  * partition_dispatch_info array. -1 indicates we've not yet allocated
140  * anything in PartitionTupleRouting for the partition.
141  *-----------------------
142  */
143 typedef struct PartitionDispatchData
144 {
145  Relation reldesc;
146  PartitionKey key;
147  List *keystate; /* list of ExprState */
148  PartitionDesc partdesc;
149  TupleTableSlot *tupslot;
150  AttrMap *tupmap;
151  int indexes[FLEXIBLE_ARRAY_MEMBER];
152 } PartitionDispatchData;
153 
154 
155 static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
156  EState *estate, PartitionTupleRouting *proute,
157  PartitionDispatch dispatch,
158  ResultRelInfo *rootResultRelInfo,
159  int partidx);
160 static void ExecInitRoutingInfo(ModifyTableState *mtstate,
161  EState *estate,
162  PartitionTupleRouting *proute,
163  PartitionDispatch dispatch,
164  ResultRelInfo *partRelInfo,
165  int partidx,
166  bool is_borrowed_rel);
167 static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate,
168  PartitionTupleRouting *proute,
169  Oid partoid, PartitionDispatch parent_pd,
170  int partidx, ResultRelInfo *rootResultRelInfo);
171 static void FormPartitionKeyDatum(PartitionDispatch pd,
172  TupleTableSlot *slot,
173  EState *estate,
174  Datum *values,
175  bool *isnull);
176 static int get_partition_for_tuple(PartitionDispatch pd, Datum *values,
177  bool *isnull);
178 static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
179  Datum *values,
180  bool *isnull,
181  int maxfieldlen);
182 static List *adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri);
183 static List *adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap);
184 static PartitionPruneState *CreatePartitionPruneState(PlanState *planstate,
185  PartitionPruneInfo *pruneinfo);
186 static void InitPartitionPruneContext(PartitionPruneContext *context,
187  List *pruning_steps,
188  PartitionDesc partdesc,
189  PartitionKey partkey,
190  PlanState *planstate,
191  ExprContext *econtext);
192 static void PartitionPruneFixSubPlanMap(PartitionPruneState *prunestate,
193  Bitmapset *initially_valid_subplans,
194  int n_total_subplans);
195 static void find_matching_subplans_recurse(PartitionPruningData *prunedata,
196  PartitionedRelPruningData *pprune,
197  bool initial_prune,
198  Bitmapset **validsubplans);
199 
200 
201 /*
202  * ExecSetupPartitionTupleRouting - sets up information needed during
203  * tuple routing for partitioned tables, encapsulates it in
204  * PartitionTupleRouting, and returns it.
205  *
206  * Callers must use the returned PartitionTupleRouting during calls to
207  * ExecFindPartition(). The actual ResultRelInfo for a partition is only
208  * allocated when the partition is found for the first time.
209  *
210  * The current memory context is used to allocate this struct and all
211  * subsidiary structs that will be allocated from it later on. Typically
212  * it should be estate->es_query_cxt.
213  */
214 PartitionTupleRouting *
215 ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
216 {
217  PartitionTupleRouting *proute;
218 
219  /*
220  * Here we attempt to expend as little effort as possible in setting up
221  * the PartitionTupleRouting. Each partition's ResultRelInfo is built on
222  * demand, only when we actually need to route a tuple to that partition.
223  * The reason for this is that a common case is for INSERT to insert a
224  * single tuple into a partitioned table and this must be fast.
225  */
226  proute = (PartitionTupleRouting *) palloc0(sizeof(PartitionTupleRouting));
227  proute->partition_root = rel;
228  proute->memcxt = CurrentMemoryContext;
229  /* Rest of members initialized by zeroing */
230 
231  /*
232  * Initialize this table's PartitionDispatch object. Here we pass in the
233  * parent as NULL as we don't need to care about any parent of the target
234  * partitioned table.
235  */
236  ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
237  NULL, 0, NULL);
238 
239  return proute;
240 }
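/*
 * Illustrative sketch (editor's addition, not part of the PostgreSQL
 * sources): a caller such as ExecInitModifyTable() or COPY FROM uses the
 * routing API in this file roughly as follows; the variable names here are
 * hypothetical.
 *
 *     PartitionTupleRouting *proute;
 *     ResultRelInfo *rri;
 *
 *     proute = ExecSetupPartitionTupleRouting(estate, rootRel);
 *     for (each tuple placed in 'slot')
 *     {
 *         rri = ExecFindPartition(mtstate, rootResultRelInfo, proute,
 *                                 slot, estate);
 *         ... insert the tuple into rri's relation ...
 *     }
 *     ExecCleanupTupleRouting(mtstate, proute);
 *
 * Because ResultRelInfos are built lazily, inserting a single row into a
 * one-level hierarchy leaves num_dispatch == 1 and num_partitions == 1.
 */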
241 
242 /*
243  * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
244  * the tuple contained in *slot should belong to.
245  *
246  * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
247  * one up or reuse one from mtstate's resultRelInfo array. When reusing a
248  * ResultRelInfo from the mtstate we verify that the relation is a valid
249  * target for INSERTs and initialize tuple routing information.
250  *
251  * rootResultRelInfo is the relation named in the query.
252  *
253  * estate must be non-NULL; we'll need it to compute any expressions in the
254  * partition keys. Also, its per-tuple contexts are used as evaluation
255  * scratch space.
256  *
257  * If no leaf partition is found, this routine errors out with the appropriate
258  * error message. An error may also be raised if the found target partition
259  * is not a valid target for an INSERT.
260  */
261 ResultRelInfo *
262 ExecFindPartition(ModifyTableState *mtstate,
263  ResultRelInfo *rootResultRelInfo,
264  PartitionTupleRouting *proute,
265  TupleTableSlot *slot, EState *estate)
266 {
267  PartitionDispatch *pd = proute->partition_dispatch_info;
268  Datum values[PARTITION_MAX_KEYS];
269  bool isnull[PARTITION_MAX_KEYS];
270  Relation rel;
271  PartitionDispatch dispatch;
272  PartitionDesc partdesc;
273  ExprContext *ecxt = GetPerTupleExprContext(estate);
274  TupleTableSlot *ecxt_scantuple_saved = ecxt->ecxt_scantuple;
275  TupleTableSlot *rootslot = slot;
276  TupleTableSlot *myslot = NULL;
277  MemoryContext oldcxt;
278  ResultRelInfo *rri = NULL;
279 
280  /* use per-tuple context here to avoid leaking memory */
281  oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
282 
283  /*
284  * First check the root table's partition constraint, if any. No point in
285  * routing the tuple if it doesn't belong in the root table itself.
286  */
287  if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition)
288  ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
289 
290  /* start with the root partitioned table */
291  dispatch = pd[0];
292  while (dispatch != NULL)
293  {
294  int partidx = -1;
295  bool is_leaf;
296 
297  CHECK_FOR_INTERRUPTS();
298 
299  rel = dispatch->reldesc;
300  partdesc = dispatch->partdesc;
301 
302  /*
303  * Extract partition key from tuple. Expression evaluation machinery
304  * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
305  * point to the correct tuple slot. The slot might have changed from
306  * what was used for the parent table if the table of the current
307  * partitioning level has different tuple descriptor from the parent.
308  * So update ecxt_scantuple accordingly.
309  */
310  ecxt->ecxt_scantuple = slot;
311  FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
312 
313  /*
314  * If this partitioned table has no partitions or no partition for
315  * these values, error out.
316  */
317  if (partdesc->nparts == 0 ||
318  (partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
319  {
320  char *val_desc;
321 
322  val_desc = ExecBuildSlotPartitionKeyDescription(rel,
323  values, isnull, 64);
324  Assert(OidIsValid(RelationGetRelid(rel)));
325  ereport(ERROR,
326  (errcode(ERRCODE_CHECK_VIOLATION),
327  errmsg("no partition of relation \"%s\" found for row",
328  RelationGetRelationName(rel)),
329  val_desc ?
330  errdetail("Partition key of the failing row contains %s.",
331  val_desc) : 0,
332  errtable(rel)));
333  }
334 
335  is_leaf = partdesc->is_leaf[partidx];
336  if (is_leaf)
337  {
338  /*
339  * We've reached the leaf -- hurray, we're done. Look to see if
340  * we've already got a ResultRelInfo for this partition.
341  */
342  if (likely(dispatch->indexes[partidx] >= 0))
343  {
344  /* ResultRelInfo already built */
345  Assert(dispatch->indexes[partidx] < proute->num_partitions);
346  rri = proute->partitions[dispatch->indexes[partidx]];
347  }
348  else
349  {
350  /*
351  * If the partition is known in the owning ModifyTableState
352  * node, we can re-use that ResultRelInfo instead of creating
353  * a new one with ExecInitPartitionInfo().
354  */
355  rri = ExecLookupResultRelByOid(mtstate,
356  partdesc->oids[partidx],
357  true, false);
358  if (rri)
359  {
360  /* Verify this ResultRelInfo allows INSERTs */
361  CheckValidResultRel(rri, CMD_INSERT, NIL);
362 
363  /*
364  * Initialize information needed to insert this and
365  * subsequent tuples routed to this partition.
366  */
367  ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
368  rri, partidx, true);
369  }
370  else
371  {
372  /* We need to create a new one. */
373  rri = ExecInitPartitionInfo(mtstate, estate, proute,
374  dispatch,
375  rootResultRelInfo, partidx);
376  }
377  }
378  Assert(rri != NULL);
379 
380  /* Signal to terminate the loop */
381  dispatch = NULL;
382  }
383  else
384  {
385  /*
386  * Partition is a sub-partitioned table; get the PartitionDispatch
387  */
388  if (likely(dispatch->indexes[partidx] >= 0))
389  {
390  /* Already built. */
391  Assert(dispatch->indexes[partidx] < proute->num_dispatch);
392 
393  rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
394 
395  /*
396  * Move down to the next partition level and search again
397  * until we find a leaf partition that matches this tuple
398  */
399  dispatch = pd[dispatch->indexes[partidx]];
400  }
401  else
402  {
403  /* Not yet built. Do that now. */
404  PartitionDispatch subdispatch;
405 
406  /*
407  * Create the new PartitionDispatch. We pass the current one
408  * in as the parent PartitionDispatch
409  */
410  subdispatch = ExecInitPartitionDispatchInfo(estate,
411  proute,
412  partdesc->oids[partidx],
413  dispatch, partidx,
414  mtstate->rootResultRelInfo);
415  Assert(dispatch->indexes[partidx] >= 0 &&
416  dispatch->indexes[partidx] < proute->num_dispatch);
417 
418  rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
419  dispatch = subdispatch;
420  }
421 
422  /*
423  * Convert the tuple to the new parent's layout, if different from
424  * the previous parent.
425  */
426  if (dispatch->tupslot)
427  {
428  AttrMap *map = dispatch->tupmap;
429  TupleTableSlot *tempslot = myslot;
430 
431  myslot = dispatch->tupslot;
432  slot = execute_attr_map_slot(map, slot, myslot);
433 
434  if (tempslot != NULL)
435  ExecClearTuple(tempslot);
436  }
437  }
438 
439  /*
440  * If this partition is the default one, we must check its partition
441  * constraint now, which may have changed concurrently due to
442  * partitions being added to the parent.
443  *
444  * (We do this here, and do not rely on ExecInsert doing it, because
445  * we don't want to miss doing it for non-leaf partitions.)
446  */
447  if (partidx == partdesc->boundinfo->default_index)
448  {
449  /*
450  * The tuple must match the partition's layout for the constraint
451  * expression to be evaluated successfully. If the partition is
452  * sub-partitioned, that would already be the case due to the code
453  * above, but for a leaf partition the tuple still matches the
454  * parent's layout.
455  *
456  * Note that we have a map to convert from root to current
457  * partition, but not from immediate parent to current partition.
458  * So if we have to convert, do it from the root slot; if not, use
459  * the root slot as-is.
460  */
461  if (is_leaf)
462  {
463  TupleConversionMap *map = ExecGetRootToChildMap(rri, estate);
464 
465  if (map)
466  slot = execute_attr_map_slot(map->attrMap, rootslot,
467  rri->ri_PartitionTupleSlot);
468  else
469  slot = rootslot;
470  }
471 
472  ExecPartitionCheck(rri, slot, estate, true);
473  }
474  }
475 
476  /* Release the tuple in the lowest parent's dedicated slot. */
477  if (myslot != NULL)
478  ExecClearTuple(myslot);
479  /* and restore ecxt's scantuple */
480  ecxt->ecxt_scantuple = ecxt_scantuple_saved;
481  MemoryContextSwitchTo(oldcxt);
482 
483  return rri;
484 }
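/*
 * Worked example (editor's sketch, hypothetical schema): given
 *
 *     CREATE TABLE root (a int, b int) PARTITION BY RANGE (a);
 *     CREATE TABLE sub PARTITION OF root
 *         FOR VALUES FROM (0) TO (100) PARTITION BY LIST (b);
 *     CREATE TABLE leaf PARTITION OF sub FOR VALUES IN (1);
 *
 * routing (a, b) = (42, 1) first consults pd[0] (for "root"), whose
 * indexes[] entry for "sub" points at the PartitionDispatch created for it
 * in partition_dispatch_info; the loop then repeats at "sub", whose
 * indexes[] entry for "leaf" points at that leaf's ResultRelInfo in the
 * partitions array, ending the descent.
 */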
485 
486 /*
487  * ExecInitPartitionInfo
488  * Lock the partition and initialize ResultRelInfo. Also setup other
489  * information for the partition and store it in the next empty slot in
490  * the proute->partitions array.
491  *
492  * Returns the ResultRelInfo
493  */
494 static ResultRelInfo *
495 ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
496  PartitionTupleRouting *proute,
497  PartitionDispatch dispatch,
498  ResultRelInfo *rootResultRelInfo,
499  int partidx)
500 {
501  ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
502  Oid partOid = dispatch->partdesc->oids[partidx];
503  Relation partrel;
504  int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
505  Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
506  ResultRelInfo *leaf_part_rri;
507  MemoryContext oldcxt;
508  AttrMap *part_attmap = NULL;
509  bool found_whole_row;
510 
511  oldcxt = MemoryContextSwitchTo(proute->memcxt);
512 
513  partrel = table_open(partOid, RowExclusiveLock);
514 
515  leaf_part_rri = makeNode(ResultRelInfo);
516  InitResultRelInfo(leaf_part_rri,
517  partrel,
518  0,
519  rootResultRelInfo,
520  estate->es_instrument);
521 
522  /*
523  * Verify result relation is a valid target for an INSERT. An UPDATE of a
524  * partition-key becomes a DELETE+INSERT operation, so this check is still
525  * required when the operation is CMD_UPDATE.
526  */
527  CheckValidResultRel(leaf_part_rri, CMD_INSERT, NIL);
528 
529  /*
530  * Open partition indices. The user may have asked to check for conflicts
531  * within this leaf partition and do "nothing" instead of throwing an
532  * error. Be prepared in that case by initializing the index information
533  * needed by ExecInsert() to perform speculative insertions.
534  */
535  if (partrel->rd_rel->relhasindex &&
536  leaf_part_rri->ri_IndexRelationDescs == NULL)
537  ExecOpenIndices(leaf_part_rri,
538  (node != NULL &&
539  node->onConflictAction != ONCONFLICT_NONE));
540 
541  /*
542  * Build WITH CHECK OPTION constraints for the partition. Note that we
543  * didn't build the withCheckOptionList for partitions within the planner,
544  * but simple translation of varattnos will suffice. This only occurs for
545  * the INSERT case or in the case of UPDATE/MERGE tuple routing where we
546  * didn't find a result rel to reuse.
547  */
548  if (node && node->withCheckOptionLists != NIL)
549  {
550  List *wcoList;
551  List *wcoExprs = NIL;
552  ListCell *ll;
553 
554  /*
555  * In the case of INSERT on a partitioned table, there is only one
556  * plan. Likewise, there is only one WCO list, not one per partition.
557  * For UPDATE/MERGE, there are as many WCO lists as there are plans.
558  */
559  Assert((node->operation == CMD_INSERT &&
560  list_length(node->withCheckOptionLists) == 1 &&
561  list_length(node->resultRelations) == 1) ||
562  (node->operation == CMD_UPDATE &&
563  list_length(node->withCheckOptionLists) ==
564  list_length(node->resultRelations)) ||
565  (node->operation == CMD_MERGE &&
566  list_length(node->withCheckOptionLists) ==
567  list_length(node->resultRelations)));
568 
569  /*
570  * Use the WCO list of the first plan as a reference to calculate
571  * attno's for the WCO list of this partition. In the INSERT case,
572  * that refers to the root partitioned table, whereas in the UPDATE
573  * tuple routing case, that refers to the first partition in the
574  * mtstate->resultRelInfo array. In any case, both that relation and
575  * this partition should have the same columns, so we should be able
576  * to map attributes successfully.
577  */
578  wcoList = linitial(node->withCheckOptionLists);
579 
580  /*
581  * Convert Vars in it to contain this partition's attribute numbers.
582  */
583  part_attmap =
584  build_attrmap_by_name(RelationGetDescr(partrel),
585  RelationGetDescr(firstResultRel),
586  false);
587  wcoList = (List *)
588  map_variable_attnos((Node *) wcoList,
589  firstVarno, 0,
590  part_attmap,
591  RelationGetForm(partrel)->reltype,
592  &found_whole_row);
593  /* We ignore the value of found_whole_row. */
594 
595  foreach(ll, wcoList)
596  {
597  WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll));
598  ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
599  &mtstate->ps);
600 
601  wcoExprs = lappend(wcoExprs, wcoExpr);
602  }
603 
604  leaf_part_rri->ri_WithCheckOptions = wcoList;
605  leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
606  }
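 /*
 * Editor's illustration (hypothetical tables): if the parent is
 * (a int, b int) but the partition was created as (b int, a int), a
 * WCO qual "a > 0" referencing parent attno 1 must be rewritten by
 * map_variable_attnos() to reference partition attno 2. The same
 * translation is applied to the RETURNING, ON CONFLICT, and MERGE
 * expressions handled below.
 */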
607 
608  /*
609  * Build the RETURNING projection for the partition. Note that we didn't
610  * build the returningList for partitions within the planner, but simple
611  * translation of varattnos will suffice. This only occurs for the INSERT
612  * case or in the case of UPDATE/MERGE tuple routing where we didn't find
613  * a result rel to reuse.
614  */
615  if (node && node->returningLists != NIL)
616  {
617  TupleTableSlot *slot;
618  ExprContext *econtext;
619  List *returningList;
620 
621  /* See the comment above for WCO lists. */
622  Assert((node->operation == CMD_INSERT &&
623  list_length(node->returningLists) == 1 &&
624  list_length(node->resultRelations) == 1) ||
625  (node->operation == CMD_UPDATE &&
626  list_length(node->returningLists) ==
627  list_length(node->resultRelations)) ||
628  (node->operation == CMD_MERGE &&
629  list_length(node->returningLists) ==
630  list_length(node->resultRelations)));
631 
632  /*
633  * Use the RETURNING list of the first plan as a reference to
634  * calculate attno's for the RETURNING list of this partition. See
635  * the comment above for WCO lists for more details on why this is
636  * okay.
637  */
638  returningList = linitial(node->returningLists);
639 
640  /*
641  * Convert Vars in it to contain this partition's attribute numbers.
642  */
643  if (part_attmap == NULL)
644  part_attmap =
645  build_attrmap_by_name(RelationGetDescr(partrel),
646  RelationGetDescr(firstResultRel),
647  false);
648  returningList = (List *)
649  map_variable_attnos((Node *) returningList,
650  firstVarno, 0,
651  part_attmap,
652  RelationGetForm(partrel)->reltype,
653  &found_whole_row);
654  /* We ignore the value of found_whole_row. */
655 
656  leaf_part_rri->ri_returningList = returningList;
657 
658  /*
659  * Initialize the projection itself.
660  *
661  * Use the slot and the expression context that would have been set up
662  * in ExecInitModifyTable() for projection's output.
663  */
664  Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
665  slot = mtstate->ps.ps_ResultTupleSlot;
666  Assert(mtstate->ps.ps_ExprContext != NULL);
667  econtext = mtstate->ps.ps_ExprContext;
668  leaf_part_rri->ri_projectReturning =
669  ExecBuildProjectionInfo(returningList, econtext, slot,
670  &mtstate->ps, RelationGetDescr(partrel));
671  }
672 
673  /* Set up information needed for routing tuples to the partition. */
674  ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
675  leaf_part_rri, partidx, false);
676 
677  /*
678  * If there is an ON CONFLICT clause, initialize state for it.
679  */
680  if (node && node->onConflictAction != ONCONFLICT_NONE)
681  {
682  TupleDesc partrelDesc = RelationGetDescr(partrel);
683  ExprContext *econtext = mtstate->ps.ps_ExprContext;
684  ListCell *lc;
685  List *arbiterIndexes = NIL;
686 
687  /*
688  * If there is a list of arbiter indexes, map it to a list of indexes
689  * in the partition. We do that by scanning the partition's index
690  * list and searching for ancestry relationships to each index in the
691  * ancestor table.
692  */
693  if (rootResultRelInfo->ri_onConflictArbiterIndexes != NIL)
694  {
695  List *childIdxs;
696 
697  childIdxs = RelationGetIndexList(leaf_part_rri->ri_RelationDesc);
698 
699  foreach(lc, childIdxs)
700  {
701  Oid childIdx = lfirst_oid(lc);
702  List *ancestors;
703  ListCell *lc2;
704 
705  ancestors = get_partition_ancestors(childIdx);
706  foreach(lc2, rootResultRelInfo->ri_onConflictArbiterIndexes)
707  {
708  if (list_member_oid(ancestors, lfirst_oid(lc2)))
709  arbiterIndexes = lappend_oid(arbiterIndexes, childIdx);
710  }
711  list_free(ancestors);
712  }
713  }
714 
715  /*
716  * If the resulting lists are of unequal length, something is wrong.
717  * (This shouldn't happen, since arbiter index selection should not
718  * pick up an invalid index.)
719  */
720  if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
721  list_length(arbiterIndexes))
722  elog(ERROR, "invalid arbiter index list");
723  leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
724 
725  /*
726  * In the DO UPDATE case, we have some more state to initialize.
727  */
728  if (node->onConflictAction == ONCONFLICT_UPDATE)
729  {
730  OnConflictSetState *onconfl = makeNode(OnConflictSetState);
731  TupleConversionMap *map;
732 
733  map = ExecGetRootToChildMap(leaf_part_rri, estate);
734 
735  Assert(node->onConflictSet != NIL);
736  Assert(rootResultRelInfo->ri_onConflict != NULL);
737 
738  leaf_part_rri->ri_onConflict = onconfl;
739 
740  /*
741  * Need a separate existing slot for each partition, as the
742  * partition could be of a different AM, even if the tuple
743  * descriptors match.
744  */
745  onconfl->oc_Existing =
746  table_slot_create(leaf_part_rri->ri_RelationDesc,
747  &mtstate->ps.state->es_tupleTable);
748 
749  /*
750  * If the partition's tuple descriptor matches exactly the root
751  * parent (the common case), we can re-use most of the parent's ON
752  * CONFLICT SET state, skipping a bunch of work. Otherwise, we
753  * need to create state specific to this partition.
754  */
755  if (map == NULL)
756  {
757  /*
758  * It's safe to reuse these from the partition root, as we
759  * only process one tuple at a time (therefore we won't
760  * overwrite needed data in slots), and the results of
761  * projections are independent of the underlying storage.
762  * Projections and where clauses themselves don't store state
763  * / are independent of the underlying storage.
764  */
765  onconfl->oc_ProjSlot =
766  rootResultRelInfo->ri_onConflict->oc_ProjSlot;
767  onconfl->oc_ProjInfo =
768  rootResultRelInfo->ri_onConflict->oc_ProjInfo;
769  onconfl->oc_WhereClause =
770  rootResultRelInfo->ri_onConflict->oc_WhereClause;
771  }
772  else
773  {
774  List *onconflset;
775  List *onconflcols;
776 
777  /*
778  * Translate expressions in onConflictSet to account for
779  * different attribute numbers. For that, map partition
780  * varattnos twice: first to catch the EXCLUDED
781  * pseudo-relation (INNER_VAR), and second to handle the main
782  * target relation (firstVarno).
783  */
784  onconflset = copyObject(node->onConflictSet);
785  if (part_attmap == NULL)
786  part_attmap =
787  build_attrmap_by_name(RelationGetDescr(partrel),
788  RelationGetDescr(firstResultRel),
789  false);
790  onconflset = (List *)
791  map_variable_attnos((Node *) onconflset,
792  INNER_VAR, 0,
793  part_attmap,
794  RelationGetForm(partrel)->reltype,
795  &found_whole_row);
796  /* We ignore the value of found_whole_row. */
797  onconflset = (List *)
798  map_variable_attnos((Node *) onconflset,
799  firstVarno, 0,
800  part_attmap,
801  RelationGetForm(partrel)->reltype,
802  &found_whole_row);
803  /* We ignore the value of found_whole_row. */
804 
805  /* Finally, adjust the target colnos to match the partition. */
806  onconflcols = adjust_partition_colnos(node->onConflictCols,
807  leaf_part_rri);
808 
809  /* create the tuple slot for the UPDATE SET projection */
810  onconfl->oc_ProjSlot =
811  table_slot_create(partrel,
812  &mtstate->ps.state->es_tupleTable);
813 
814  /* build UPDATE SET projection state */
815  onconfl->oc_ProjInfo =
816  ExecBuildUpdateProjection(onconflset,
817  true,
818  onconflcols,
819  partrelDesc,
820  econtext,
821  onconfl->oc_ProjSlot,
822  &mtstate->ps);
823 
824  /*
825  * If there is a WHERE clause, initialize state where it will
826  * be evaluated, mapping the attribute numbers appropriately.
827  * As with onConflictSet, we need to map partition varattnos
828  * to the partition's tupdesc.
829  */
830  if (node->onConflictWhere)
831  {
832  List *clause;
833 
834  clause = copyObject((List *) node->onConflictWhere);
835  clause = (List *)
836  map_variable_attnos((Node *) clause,
837  INNER_VAR, 0,
838  part_attmap,
839  RelationGetForm(partrel)->reltype,
840  &found_whole_row);
841  /* We ignore the value of found_whole_row. */
842  clause = (List *)
843  map_variable_attnos((Node *) clause,
844  firstVarno, 0,
845  part_attmap,
846  RelationGetForm(partrel)->reltype,
847  &found_whole_row);
848  /* We ignore the value of found_whole_row. */
849  onconfl->oc_WhereClause =
850  ExecInitQual((List *) clause, &mtstate->ps);
851  }
852  }
853  }
854  }
855 
856  /*
857  * Since we've just initialized this ResultRelInfo, it's not in any list
858  * attached to the estate as yet. Add it, so that it can be found later.
859  *
860  * Note that the entries in this list appear in no predetermined order,
861  * because partition result rels are initialized as and when they're
862  * needed.
863  */
865  estate->es_tuple_routing_result_relations =
866  lappend(estate->es_tuple_routing_result_relations,
867  leaf_part_rri);
868 
869  /*
870  * Initialize information about this partition that's needed to handle
871  * MERGE. We take the "first" result relation's mergeActionList as
872  * a reference and make a copy for this relation, converting stuff that
873  * references attribute numbers to match this relation's.
874  *
875  * This duplicates much of the logic in ExecInitMerge(), so if something
876  * changes there, look here too.
877  */
878  if (node && node->operation == CMD_MERGE)
879  {
880  List *firstMergeActionList = linitial(node->mergeActionLists);
881  ListCell *lc;
882  ExprContext *econtext = mtstate->ps.ps_ExprContext;
883  Node *joinCondition;
884 
885  if (part_attmap == NULL)
886  part_attmap =
887  build_attrmap_by_name(RelationGetDescr(partrel),
888  RelationGetDescr(firstResultRel),
889  false);
890 
891  if (unlikely(!leaf_part_rri->ri_projectNewInfoValid))
892  ExecInitMergeTupleSlots(mtstate, leaf_part_rri);
893 
894  /* Initialize state for join condition checking. */
895  joinCondition =
896  map_variable_attnos(linitial(node->mergeJoinConditions),
897  firstVarno, 0,
898  part_attmap,
899  RelationGetForm(partrel)->reltype,
900  &found_whole_row);
901  /* We ignore the value of found_whole_row. */
902  leaf_part_rri->ri_MergeJoinCondition =
903  ExecInitQual((List *) joinCondition, &mtstate->ps);
904 
905  foreach(lc, firstMergeActionList)
906  {
907  /* Make a copy for this relation to be safe. */
908  MergeAction *action = copyObject(lfirst_node(MergeAction, lc));
909  MergeActionState *action_state;
910 
911  /* Generate the action's state for this relation */
912  action_state = makeNode(MergeActionState);
913  action_state->mas_action = action;
914 
915  /* And put the action in the appropriate list */
916  leaf_part_rri->ri_MergeActions[action->matchKind] =
917  lappend(leaf_part_rri->ri_MergeActions[action->matchKind],
918  action_state);
919 
920  switch (action->commandType)
921  {
922  case CMD_INSERT:
923 
924  /*
925  * ExecCheckPlanOutput() was already done on the targetlist
926  * when the "first" result relation was initialized, and it
927  * is the same for all result relations.
928  */
929  action_state->mas_proj =
930  ExecBuildProjectionInfo(action->targetList, econtext,
931  leaf_part_rri->ri_newTupleSlot,
932  &mtstate->ps,
933  RelationGetDescr(partrel));
934  break;
935  case CMD_UPDATE:
936 
937  /*
938  * Convert updateColnos from "first" result relation
939  * attribute numbers to this result rel's.
940  */
941  if (part_attmap)
942  action->updateColnos =
943  adjust_partition_colnos_using_map(action->updateColnos,
944  part_attmap);
945  action_state->mas_proj =
946  ExecBuildUpdateProjection(action->targetList,
947  true,
948  action->updateColnos,
949  RelationGetDescr(leaf_part_rri->ri_RelationDesc),
950  econtext,
951  leaf_part_rri->ri_newTupleSlot,
952  NULL);
953  break;
954  case CMD_DELETE:
955  break;
956 
957  default:
958  elog(ERROR, "unknown action in MERGE WHEN clause");
959  }
960 
961  /* found_whole_row intentionally ignored. */
962  action->qual =
963  map_variable_attnos(action->qual,
964  firstVarno, 0,
965  part_attmap,
966  RelationGetForm(partrel)->reltype,
967  &found_whole_row);
968  action_state->mas_whenqual =
969  ExecInitQual((List *) action->qual, &mtstate->ps);
970  }
971  }
972  MemoryContextSwitchTo(oldcxt);
973 
974  return leaf_part_rri;
975 }
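/*
 * Editor's sketch (hypothetical schema) of the arbiter-index mapping done
 * above: for
 *
 *     INSERT INTO root VALUES (...) ON CONFLICT (a) DO NOTHING;
 *
 * the planner records the unique index on root(a) as the arbiter. When a
 * tuple is routed to a leaf, the loop over RelationGetIndexList() finds
 * the leaf's own index whose get_partition_ancestors() chain contains the
 * root index's OID, and uses that as the leaf-level arbiter.
 */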
976 
977 /*
978  * ExecInitRoutingInfo
979  * Set up information needed for translating tuples between root
980  * partitioned table format and partition format, and keep track of it
981  * in PartitionTupleRouting.
982  */
983 static void
984 ExecInitRoutingInfo(ModifyTableState *mtstate,
985  EState *estate,
986  PartitionTupleRouting *proute,
987  PartitionDispatch dispatch,
988  ResultRelInfo *partRelInfo,
989  int partidx,
990  bool is_borrowed_rel)
991 {
992  MemoryContext oldcxt;
993  int rri_index;
994 
995  oldcxt = MemoryContextSwitchTo(proute->memcxt);
996 
997  /*
998  * Set up tuple conversion between root parent and the partition if the
999  * two have different rowtypes. If conversion is indeed required, also
1000  * initialize a slot dedicated to storing this partition's converted
1001  * tuples. Various operations that are applied to tuples after routing,
1002  * such as checking constraints, will refer to this slot.
1003  */
1004  if (ExecGetRootToChildMap(partRelInfo, estate) != NULL)
1005  {
1006  Relation partrel = partRelInfo->ri_RelationDesc;
1007 
1008  /*
1009  * This pins the partition's TupleDesc, which will be released at the
1010  * end of the command.
1011  */
1012  partRelInfo->ri_PartitionTupleSlot =
1013  table_slot_create(partrel, &estate->es_tupleTable);
1014  }
1015  else
1016  partRelInfo->ri_PartitionTupleSlot = NULL;
1017 
1018  /*
1019  * If the partition is a foreign table, let the FDW init itself for
1020  * routing tuples to the partition.
1021  */
1022  if (partRelInfo->ri_FdwRoutine != NULL &&
1023  partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
1024  partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
1025 
1026  /*
1027  * Determine if the FDW supports batch insert and determine the batch size
1028  * (a FDW may support batching, but it may be disabled for the
1029  * server/table or for this particular query).
1030  *
1031  * If the FDW does not support batching, we set the batch size to 1.
1032  */
1033  if (partRelInfo->ri_FdwRoutine != NULL &&
1034  partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
1035  partRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
1036  partRelInfo->ri_BatchSize =
1037  partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(partRelInfo);
1038  else
1039  partRelInfo->ri_BatchSize = 1;
1040 
1041  Assert(partRelInfo->ri_BatchSize >= 1);
1042 
1043  partRelInfo->ri_CopyMultiInsertBuffer = NULL;
1044 
1045  /*
1046  * Keep track of it in the PartitionTupleRouting->partitions array.
1047  */
1048  Assert(dispatch->indexes[partidx] == -1);
1049 
1050  rri_index = proute->num_partitions++;
1051 
1052  /* Allocate or enlarge the array, as needed */
1053  if (proute->num_partitions >= proute->max_partitions)
1054  {
1055  if (proute->max_partitions == 0)
1056  {
1057  proute->max_partitions = 8;
1058  proute->partitions = (ResultRelInfo **)
1059  palloc(sizeof(ResultRelInfo *) * proute->max_partitions);
1060  proute->is_borrowed_rel = (bool *)
1061  palloc(sizeof(bool) * proute->max_partitions);
1062  }
1063  else
1064  {
1065  proute->max_partitions *= 2;
1066  proute->partitions = (ResultRelInfo **)
1067  repalloc(proute->partitions, sizeof(ResultRelInfo *) *
1068  proute->max_partitions);
1069  proute->is_borrowed_rel = (bool *)
1070  repalloc(proute->is_borrowed_rel, sizeof(bool) *
1071  proute->max_partitions);
1072  }
1073  }
1074 
1075  proute->partitions[rri_index] = partRelInfo;
1076  proute->is_borrowed_rel[rri_index] = is_borrowed_rel;
1077  dispatch->indexes[partidx] = rri_index;
1078 
1079  MemoryContextSwitchTo(oldcxt);
1080 }
1081 
1082 /*
1083  * ExecInitPartitionDispatchInfo
1084  * Lock the partitioned table (if not locked already) and initialize
1085  * PartitionDispatch for a partitioned table and store it in the next
1086  * available slot in the proute->partition_dispatch_info array. Also,
1087  * record the index into this array in the parent_pd->indexes[] array in
1088  * the partidx element so that we can properly retrieve the newly created
1089  * PartitionDispatch later.
1090  */
1091 static PartitionDispatch
1092 ExecInitPartitionDispatchInfo(EState *estate,
1093  PartitionTupleRouting *proute, Oid partoid,
1094  PartitionDispatch parent_pd, int partidx,
1095  ResultRelInfo *rootResultRelInfo)
1096 {
1097  Relation rel;
1098  PartitionDesc partdesc;
1099  PartitionDispatch pd;
1100  int dispatchidx;
1101  MemoryContext oldcxt;
1102 
1103  /*
1104  * For data modification, it is better that the executor does not include
1105  * partitions being detached, except when running in snapshot-isolation
1106  * mode. This means that a read-committed transaction immediately gets a
1107  * "no partition for tuple" error when a tuple is inserted into a
1108  * partition that's being detached concurrently, but a transaction in
1109  * repeatable-read mode can still use such a partition.
1110  */
1111  if (estate->es_partition_directory == NULL)
1112  estate->es_partition_directory =
1113  CreatePartitionDirectory(estate->es_query_cxt,
1114  !IsolationUsesXactSnapshot());
1115 
1116  oldcxt = MemoryContextSwitchTo(proute->memcxt);
1117 
1118  /*
1119  * Only sub-partitioned tables need to be locked here. The root
1120  * partitioned table will already have been locked as it's referenced in
1121  * the query's rtable.
1122  */
1123  if (partoid != RelationGetRelid(proute->partition_root))
1124  rel = table_open(partoid, RowExclusiveLock);
1125  else
1126  rel = proute->partition_root;
1127  partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
1128 
1129  pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
1130  partdesc->nparts * sizeof(int));
1131  pd->reldesc = rel;
1132  pd->key = RelationGetPartitionKey(rel);
1133  pd->keystate = NIL;
1134  pd->partdesc = partdesc;
1135  if (parent_pd != NULL)
1136  {
1137  TupleDesc tupdesc = RelationGetDescr(rel);
1138 
1139  /*
1140  * For sub-partitioned tables where the column order differs from its
1141  * direct parent partitioned table, we must store a tuple table slot
1142  * initialized with its tuple descriptor and a tuple conversion map to
1143  * convert a tuple from its parent's rowtype to its own. This is to
1144  * make sure that we are looking at the correct row using the correct
1145  * tuple descriptor when computing its partition key for tuple
1146  * routing.
1147  */
1148  pd->tupmap = build_attrmap_by_name_if_req(RelationGetDescr(parent_pd->reldesc),
1149  tupdesc,
1150  false);
1151  pd->tupslot = pd->tupmap ?
1152  MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
1153  }
1154  else
1155  {
1156  /* Not required for the root partitioned table */
1157  pd->tupmap = NULL;
1158  pd->tupslot = NULL;
1159  }
1160 
1161  /*
1162  * Initialize with -1 to signify that the corresponding partition's
1163  * ResultRelInfo or PartitionDispatch has not been created yet.
1164  */
1165  memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
1166 
1167  /* Track in PartitionTupleRouting for later use */
1168  dispatchidx = proute->num_dispatch++;
1169 
1170  /* Allocate or enlarge the array, as needed */
1171  if (proute->num_dispatch >= proute->max_dispatch)
1172  {
1173  if (proute->max_dispatch == 0)
1174  {
1175  proute->max_dispatch = 4;
1176  proute->partition_dispatch_info = (PartitionDispatch *)
1177  palloc(sizeof(PartitionDispatch) * proute->max_dispatch);
1178  proute->nonleaf_partitions = (ResultRelInfo **)
1179  palloc(sizeof(ResultRelInfo *) * proute->max_dispatch);
1180  }
1181  else
1182  {
1183  proute->max_dispatch *= 2;
1184  proute->partition_dispatch_info = (PartitionDispatch *)
1185  repalloc(proute->partition_dispatch_info,
1186  sizeof(PartitionDispatch) * proute->max_dispatch);
1187  proute->nonleaf_partitions = (ResultRelInfo **)
1188  repalloc(proute->nonleaf_partitions,
1189  sizeof(ResultRelInfo *) * proute->max_dispatch);
1190  }
1191  }
1192  proute->partition_dispatch_info[dispatchidx] = pd;
1193 
1194  /*
1195  * If setting up a PartitionDispatch for a sub-partitioned table, we may
1196  * also need a minimally valid ResultRelInfo for checking the partition
1197  * constraint later; set that up now.
1198  */
1199  if (parent_pd)
1200  {
1201  ResultRelInfo *rri = makeNode(ResultRelInfo);
1202 
1203  InitResultRelInfo(rri, rel, 0, rootResultRelInfo, 0);
1204  proute->nonleaf_partitions[dispatchidx] = rri;
1205  }
1206  else
1207  proute->nonleaf_partitions[dispatchidx] = NULL;
1208 
1209  /*
1210  * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
1211  * install a downlink in the parent to allow quick descent.
1212  */
1213  if (parent_pd)
1214  {
1215  Assert(parent_pd->indexes[partidx] == -1);
1216  parent_pd->indexes[partidx] = dispatchidx;
1217  }
1218 
1219  MemoryContextSwitchTo(oldcxt);
1220 
1221  return pd;
1222 }
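/*
 * Editor's note on the snapshot-isolation special case above: with
 *
 *     ALTER TABLE root DETACH PARTITION p1 CONCURRENTLY;
 *
 * still in progress, a READ COMMITTED inserter that routes a tuple to p1
 * fails with "no partition of relation ... found for row", while a
 * REPEATABLE READ transaction whose snapshot predates the detach can
 * still route tuples into p1.
 */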
1223 
1224 /*
1225  * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
1226  * routing.
1227  *
1228  * Close all the partitioned tables, leaf partitions, and their indices.
1229  */
1230 void
1231 ExecCleanupTupleRouting(ModifyTableState *mtstate,
1232  PartitionTupleRouting *proute)
1233 {
1234  int i;
1235 
1236  /*
1237  * Remember, proute->partition_dispatch_info[0] corresponds to the root
1238  * partitioned table, which we must not try to close, because it is the
1239  * main target table of the query that will be closed by callers such as
1240  * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
1241  * partitioned table.
1242  */
1243  for (i = 1; i < proute->num_dispatch; i++)
1244  {
1245  PartitionDispatch pd = proute->partition_dispatch_info[i];
1246 
1247  table_close(pd->reldesc, NoLock);
1248 
1249  if (pd->tupslot)
1250  ExecDropSingleTupleTableSlot(pd->tupslot);
1251  }
1252 
1253  for (i = 0; i < proute->num_partitions; i++)
1254  {
1255  ResultRelInfo *resultRelInfo = proute->partitions[i];
1256 
1257  /* Allow any FDWs to shut down */
1258  if (resultRelInfo->ri_FdwRoutine != NULL &&
1259  resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
1260  resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
1261  resultRelInfo);
1262 
1263  /*
1264  * Close it if it's not one of the result relations borrowed from the
1265  * owning ModifyTableState; those will be closed by ExecEndPlan().
1266  */
1267  if (proute->is_borrowed_rel[i])
1268  continue;
1269 
1270  ExecCloseIndices(resultRelInfo);
1271  table_close(resultRelInfo->ri_RelationDesc, NoLock);
1272  }
1273 }
1274 
1275 /* ----------------
1276  * FormPartitionKeyDatum
1277  * Construct values[] and isnull[] arrays for the partition key
1278  * of a tuple.
1279  *
1280  * pd Partition dispatch object of the partitioned table
1281  * slot Heap tuple from which to extract partition key
1282  * estate executor state for evaluating any partition key
1283  * expressions (must be non-NULL)
1284  * values Array of partition key Datums (output area)
1285  * isnull Array of is-null indicators (output area)
1286  *
1287  * the ecxt_scantuple slot of estate's per-tuple expr context must point to
1288  * the heap tuple passed in.
1289  * ----------------
1290  */
1291 static void
1292 FormPartitionKeyDatum(PartitionDispatch pd,
1293  TupleTableSlot *slot,
1294  EState *estate,
1295  Datum *values,
1296  bool *isnull)
1297 {
1298  ListCell *partexpr_item;
1299  int i;
1300 
1301  if (pd->key->partexprs != NIL && pd->keystate == NIL)
1302  {
1303  /* Check caller has set up context correctly */
1304  Assert(estate != NULL &&
1305  GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
1306 
1307  /* First time through, set up expression evaluation state */
1308  pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
1309  }
1310 
1311  partexpr_item = list_head(pd->keystate);
1312  for (i = 0; i < pd->key->partnatts; i++)
1313  {
1314  AttrNumber keycol = pd->key->partattrs[i];
1315  Datum datum;
1316  bool isNull;
1317 
1318  if (keycol != 0)
1319  {
1320  /* Plain column; get the value directly from the heap tuple */
1321  datum = slot_getattr(slot, keycol, &isNull);
1322  }
1323  else
1324  {
1325  /* Expression; need to evaluate it */
1326  if (partexpr_item == NULL)
1327  elog(ERROR, "wrong number of partition key expressions");
1328  datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
1329  GetPerTupleExprContext(estate),
1330  &isNull);
1331  partexpr_item = lnext(pd->keystate, partexpr_item);
1332  }
1333  values[i] = datum;
1334  isnull[i] = isNull;
1335  }
1336 
1337  if (partexpr_item != NULL)
1338  elog(ERROR, "wrong number of partition key expressions");
1339 }
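/*
 * Editor's illustration (hypothetical key): for a table partitioned by
 * RANGE (a, (a + b)), partattrs is {1, 0}; values[0]/isnull[0] come
 * straight from slot_getattr() on column 1, while values[1]/isnull[1]
 * are computed by evaluating the (a + b) ExprState from pd->keystate.
 */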
1340 
1341 /*
1342  * The number of times the same partition must be found in a row before we
1343  * switch from a binary search for the given values to just checking if the
1344  * values belong to the last found partition. This must be above 0.
1345  */
1346 #define PARTITION_CACHED_FIND_THRESHOLD 16
1347 
1348 /*
1349  * get_partition_for_tuple
1350  * Finds partition of relation which accepts the partition key specified
1351  * in values and isnull.
1352  *
1353  * Calling this function can be quite expensive when LIST and RANGE
1354  * partitioned tables have many partitions. This is due to the binary search
1355  * that's done to find the correct partition. Many of the use cases for LIST
1356  * and RANGE partitioned tables make it likely that the same partition is
1357  * found in subsequent ExecFindPartition() calls. This is especially true for
1358  * cases such as RANGE partitioned tables on a TIMESTAMP column where the
1359  * partition key is the current time. When asked to find a partition for a
1360  * RANGE or LIST partitioned table, we record the partition index and datum
1361  * offset we've found for the given 'values' in the PartitionDesc (which is
1362  * stored in relcache), and if we keep finding the same partition
1363  * PARTITION_CACHED_FIND_THRESHOLD times in a row, then we'll enable caching
1364  * logic and instead of performing a binary search to find the correct
1365  * partition, we'll just double-check that 'values' still belong to the last
1366  * found partition, and if so, we'll return that partition index, thus
1367  * skipping the need for the binary search. If we fail to match the last
1368  * partition when double checking, then we fall back on doing a binary search.
1369  * In this case, unless we find 'values' belong to the DEFAULT partition,
1370  * we'll reset the number of times we've hit the same partition so that we
1371  * don't attempt to use the cache again until we've found that partition at
1372  * least PARTITION_CACHED_FIND_THRESHOLD times in a row.
1373  *
1374  * For cases where the partition changes on each lookup, the amount of
1375  * additional work required just amounts to recording the last found partition
1376  * and bound offset then resetting the found counter. This is cheap and does
1377  * not appear to cause any meaningful slowdowns for such cases.
1378  *
1379  * No caching of partitions is done when the last found partition is the
1380  * DEFAULT or NULL partition. For the case of the DEFAULT partition, there
1381  * is no bound offset storing the matching datum, so we cannot confirm the
1382  * indexes match. For the NULL partition, this is just so cheap, there's no
1383  * sense in caching.
1384  *
1385  * Return value is index of the partition (>= 0 and < partdesc->nparts) if one
1386  * found or -1 if none found.
1387  */
1388 static int
1389 get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull)
1390 {
1391  int bound_offset = -1;
1392  int part_index = -1;
1393  PartitionKey key = pd->key;
1394  PartitionDesc partdesc = pd->partdesc;
1395  PartitionBoundInfo boundinfo = partdesc->boundinfo;
1396 
1397  /*
1398  * In the switch statement below, when we perform a cached lookup for
1399  * RANGE and LIST partitioned tables, if we find that the last found
1400  * partition matches the 'values', we return the partition index right
1401  * away. We do this instead of breaking out of the switch as we don't
1402  * want to execute the code about the DEFAULT partition or do any updates
1403  * for any of the cache-related fields. That would be a waste of effort
1404  * as we already know it's not the DEFAULT partition and have no need to
1405  * increment the number of times we found the same partition any higher
1406  * than PARTITION_CACHED_FIND_THRESHOLD.
1407  */
1408 
1409  /* Route as appropriate based on partitioning strategy. */
1410  switch (key->strategy)
1411  {
1412  case PARTITION_STRATEGY_HASH:
1413  {
1414  uint64 rowHash;
1415 
1416  /* hash partitioning is too cheap to bother caching */
1417  rowHash = compute_partition_hash_value(key->partnatts,
1418  key->partsupfunc,
1419  key->partcollation,
1420  values, isnull);
1421 
1422  /*
1423  * HASH partitions can't have a DEFAULT partition and we don't
1424  * do any caching work for them, so just return the part index
1425  */
1426  return boundinfo->indexes[rowHash % boundinfo->nindexes];
1427  }
1428 
1429  case PARTITION_STRATEGY_LIST:
1430  if (isnull[0])
1431  {
1432  /* this is far too cheap to bother doing any caching */
1433  if (partition_bound_accepts_nulls(boundinfo))
1434  {
1435  /*
1436  * When there is a NULL partition we just return that
1437  * directly. We don't have a bound_offset so it's not
1438  * valid to drop into the code after the switch which
1439  * checks and updates the cache fields. We perhaps should
1440  * be invalidating the details of the last cached
1441  * partition but there's no real need to. Keeping those
1442  * fields set gives a chance at matching to the cached
1443  * partition on the next lookup.
1444  */
1445  return boundinfo->null_index;
1446  }
1447  }
1448  else
1449  {
1450  bool equal;
1451 
1452  if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1453  {
1454  int last_datum_offset = partdesc->last_found_datum_index;
1455  Datum lastDatum = boundinfo->datums[last_datum_offset][0];
1456  int32 cmpval;
1457 
1458  /* does the last found datum index match this datum? */
1459  cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
1460  key->partcollation[0],
1461  lastDatum,
1462  values[0]));
1463 
1464  if (cmpval == 0)
1465  return boundinfo->indexes[last_datum_offset];
1466 
1467  /* fall-through and do a manual lookup */
1468  }
1469 
1470  bound_offset = partition_list_bsearch(key->partsupfunc,
1471  key->partcollation,
1472  boundinfo,
1473  values[0], &equal);
1474  if (bound_offset >= 0 && equal)
1475  part_index = boundinfo->indexes[bound_offset];
1476  }
1477  break;
1478 
1479  case PARTITION_STRATEGY_RANGE:
1480  {
1481  bool equal = false,
1482  range_partkey_has_null = false;
1483  int i;
1484 
1485  /*
1486  * No range includes NULL, so this will be accepted by the
1487  * default partition if there is one, and otherwise rejected.
1488  */
1489  for (i = 0; i < key->partnatts; i++)
1490  {
1491  if (isnull[i])
1492  {
1493  range_partkey_has_null = true;
1494  break;
1495  }
1496  }
1497 
1498  /* NULLs belong in the DEFAULT partition */
1499  if (range_partkey_has_null)
1500  break;
1501 
1502  if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1503  {
1504  int last_datum_offset = partdesc->last_found_datum_index;
1505  Datum *lastDatums = boundinfo->datums[last_datum_offset];
1506  PartitionRangeDatumKind *kind = boundinfo->kind[last_datum_offset];
1507  int32 cmpval;
1508 
1509  /* check if the value is >= to the lower bound */
1510  cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1511  key->partcollation,
1512  lastDatums,
1513  kind,
1514  values,
1515  key->partnatts);
1516 
1517  /*
1518  * If it's equal to the lower bound then no need to check
1519  * the upper bound.
1520  */
1521  if (cmpval == 0)
1522  return boundinfo->indexes[last_datum_offset + 1];
1523 
1524  if (cmpval < 0 && last_datum_offset + 1 < boundinfo->ndatums)
1525  {
1526  /* check if the value is below the upper bound */
1527  lastDatums = boundinfo->datums[last_datum_offset + 1];
1528  kind = boundinfo->kind[last_datum_offset + 1];
1529  cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1530  key->partcollation,
1531  lastDatums,
1532  kind,
1533  values,
1534  key->partnatts);
1535 
1536  if (cmpval > 0)
1537  return boundinfo->indexes[last_datum_offset + 1];
1538  }
1539  /* fall-through and do a manual lookup */
1540  }
1541 
1542  bound_offset = partition_range_datum_bsearch(key->partsupfunc,
1543  key->partcollation,
1544  boundinfo,
1545  key->partnatts,
1546  values,
1547  &equal);
1548 
1549  /*
1550  * The bound at bound_offset is less than or equal to the
1551  * tuple value, so the bound at offset+1 is the upper bound of
1552  * the partition we're looking for, if there actually exists
1553  * one.
1554  */
1555  part_index = boundinfo->indexes[bound_offset + 1];
1556  }
1557  break;
1558 
1559  default:
1560  elog(ERROR, "unexpected partition strategy: %d",
1561  (int) key->strategy);
1562  }
1563 
1564  /*
1565  * part_index < 0 means we failed to find a partition of this parent. Use
1566  * the default partition, if there is one.
1567  */
1568  if (part_index < 0)
1569  {
1570  /*
1571  * No need to reset the cache fields here. The next set of values
1572  * might end up belonging to the cached partition, so leaving the
1573  * cache alone improves the chances of a cache hit on the next lookup.
1574  */
1575  return boundinfo->default_index;
1576  }
1577 
1578  /* we should only make it here when the code above set bound_offset */
1579  Assert(bound_offset >= 0);
1580 
1581  /*
1582  * Attend to the cache fields. If the bound_offset matches the last
1583  * cached bound offset then we've found the same partition as last time,
1584  * so bump the count by one. If all goes well, we'll eventually reach
1585  * PARTITION_CACHED_FIND_THRESHOLD and try the cache path next time
1586  * around. Otherwise, we'll reset the cache count back to 1 to mark that
1587  * we've found this partition for the first time.
1588  */
1589  if (bound_offset == partdesc->last_found_datum_index)
1590  partdesc->last_found_count++;
1591  else
1592  {
1593  partdesc->last_found_count = 1;
1594  partdesc->last_found_part_index = part_index;
1595  partdesc->last_found_datum_index = bound_offset;
1596  }
1597 
1598  return part_index;
1599 }
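/*
 * Editor's sketch of the caching behavior above: inserting rows with a
 * monotonically increasing timestamp into a RANGE-partitioned table hits
 * the same partition repeatedly; after PARTITION_CACHED_FIND_THRESHOLD
 * (16) consecutive hits, each subsequent call needs only one or two
 * comparisons against the cached bound instead of a full binary search,
 * until a row falls outside the cached partition's bounds.
 */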
1600 
1601 /*
1602  * ExecBuildSlotPartitionKeyDescription
1603  *
1604  * This works very much like BuildIndexValueDescription() and is currently
1605  * used for building error messages when ExecFindPartition() fails to find
1606  * a partition for a row.
1607  */
1608 static char *
1609 ExecBuildSlotPartitionKeyDescription(Relation rel,
1610  Datum *values,
1611  bool *isnull,
1612  int maxfieldlen)
1613 {
1614  StringInfoData buf;
1615  PartitionKey key = RelationGetPartitionKey(rel);
1616  int partnatts = get_partition_natts(key);
1617  int i;
1618  Oid relid = RelationGetRelid(rel);
1619  AclResult aclresult;
1620 
1621  if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
1622  return NULL;
1623 
1624  /* If the user has table-level access, just go build the description. */
1625  aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
1626  if (aclresult != ACLCHECK_OK)
1627  {
1628  /*
1629  * Step through the columns of the partition key and make sure the
1630  * user has SELECT rights on all of them.
1631  */
1632  for (i = 0; i < partnatts; i++)
1633  {
1634  AttrNumber attnum = get_partition_col_attnum(key, i);
1635 
1636  /*
1637  * If this partition key column is an expression, we return no
1638  * detail rather than try to figure out what column(s) the
1639  * expression includes and if the user has SELECT rights on them.
1640  */
1641  if (attnum == InvalidAttrNumber ||
1642  pg_attribute_aclcheck(relid, attnum, GetUserId(),
1643  ACL_SELECT) != ACLCHECK_OK)
1644  return NULL;
1645  }
1646  }
1647 
1648  initStringInfo(&buf);
1649  appendStringInfo(&buf, "(%s) = (",
1650  pg_get_partkeydef_columns(relid, true));
1651 
1652  for (i = 0; i < partnatts; i++)
1653  {
1654  char *val;
1655  int vallen;
1656 
1657  if (isnull[i])
1658  val = "null";
1659  else
1660  {
1661  Oid foutoid;
1662  bool typisvarlena;
1663 
1664  getTypeOutputInfo(get_partition_col_typid(key, i),
1665  &foutoid, &typisvarlena);
1666  val = OidOutputFunctionCall(foutoid, values[i]);
1667  }
1668 
1669  if (i > 0)
1670  appendStringInfoString(&buf, ", ");
1671 
1672  /* truncate if needed */
1673  vallen = strlen(val);
1674  if (vallen <= maxfieldlen)
1675  appendBinaryStringInfo(&buf, val, vallen);
1676  else
1677  {
1678  vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1679  appendBinaryStringInfo(&buf, val, vallen);
1680  appendStringInfoString(&buf, "...");
1681  }
1682  }
1683 
1684  appendStringInfoChar(&buf, ')');
1685 
1686  return buf.data;
1687 }
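/*
 * Editor's example of the description built above, as it appears in the
 * errdetail of the "no partition ... found for row" error:
 *
 *     DETAIL:  Partition key of the failing row contains (a, b) = (42, null).
 *
 * Values longer than maxfieldlen bytes are clipped and terminated with
 * "...".
 */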
1688 
1689 /*
1690  * adjust_partition_colnos
1691  * Adjust the list of UPDATE target column numbers to account for
1692  * attribute differences between the parent and the partition.
1693  *
1694  * Note: mustn't be called if no adjustment is required.
1695  */
1696 static List *
1697 adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
1698 {
1699  TupleConversionMap *map = ExecGetChildToRootMap(leaf_part_rri);
1700 
1701  Assert(map != NULL);
1702 
1703  return adjust_partition_colnos_using_map(colnos, map->attrMap);
1704 }
1705 
1706 /*
1707  * adjust_partition_colnos_using_map
1708  * Like adjust_partition_colnos, but uses a caller-supplied map instead
1709  * of assuming to map from the "root" result relation.
1710  *
1711  * Note: mustn't be called if no adjustment is required.
1712  */
1713 static List *
1714 adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
1715 {
1716  List *new_colnos = NIL;
1717  ListCell *lc;
1718 
1719  Assert(attrMap != NULL); /* else we shouldn't be here */
1720 
1721  foreach(lc, colnos)
1722  {
1723  AttrNumber parentattrno = lfirst_int(lc);
1724 
1725  if (parentattrno <= 0 ||
1726  parentattrno > attrMap->maplen ||
1727  attrMap->attnums[parentattrno - 1] == 0)
1728  elog(ERROR, "unexpected attno %d in target column list",
1729  parentattrno);
1730  new_colnos = lappend_int(new_colnos,
1731  attrMap->attnums[parentattrno - 1]);
1732  }
1733 
1734  return new_colnos;
1735 }
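/*
 * Editor's illustration (hypothetical attribute layout): if the parent has
 * a dropped column that the partition was created without, parent column 3
 * can correspond to partition column 2; then attrMap->attnums[3 - 1] == 2,
 * so an UPDATE target list naming parent attno 3 is rewritten to partition
 * attno 2.
 */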
1736 
1737 /*-------------------------------------------------------------------------
1738  * Run-Time Partition Pruning Support.
1739  *
1740  * The following series of functions exist to support the removal of unneeded
1741  * subplans for queries against partitioned tables. The supporting functions
1742  * here are designed to work with any plan type which supports an arbitrary
1743  * number of subplans, e.g. Append, MergeAppend.
1744  *
1745  * When pruning involves comparison of a partition key to a constant, it's
1746  * done by the planner. However, if we have a comparison to a non-constant
1747  * but not volatile expression, that presents an opportunity for run-time
1748  * pruning by the executor, allowing irrelevant partitions to be skipped
1749  * dynamically.
1750  *
1751  * We must distinguish expressions containing PARAM_EXEC Params from
1752  * expressions that don't contain those. Even though a PARAM_EXEC Param is
1753  * considered to be a stable expression, it can change value from one plan
1754  * node scan to the next during query execution. Stable comparison
1755  * expressions that don't involve such Params allow partition pruning to be
1756  * done once during executor startup. Expressions that do involve such Params
1757  * require us to prune separately for each scan of the parent plan node.
1758  *
1759  * Note that pruning away unneeded subplans during executor startup has the
1760  * added benefit of not having to initialize the unneeded subplans at all.
1761  *
1762  *
1763  * Functions:
1764  *
1765  * ExecInitPartitionPruning:
1766  * Creates the PartitionPruneState required by ExecFindMatchingSubPlans.
1767  * Details stored include how to map the partition index returned by the
1768  * partition pruning code into subplan indexes. Also determines the set
1769  * of subplans to initialize considering the result of performing initial
1770  * pruning steps if any. Maps in PartitionPruneState are updated to
1771  * account for initial pruning possibly having eliminated some of the
1772  * subplans.
1773  *
1774  * ExecFindMatchingSubPlans:
1775  * Returns indexes of matching subplans after evaluating the expressions
1776  * that are safe to evaluate at a given point. This function is first
1777  * called during ExecInitPartitionPruning() to find the initially
1778  * matching subplans based on performing the initial pruning steps and
1779  * then must be called again each time the value of a Param listed in
1780  * PartitionPruneState's 'execparamids' changes.
1781  *-------------------------------------------------------------------------
1782  */
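/*
 * [Editorial illustration, not part of the original file]  For example, with
 * a generic plan for
 *		SELECT * FROM parted WHERE key = $1;
 * the comparison value is an external Param that is fixed for the whole
 * execution, so pruning can happen once during executor startup.  By
 * contrast, in a parameterized nestloop such as
 *		SELECT * FROM other t JOIN parted p ON p.key = t.x;
 * t.x reaches the inner Append as a PARAM_EXEC Param whose value changes for
 * every outer row, so pruning must be repeated on each rescan.
 */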
1783 
1784 /*
1785  * ExecInitPartitionPruning
1786  * Initialize data structure needed for run-time partition pruning and
1787  * do initial pruning if needed
1788  *
1789  * On return, *initially_valid_subplans is assigned the set of indexes of
1790  * child subplans that must be initialized along with the parent plan node.
1791  * Initial pruning is performed here if needed and in that case only the
1792  * surviving subplans' indexes are added.
1793  *
1794  * If subplans are indeed pruned, subplan_map arrays contained in the returned
1795  * PartitionPruneState are re-sequenced to not count those, though only if the
1796  * maps will be needed for subsequent execution pruning passes.
1797  */
1798 PartitionPruneState *
1799 ExecInitPartitionPruning(PlanState *planstate,
1800  int n_total_subplans,
1801  PartitionPruneInfo *pruneinfo,
1802  Bitmapset **initially_valid_subplans)
1803 {
1804  PartitionPruneState *prunestate;
1805  EState *estate = planstate->state;
1806 
1807  /* We may need an expression context to evaluate partition exprs */
1808  ExecAssignExprContext(estate, planstate);
1809 
1810  /* Create the working data structure for pruning */
1811  prunestate = CreatePartitionPruneState(planstate, pruneinfo);
1812 
1813  /*
1814  * Perform an initial partition prune pass, if required.
1815  */
1816  if (prunestate->do_initial_prune)
1817  *initially_valid_subplans = ExecFindMatchingSubPlans(prunestate, true);
1818  else
1819  {
1820  /* No pruning, so we'll need to initialize all subplans */
1821  Assert(n_total_subplans > 0);
1822  *initially_valid_subplans = bms_add_range(NULL, 0,
1823  n_total_subplans - 1);
1824  }
1825 
1826  /*
1827  * Re-sequence subplan indexes contained in prunestate to account for any
1828  * that were removed above due to initial pruning. No need to do this if
1829  * no steps were removed.
1830  */
1831  if (bms_num_members(*initially_valid_subplans) < n_total_subplans)
1832  {
1833  /*
1834  * We can safely skip this when !do_exec_prune, even though that
1835  * leaves invalid data in prunestate, because that data won't be
1836  * consulted again (cf initial Assert in ExecFindMatchingSubPlans).
1837  */
1838  if (prunestate->do_exec_prune)
1839  PartitionPruneFixSubPlanMap(prunestate,
1840  *initially_valid_subplans,
1841  n_total_subplans);
1842  }
1843 
1844  return prunestate;
1845 }
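/*
 * [Editorial sketch, not part of the original file]  Modelled loosely on
 * ExecInitAppend: a parent node carrying run-time pruning info asks this
 * function which of its subplans actually need to be initialized.
 */
#ifdef NOT_USED
static void
init_pruning_example(AppendState *appendstate, Append *node)
{
	Bitmapset  *validsubplans;

	appendstate->as_prune_state =
		ExecInitPartitionPruning(&appendstate->ps,
								 list_length(node->appendplans),
								 node->part_prune_info,
								 &validsubplans);
	/* ExecInitNode() is then called only for members of validsubplans */
}
#endif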
1846 
1847 /*
1848  * CreatePartitionPruneState
1849  * Build the data structure required for calling ExecFindMatchingSubPlans
1850  *
1851  * 'planstate' is the parent plan node's execution state.
1852  *
1853  * 'pruneinfo' is a PartitionPruneInfo as generated by
1854  * make_partition_pruneinfo. Here we build a PartitionPruneState containing a
1855  * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
1856  * pruneinfo->prune_infos), each of which contains a PartitionedRelPruningData
1857  * for each PartitionedRelPruneInfo appearing in that sublist. This two-level
1858  * system is needed to keep from confusing the different hierarchies when a
1859  * UNION ALL contains multiple partitioned tables as children. The data
1860  * stored in each PartitionedRelPruningData can be re-used each time we
1861  * re-evaluate which partitions match the pruning steps provided in each
1862  * PartitionedRelPruneInfo.
1863  */
1864 static PartitionPruneState *
1865 CreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *pruneinfo)
1866 {
1867  EState *estate = planstate->state;
1868  PartitionPruneState *prunestate;
1869  int n_part_hierarchies;
1870  ListCell *lc;
1871  int i;
1872  ExprContext *econtext = planstate->ps_ExprContext;
1873 
1874  /* For data reading, executor always omits detached partitions */
1875  if (estate->es_partition_directory == NULL)
1876  estate->es_partition_directory =
1877  CreatePartitionDirectory(estate->es_query_cxt, false);
1878 
1879  n_part_hierarchies = list_length(pruneinfo->prune_infos);
1880  Assert(n_part_hierarchies > 0);
1881 
1882  /*
1883  * Allocate the data structure
1884  */
1885  prunestate = (PartitionPruneState *)
1886  palloc(offsetof(PartitionPruneState, partprunedata) +
1887  sizeof(PartitionPruningData *) * n_part_hierarchies);
1888 
1889  prunestate->execparamids = NULL;
1890  /* other_subplans can change at runtime, so we need our own copy */
1891  prunestate->other_subplans = bms_copy(pruneinfo->other_subplans);
1892  prunestate->do_initial_prune = false; /* may be set below */
1893  prunestate->do_exec_prune = false; /* may be set below */
1894  prunestate->num_partprunedata = n_part_hierarchies;
1895 
1896  /*
1897  * Create a short-term memory context which we'll use when making calls to
1898  * the partition pruning functions. This avoids possible memory leaks,
1899  * since the pruning functions call comparison functions that aren't under
1900  * our control.
1901  */
1902  prunestate->prune_context =
1903  AllocSetContextCreate(CurrentMemoryContext,
1904  "Partition Prune",
1905  ALLOCSET_DEFAULT_SIZES);
1906 
1907  i = 0;
1908  foreach(lc, pruneinfo->prune_infos)
1909  {
1910  List *partrelpruneinfos = lfirst_node(List, lc);
1911  int npartrelpruneinfos = list_length(partrelpruneinfos);
1912  PartitionPruningData *prunedata;
1913  ListCell *lc2;
1914  int j;
1915 
1916  prunedata = (PartitionPruningData *)
1917  palloc(offsetof(PartitionPruningData, partrelprunedata) +
1918  npartrelpruneinfos * sizeof(PartitionedRelPruningData));
1919  prunestate->partprunedata[i] = prunedata;
1920  prunedata->num_partrelprunedata = npartrelpruneinfos;
1921 
1922  j = 0;
1923  foreach(lc2, partrelpruneinfos)
1924  {
1925  PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2);
1926  PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
1927  Relation partrel;
1928  PartitionDesc partdesc;
1929  PartitionKey partkey;
1930 
1931  /*
1932  * We can rely on the copies of the partitioned table's partition
1933  * key and partition descriptor appearing in its relcache entry,
1934  * because that entry will be held open and locked for the
1935  * duration of this executor run.
1936  */
1937  partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex);
1938  partkey = RelationGetPartitionKey(partrel);
1939  partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
1940  partrel);
1941 
1942  /*
1943  * Initialize the subplan_map and subpart_map.
1944  *
1945  * Because we request detached partitions to be included, and
1946  * detaching waits for old transactions, it is safe to assume that
1947  * no partitions have disappeared since this query was planned.
1948  *
1949  * However, new partitions may have been added.
1950  */
1951  Assert(partdesc->nparts >= pinfo->nparts);
1952  pprune->nparts = partdesc->nparts;
1953  pprune->subplan_map = palloc(sizeof(int) * partdesc->nparts);
1954  if (partdesc->nparts == pinfo->nparts)
1955  {
1956  /*
1957  * There are no new partitions, so this is simple. We can
1958  * simply point to the subpart_map from the plan, but we must
1959  * copy the subplan_map since we may change it later.
1960  */
1961  pprune->subpart_map = pinfo->subpart_map;
1962  memcpy(pprune->subplan_map, pinfo->subplan_map,
1963  sizeof(int) * pinfo->nparts);
1964 
1965  /*
1966  * Double-check that the list of unpruned relations has not
1967  * changed. (Pruned partitions are not in relid_map[].)
1968  */
1969 #ifdef USE_ASSERT_CHECKING
1970  for (int k = 0; k < pinfo->nparts; k++)
1971  {
1972  Assert(partdesc->oids[k] == pinfo->relid_map[k] ||
1973  pinfo->subplan_map[k] == -1);
1974  }
1975 #endif
1976  }
1977  else
1978  {
1979  int pd_idx = 0;
1980  int pp_idx;
1981 
1982  /*
1983  * Some new partitions have appeared since plan time, and
1984  * those are reflected in our PartitionDesc but were not
1985  * present in the one used to construct subplan_map and
1986  * subpart_map. So we must construct new and longer arrays
1987  * where the partitions that were originally present map to
1988  * the same sub-structures, and any added partitions map to
1989  * -1, as if the new partitions had been pruned.
1990  *
1991  * Note: pinfo->relid_map[] may contain InvalidOid entries for
1992  * partitions pruned by the planner. We cannot tell exactly
1993  * which of the partdesc entries these correspond to, but we
1994  * don't have to; just skip over them. The non-pruned
1995  * relid_map entries, however, had better be a subset of the
1996  * partdesc entries and in the same order.
1997  */
1998  pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
1999  for (pp_idx = 0; pp_idx < partdesc->nparts; pp_idx++)
2000  {
2001  /* Skip any InvalidOid relid_map entries */
2002  while (pd_idx < pinfo->nparts &&
2003  !OidIsValid(pinfo->relid_map[pd_idx]))
2004  pd_idx++;
2005 
2006  if (pd_idx < pinfo->nparts &&
2007  pinfo->relid_map[pd_idx] == partdesc->oids[pp_idx])
2008  {
2009  /* match... */
2010  pprune->subplan_map[pp_idx] =
2011  pinfo->subplan_map[pd_idx];
2012  pprune->subpart_map[pp_idx] =
2013  pinfo->subpart_map[pd_idx];
2014  pd_idx++;
2015  }
2016  else
2017  {
2018  /* this partdesc entry is not in the plan */
2019  pprune->subplan_map[pp_idx] = -1;
2020  pprune->subpart_map[pp_idx] = -1;
2021  }
2022  }
2023 
2024  /*
2025  * It might seem that we need to skip any trailing InvalidOid
2026  * entries in pinfo->relid_map before checking that we scanned
2027  * all of the relid_map. But we will have skipped them above,
2028  * because they must correspond to some partdesc->oids
2029  * entries; we just couldn't tell which.
2030  */
2031  if (pd_idx != pinfo->nparts)
2032  elog(ERROR, "could not match partition child tables to plan elements");
2033  }
2034 
2035  /* present_parts is also subject to later modification */
2036  pprune->present_parts = bms_copy(pinfo->present_parts);
2037 
2038  /*
2039  * Initialize pruning contexts as needed. Note that we must skip
2040  * execution-time partition pruning in EXPLAIN (GENERIC_PLAN),
2041  * since parameter values may be missing.
2042  */
2043  pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
2044  if (pinfo->initial_pruning_steps &&
2045  !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2046  {
2047  InitPartitionPruneContext(&pprune->initial_context,
2048  pinfo->initial_pruning_steps,
2049  partdesc, partkey, planstate,
2050  econtext);
2051  /* Record whether initial pruning is needed at any level */
2052  prunestate->do_initial_prune = true;
2053  }
2054  pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
2055  if (pinfo->exec_pruning_steps &&
2056  !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2057  {
2058  InitPartitionPruneContext(&pprune->exec_context,
2059  pinfo->exec_pruning_steps,
2060  partdesc, partkey, planstate,
2061  econtext);
2062  /* Record whether exec pruning is needed at any level */
2063  prunestate->do_exec_prune = true;
2064  }
2065 
2066  /*
2067  * Accumulate the IDs of all PARAM_EXEC Params affecting the
2068  * partitioning decisions at this plan node.
2069  */
2070  prunestate->execparamids = bms_add_members(prunestate->execparamids,
2071  pinfo->execparamids);
2072 
2073  j++;
2074  }
2075  i++;
2076  }
2077 
2078  return prunestate;
2079 }
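/*
 * [Editorial sketch, not part of the original file]  The allocations above
 * use the usual offsetof + FLEXIBLE_ARRAY_MEMBER idiom to obtain a header
 * and its variable-length tail in a single palloc; in isolation, with the
 * hypothetical type IntVector:
 */
#ifdef NOT_USED
typedef struct IntVector
{
	int			nitems;
	int			items[FLEXIBLE_ARRAY_MEMBER];
} IntVector;

static IntVector *
make_int_vector(int nitems)
{
	IntVector  *v = palloc(offsetof(IntVector, items) +
						   nitems * sizeof(int));

	v->nitems = nitems;
	return v;
}
#endif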
2080 
2081 /*
2082  * Initialize a PartitionPruneContext for the given list of pruning steps.
2083  */
2084 static void
2085 InitPartitionPruneContext(PartitionPruneContext *context,
2086  List *pruning_steps,
2087  PartitionDesc partdesc,
2088  PartitionKey partkey,
2089  PlanState *planstate,
2090  ExprContext *econtext)
2091 {
2092  int n_steps;
2093  int partnatts;
2094  ListCell *lc;
2095 
2096  n_steps = list_length(pruning_steps);
2097 
2098  context->strategy = partkey->strategy;
2099  context->partnatts = partnatts = partkey->partnatts;
2100  context->nparts = partdesc->nparts;
2101  context->boundinfo = partdesc->boundinfo;
2102  context->partcollation = partkey->partcollation;
2103  context->partsupfunc = partkey->partsupfunc;
2104 
2105  /* We'll look up type-specific support functions as needed */
2106  context->stepcmpfuncs = (FmgrInfo *)
2107  palloc0(sizeof(FmgrInfo) * n_steps * partnatts);
2108 
2109  context->ppccontext = CurrentMemoryContext;
2110  context->planstate = planstate;
2111  context->exprcontext = econtext;
2112 
2113  /* Initialize expression state for each expression we need */
2114  context->exprstates = (ExprState **)
2115  palloc0(sizeof(ExprState *) * n_steps * partnatts);
2116  foreach(lc, pruning_steps)
2117  {
2118  PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc);
2119  ListCell *lc2 = list_head(step->exprs);
2120  int keyno;
2121 
2122  /* not needed for other step kinds */
2123  if (!IsA(step, PartitionPruneStepOp))
2124  continue;
2125 
2126  Assert(list_length(step->exprs) <= partnatts);
2127 
2128  for (keyno = 0; keyno < partnatts; keyno++)
2129  {
2130  if (bms_is_member(keyno, step->nullkeys))
2131  continue;
2132 
2133  if (lc2 != NULL)
2134  {
2135  Expr *expr = lfirst(lc2);
2136 
2137  /* not needed for Consts */
2138  if (!IsA(expr, Const))
2139  {
2140  int stateidx = PruneCxtStateIdx(partnatts,
2141  step->step.step_id,
2142  keyno);
2143 
2144  /*
2145  * When planstate is NULL, pruning_steps is known not to
2146  * contain any expressions that depend on the parent plan.
2147  * Information of any available EXTERN parameters must be
2148  * passed explicitly in that case, which the caller must
2149  * have made available via econtext.
2150  */
2151  if (planstate == NULL)
2152  context->exprstates[stateidx] =
2153  ExecInitExprWithParams(expr,
2154  econtext->ecxt_param_list_info);
2155  else
2156  context->exprstates[stateidx] =
2157  ExecInitExpr(expr, context->planstate);
2158  }
2159  lc2 = lnext(step->exprs, lc2);
2160  }
2161  }
2162  }
2163 }
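/*
 * [Editorial note, not part of the original file]  exprstates is a flattened
 * two-dimensional array: PruneCxtStateIdx(partnatts, step_id, keyno) expands
 * to partnatts * step_id + keyno, so with partnatts = 2 the expression for
 * step 3, key 1 lands in slot 2 * 3 + 1 = 7 of the n_steps * partnatts
 * entries allocated above.
 */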
2164 
2165 /*
2166  * PartitionPruneFixSubPlanMap
2167  * Fix mapping of partition indexes to subplan indexes contained in
2168  * prunestate by considering the new list of subplans that survived
2169  * initial pruning
2170  *
2171  * Current values of the indexes present in PartitionPruneState count all the
2172  * subplans that would be present before initial pruning was done. If initial
2173  * pruning got rid of some of the subplans, any subsequent pruning passes will
2174  * be looking at a different set of target subplans to choose from than those
2175  * in the pre-initial-pruning set, so the maps in PartitionPruneState
2176  * containing those indexes must be updated to reflect the new indexes of
2177  * subplans in the post-initial-pruning set.
2178  */
2179 static void
2180 PartitionPruneFixSubPlanMap(PartitionPruneState *prunestate,
2181  Bitmapset *initially_valid_subplans,
2182  int n_total_subplans)
2183 {
2184  int *new_subplan_indexes;
2185  Bitmapset *new_other_subplans;
2186  int i;
2187  int newidx;
2188 
2189  /*
2190  * First we must build a temporary array which maps old subplan indexes to
2191  * new ones. For convenience of initialization, we use 1-based indexes in
2192  * this array and leave pruned items as 0.
2193  */
2194  new_subplan_indexes = (int *) palloc0(sizeof(int) * n_total_subplans);
2195  newidx = 1;
2196  i = -1;
2197  while ((i = bms_next_member(initially_valid_subplans, i)) >= 0)
2198  {
2199  Assert(i < n_total_subplans);
2200  new_subplan_indexes[i] = newidx++;
2201  }
2202 
2203  /*
2204  * Now we can update each PartitionedRelPruneInfo's subplan_map with new
2205  * subplan indexes. We must also recompute its present_parts bitmap.
2206  */
2207  for (i = 0; i < prunestate->num_partprunedata; i++)
2208  {
2209  PartitionPruningData *prunedata = prunestate->partprunedata[i];
2210  int j;
2211 
2212  /*
2213  * Within each hierarchy, we perform this loop in back-to-front order
2214  * so that we determine present_parts for the lowest-level partitioned
2215  * tables first. This way we can tell whether a sub-partitioned
2216  * table's partitions were entirely pruned so we can exclude it from
2217  * the current level's present_parts.
2218  */
2219  for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
2220  {
2221  PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2222  int nparts = pprune->nparts;
2223  int k;
2224 
2225  /* We just rebuild present_parts from scratch */
2226  bms_free(pprune->present_parts);
2227  pprune->present_parts = NULL;
2228 
2229  for (k = 0; k < nparts; k++)
2230  {
2231  int oldidx = pprune->subplan_map[k];
2232  int subidx;
2233 
2234  /*
2235  * If this partition existed as a subplan then change the old
2236  * subplan index to the new subplan index. The new index may
2237  * become -1 if the partition was pruned above, or it may just
2238  * come earlier in the subplan list due to some subplans being
2239  * removed earlier in the list. If it's a subpartition, add
2240  * it to present_parts unless it's entirely pruned.
2241  */
2242  if (oldidx >= 0)
2243  {
2244  Assert(oldidx < n_total_subplans);
2245  pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
2246 
2247  if (new_subplan_indexes[oldidx] > 0)
2248  pprune->present_parts =
2249  bms_add_member(pprune->present_parts, k);
2250  }
2251  else if ((subidx = pprune->subpart_map[k]) >= 0)
2252  {
2253  PartitionedRelPruningData *subprune;
2254 
2255  subprune = &prunedata->partrelprunedata[subidx];
2256 
2257  if (!bms_is_empty(subprune->present_parts))
2258  pprune->present_parts =
2259  bms_add_member(pprune->present_parts, k);
2260  }
2261  }
2262  }
2263  }
2264 
2265  /*
2266  * We must also recompute the other_subplans set, since indexes in it may
2267  * change.
2268  */
2269  new_other_subplans = NULL;
2270  i = -1;
2271  while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
2272  new_other_subplans = bms_add_member(new_other_subplans,
2273  new_subplan_indexes[i] - 1);
2274 
2275  bms_free(prunestate->other_subplans);
2276  prunestate->other_subplans = new_other_subplans;
2277 
2278  pfree(new_subplan_indexes);
2279 }
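/*
 * [Editorial example, not part of the original file]  With five subplans and
 * initial pruning keeping {1, 3}, new_subplan_indexes is built as
 * [0, 1, 0, 2, 0] (1-based, 0 = pruned).  A subplan_map entry of 3 is then
 * rewritten to 2 - 1 = 1, while entries for pruned subplans become
 * 0 - 1 = -1, matching the convention used elsewhere for "no subplan".
 */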
2280 
2281 /*
2282  * ExecFindMatchingSubPlans
2283  * Determine which subplans match the pruning steps detailed in
2284  * 'prunestate' for the current comparison expression values.
2285  *
2286  * Pass initial_prune if PARAM_EXEC Params cannot yet be evaluated. This
2287  * differentiates the initial executor-time pruning step from later
2288  * runtime pruning.
2289  */
2290 Bitmapset *
2291 ExecFindMatchingSubPlans(PartitionPruneState *prunestate,
2292  bool initial_prune)
2293 {
2294  Bitmapset *result = NULL;
2295  MemoryContext oldcontext;
2296  int i;
2297 
2298  /*
2299  * Either we're here on the initial prune done during pruning
2300  * initialization, or we're at a point where PARAM_EXEC Params can be
2301  * evaluated *and* there are steps in which to do so.
2302  */
2303  Assert(initial_prune || prunestate->do_exec_prune);
2304 
2305  /*
2306  * Switch to a temp context to avoid leaking memory in the executor's
2307  * query-lifespan memory context.
2308  */
2309  oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
2310 
2311  /*
2312  * For each hierarchy, do the pruning tests, and add nondeletable
2313  * subplans' indexes to "result".
2314  */
2315  for (i = 0; i < prunestate->num_partprunedata; i++)
2316  {
2317  PartitionPruningData *prunedata = prunestate->partprunedata[i];
2318  PartitionedRelPruningData *pprune;
2319 
2320  /*
2321  * We pass the zeroth item, belonging to the root table of the
2322  * hierarchy, and find_matching_subplans_recurse() takes care of
2323  * recursing to other (lower-level) parents as needed.
2324  */
2325  pprune = &prunedata->partrelprunedata[0];
2326  find_matching_subplans_recurse(prunedata, pprune, initial_prune,
2327  &result);
2328 
2329  /* Expression eval may have used space in ExprContext too */
2330  if (pprune->exec_pruning_steps)
2331  ResetExprContext(pprune->exec_context.exprcontext);
2332  }
2333 
2334  /* Add in any subplans that partition pruning didn't account for */
2335  result = bms_add_members(result, prunestate->other_subplans);
2336 
2337  MemoryContextSwitchTo(oldcontext);
2338 
2339  /* Copy result out of the temp context before we reset it */
2340  result = bms_copy(result);
2341 
2342  MemoryContextReset(prunestate->prune_context);
2343 
2344  return result;
2345 }
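/*
 * [Editorial sketch, not part of the original file]  Modelled loosely on the
 * Append rescan path: a later pruning pass is triggered only when a Param
 * listed in execparamids has changed.
 */
#ifdef NOT_USED
static void
exec_prune_example(AppendState *node)
{
	if (node->as_prune_state &&
		bms_overlap(node->ps.chgParam,
					node->as_prune_state->execparamids))
		node->as_valid_subplans =
			ExecFindMatchingSubPlans(node->as_prune_state, false);
}
#endif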
2346 
2347 /*
2348  * find_matching_subplans_recurse
2349  * Recursive worker function for ExecFindMatchingSubPlans
2350  *
2351  * Adds valid (non-prunable) subplan IDs to *validsubplans
2352  */
2353 static void
2354 find_matching_subplans_recurse(PartitionPruningData *prunedata,
2355  PartitionedRelPruningData *pprune,
2356  bool initial_prune,
2357  Bitmapset **validsubplans)
2358 {
2359  Bitmapset *partset;
2360  int i;
2361 
2362  /* Guard against stack overflow due to overly deep partition hierarchy. */
2363  check_stack_depth();
2364 
2365  /*
2366  * Prune as appropriate, if we have pruning steps matching the current
2367  * execution context. Otherwise just include all partitions at this
2368  * level.
2369  */
2370  if (initial_prune && pprune->initial_pruning_steps)
2371  partset = get_matching_partitions(&pprune->initial_context,
2372  pprune->initial_pruning_steps);
2373  else if (!initial_prune && pprune->exec_pruning_steps)
2374  partset = get_matching_partitions(&pprune->exec_context,
2375  pprune->exec_pruning_steps);
2376  else
2377  partset = pprune->present_parts;
2378 
2379  /* Translate partset into subplan indexes */
2380  i = -1;
2381  while ((i = bms_next_member(partset, i)) >= 0)
2382  {
2383  if (pprune->subplan_map[i] >= 0)
2384  *validsubplans = bms_add_member(*validsubplans,
2385  pprune->subplan_map[i]);
2386  else
2387  {
2388  int partidx = pprune->subpart_map[i];
2389 
2390  if (partidx >= 0)
2391  find_matching_subplans_recurse(prunedata,
2392  &prunedata->partrelprunedata[partidx],
2393  initial_prune, validsubplans);
2394  else
2395  {
2396  /*
2397  * We get here if the planner already pruned all the sub-
2398  * partitions for this partition. Silently ignore this
2399  * partition in this case. The end result is the same: we
2400  * would have pruned all partitions just the same, but we
2401  * don't have any pruning steps to execute to verify this.
2402  */
2403  }
2404  }
2405  }
2406 }
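/*
 * [Editorial note, not part of the original file]  To summarize the mapping
 * convention used throughout: for partition index i, subplan_map[i] >= 0
 * names a leaf partition's subplan; otherwise subpart_map[i] >= 0 names the
 * PartitionedRelPruningData of a sub-partitioned child to recurse into; if
 * both are -1, every relation under that partition was already pruned by the
 * planner or by initial pruning.
 */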