/*-------------------------------------------------------------------------
 *
 * execPartition.c
 *	  Support routines for partitioning.
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/execPartition.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/table.h"
#include "access/tableam.h"
#include "catalog/partition.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_type.h"
#include "executor/execPartition.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "partitioning/partbounds.h"
#include "partitioning/partdesc.h"
#include "partitioning/partprune.h"
#include "rewrite/rewriteManip.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"


/*-----------------------
 * PartitionTupleRouting - Encapsulates all information required to
 * route a tuple inserted into a partitioned table to one of its leaf
 * partitions.
 *
 * partition_root
 *		The partitioned table that's the target of the command.
 *
 * partition_dispatch_info
 *		Array of 'max_dispatch' elements containing a pointer to a
 *		PartitionDispatch object for every partitioned table touched by tuple
 *		routing.  The entry for the target partitioned table is *always*
 *		present in the 0th element of this array.  See comment for
 *		PartitionDispatchData->indexes for details on how this array is
 *		indexed.
 *
 * num_dispatch
 *		The current number of items stored in the 'partition_dispatch_info'
 *		array.  Also serves as the index of the next free array element for
 *		new PartitionDispatch objects that need to be stored.
 *
 * max_dispatch
 *		The current allocated size of the 'partition_dispatch_info' array.
 *
 * partitions
 *		Array of 'max_partitions' elements containing a pointer to a
 *		ResultRelInfo for every leaf partition touched by tuple routing.
 *		Some of these are pointers to ResultRelInfos which are borrowed out of
 *		'subplan_resultrel_htab'.  The remainder have been built especially
 *		for tuple routing.  See comment for PartitionDispatchData->indexes for
 *		details on how this array is indexed.
 *
 * num_partitions
 *		The current number of items stored in the 'partitions' array.  Also
 *		serves as the index of the next free array element for new
 *		ResultRelInfo objects that need to be stored.
 *
 * max_partitions
 *		The current allocated size of the 'partitions' array.
 *
 * subplan_resultrel_htab
 *		Hash table to store subplan ResultRelInfos by Oid.  This is used to
 *		cache ResultRelInfos from subplans of an UPDATE ModifyTable node;
 *		NULL in other cases.  Some of these may be useful for tuple routing
 *		to save having to build duplicates.
 *
 * memcxt
 *		Memory context used to allocate subsidiary structs.
 *-----------------------
 */
struct PartitionTupleRouting
{
	Relation	partition_root;
	PartitionDispatch *partition_dispatch_info;
	int			num_dispatch;
	int			max_dispatch;
	ResultRelInfo **partitions;
	int			num_partitions;
	int			max_partitions;
	HTAB	   *subplan_resultrel_htab;
	MemoryContext memcxt;
};

/*-----------------------
 * PartitionDispatch - information about one partitioned table in a partition
 * hierarchy required to route a tuple to any of its partitions.  A
 * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
 * struct and stored inside its 'partition_dispatch_info' array.
 *
 * reldesc
 *		Relation descriptor of the table
 *
 * key
 *		Partition key information of the table
 *
 * keystate
 *		Execution state required for expressions in the partition key
 *
 * partdesc
 *		Partition descriptor of the table
 *
 * tupslot
 *		A standalone TupleTableSlot initialized with this table's tuple
 *		descriptor, or NULL if no tuple conversion from the parent is
 *		required.
 *
 * tupmap
 *		AttrMap to convert from the parent's rowtype to this table's rowtype
 *		(when extracting the partition key of a tuple just before routing it
 *		through this table).  A NULL value is stored if no tuple conversion is
 *		required.
 *
 * indexes
 *		Array of partdesc->nparts elements.  For leaf partitions the index
 *		corresponds to the partition's ResultRelInfo in the encapsulating
 *		PartitionTupleRouting's partitions array.  For partitioned partitions,
 *		the index corresponds to the PartitionDispatch for it in its
 *		partition_dispatch_info array.  -1 indicates we've not yet allocated
 *		anything in PartitionTupleRouting for the partition.
 *-----------------------
 */
typedef struct PartitionDispatchData
{
	Relation	reldesc;
	PartitionKey key;
	List	   *keystate;		/* list of ExprState */
	PartitionDesc partdesc;
	TupleTableSlot *tupslot;
	AttrMap    *tupmap;
	int			indexes[FLEXIBLE_ARRAY_MEMBER];
} PartitionDispatchData;

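/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): for a two-level hierarchy root -> sub -> leaf, after routing a
 * single tuple to 'leaf' the routing state might look like this:
 *
 *	proute->partition_dispatch_info[0] = PartitionDispatch for 'root'
 *	proute->partition_dispatch_info[1] = PartitionDispatch for 'sub'
 *	root_pd->indexes[i] == 1	(i = 'sub's offset in root's partdesc)
 *	sub_pd->indexes[j] == 0		(j = 'leaf's offset in sub's partdesc)
 *	proute->partitions[0] = ResultRelInfo for 'leaf'
 *
 * Every other indexes[] entry stays -1 until a tuple is routed that way.
 */
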
/* struct to hold result relations coming from UPDATE subplans */
typedef struct SubplanResultRelHashElem
{
	Oid			relid;			/* hash key -- must be first */
	ResultRelInfo *rri;
} SubplanResultRelHashElem;


static void ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
										   PartitionTupleRouting *proute);
static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
											EState *estate, PartitionTupleRouting *proute,
											PartitionDispatch dispatch,
											ResultRelInfo *rootResultRelInfo,
											int partidx);
static void ExecInitRoutingInfo(ModifyTableState *mtstate,
								EState *estate,
								PartitionTupleRouting *proute,
								PartitionDispatch dispatch,
								ResultRelInfo *partRelInfo,
								int partidx);
static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate,
													   PartitionTupleRouting *proute,
													   Oid partoid, PartitionDispatch parent_pd, int partidx);
static void FormPartitionKeyDatum(PartitionDispatch pd,
								  TupleTableSlot *slot,
								  EState *estate,
								  Datum *values,
								  bool *isnull);
static int	get_partition_for_tuple(PartitionDispatch pd, Datum *values,
									bool *isnull);
static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
												  Datum *values,
												  bool *isnull,
												  int maxfieldlen);
static List *adjust_partition_tlist(List *tlist, TupleConversionMap *map);
static void ExecInitPruningContext(PartitionPruneContext *context,
								   List *pruning_steps,
								   PartitionDesc partdesc,
								   PartitionKey partkey,
								   PlanState *planstate);
static void find_matching_subplans_recurse(PartitionPruningData *prunedata,
										   PartitionedRelPruningData *pprune,
										   bool initial_prune,
										   Bitmapset **validsubplans);


/*
 * ExecSetupPartitionTupleRouting - sets up information needed during
 * tuple routing for partitioned tables, encapsulates it in
 * PartitionTupleRouting, and returns it.
 *
 * Callers must use the returned PartitionTupleRouting during calls to
 * ExecFindPartition().  The actual ResultRelInfo for a partition is only
 * allocated when the partition is found for the first time.
 *
 * The current memory context is used to allocate this struct and all
 * subsidiary structs that will be allocated from it later on.  Typically
 * it should be estate->es_query_cxt.
 */
PartitionTupleRouting *
ExecSetupPartitionTupleRouting(EState *estate, ModifyTableState *mtstate,
							   Relation rel)
{
	PartitionTupleRouting *proute;
	ModifyTable *node = mtstate ? (ModifyTable *) mtstate->ps.plan : NULL;

	/*
	 * Here we attempt to expend as little effort as possible in setting up
	 * the PartitionTupleRouting.  Each partition's ResultRelInfo is built on
	 * demand, only when we actually need to route a tuple to that partition.
	 * The reason for this is that a common case is for INSERT to insert a
	 * single tuple into a partitioned table and this must be fast.
	 */
	proute = (PartitionTupleRouting *) palloc0(sizeof(PartitionTupleRouting));
	proute->partition_root = rel;
	proute->memcxt = CurrentMemoryContext;
	/* Rest of members initialized by zeroing */

	/*
	 * Initialize this table's PartitionDispatch object.  Here we pass in the
	 * parent as NULL as we don't need to care about any parent of the target
	 * partitioned table.
	 */
	ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
								  NULL, 0);

	/*
	 * If performing an UPDATE with tuple routing, we can reuse partition
	 * sub-plan result rels.  We build a hash table to map the OIDs of
	 * partitions present in mtstate->resultRelInfo to their ResultRelInfos.
	 * Every time a tuple is routed to a partition that we've yet to set the
	 * ResultRelInfo for, before we go to the trouble of making one, we check
	 * for a pre-made one in the hash table.
	 */
	if (node && node->operation == CMD_UPDATE)
		ExecHashSubPlanResultRelsByOid(mtstate, proute);

	return proute;
}

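/*
 * Usage sketch (an editorial addition, not part of the original file):
 * callers such as ExecInsert() and COPY FROM follow this general outline:
 *
 *	proute = ExecSetupPartitionTupleRouting(estate, mtstate, rootRel);
 *	for each tuple placed in 'slot':
 *		partRelInfo = ExecFindPartition(mtstate, rootResultRelInfo,
 *										proute, slot, estate);
 *		... insert the routed tuple into partRelInfo's relation ...
 *	ExecCleanupTupleRouting(mtstate, proute);
 */
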
/*
 * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
 * the tuple contained in *slot should belong to.
 *
 * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
 * one up or reuse one from mtstate's resultRelInfo array.  When reusing a
 * ResultRelInfo from the mtstate we verify that the relation is a valid
 * target for INSERTs and then set up a PartitionRoutingInfo for it.
 *
 * rootResultRelInfo is the relation named in the query.
 *
 * estate must be non-NULL; we'll need it to compute any expressions in the
 * partition keys.  Also, its per-tuple contexts are used as evaluation
 * scratch space.
 *
 * If no leaf partition is found, this routine errors out with the appropriate
 * error message.  An error may also be raised if the found target partition
 * is not a valid target for an INSERT.
 */
ResultRelInfo *
ExecFindPartition(ModifyTableState *mtstate,
				  ResultRelInfo *rootResultRelInfo,
				  PartitionTupleRouting *proute,
				  TupleTableSlot *slot, EState *estate)
{
	PartitionDispatch *pd = proute->partition_dispatch_info;
	Datum		values[PARTITION_MAX_KEYS];
	bool		isnull[PARTITION_MAX_KEYS];
	Relation	rel;
	PartitionDispatch dispatch;
	PartitionDesc partdesc;
	ExprContext *ecxt = GetPerTupleExprContext(estate);
	TupleTableSlot *ecxt_scantuple_old = ecxt->ecxt_scantuple;
	TupleTableSlot *myslot = NULL;
	MemoryContext oldcxt;

	/* use per-tuple context here to avoid leaking memory */
	oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

	/*
	 * First check the root table's partition constraint, if any.  No point in
	 * routing the tuple if it doesn't belong in the root table itself.
	 */
	if (rootResultRelInfo->ri_PartitionCheck)
		ExecPartitionCheck(rootResultRelInfo, slot, estate, true);

	/* start with the root partitioned table */
	dispatch = pd[0];
	while (true)
	{
		AttrMap    *map = dispatch->tupmap;
		int			partidx = -1;

		CHECK_FOR_INTERRUPTS();

		rel = dispatch->reldesc;
		partdesc = dispatch->partdesc;

		/*
		 * Convert the tuple to this parent's layout, if different from the
		 * current relation.
		 */
		myslot = dispatch->tupslot;
		if (myslot != NULL)
		{
			Assert(map != NULL);
			slot = execute_attr_map_slot(map, slot, myslot);
		}

		/*
		 * Extract partition key from tuple.  Expression evaluation machinery
		 * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
		 * point to the correct tuple slot.  The slot might have changed from
		 * what was used for the parent table if the table of the current
		 * partitioning level has a different tuple descriptor from the
		 * parent.  So update ecxt_scantuple accordingly.
		 */
		ecxt->ecxt_scantuple = slot;
		FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);

		/*
		 * If this partitioned table has no partitions or no partition for
		 * these values, error out.
		 */
		if (partdesc->nparts == 0 ||
			(partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
		{
			char	   *val_desc;

			val_desc = ExecBuildSlotPartitionKeyDescription(rel,
															values, isnull, 64);
			Assert(OidIsValid(RelationGetRelid(rel)));
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("no partition of relation \"%s\" found for row",
							RelationGetRelationName(rel)),
					 val_desc ?
					 errdetail("Partition key of the failing row contains %s.",
							   val_desc) : 0,
					 errtable(rel)));
		}

		if (partdesc->is_leaf[partidx])
		{
			ResultRelInfo *rri;

			/*
			 * Look to see if we've already got a ResultRelInfo for this
			 * partition.
			 */
			if (likely(dispatch->indexes[partidx] >= 0))
			{
				/* ResultRelInfo already built */
				Assert(dispatch->indexes[partidx] < proute->num_partitions);
				rri = proute->partitions[dispatch->indexes[partidx]];
			}
			else
			{
				bool		found = false;

				/*
				 * We have not yet set up a ResultRelInfo for this partition,
				 * but if we have a subplan hash table, we might have one
				 * there.  If not, we'll have to create one.
				 */
				if (proute->subplan_resultrel_htab)
				{
					Oid			partoid = partdesc->oids[partidx];
					SubplanResultRelHashElem *elem;

					elem = hash_search(proute->subplan_resultrel_htab,
									   &partoid, HASH_FIND, NULL);
					if (elem)
					{
						found = true;
						rri = elem->rri;

						/* Verify this ResultRelInfo allows INSERTs */
						CheckValidResultRel(rri, CMD_INSERT);

						/* Set up the PartitionRoutingInfo for it */
						ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
											rri, partidx);
					}
				}

				/* We need to create a new one. */
				if (!found)
					rri = ExecInitPartitionInfo(mtstate, estate, proute,
												dispatch,
												rootResultRelInfo, partidx);
			}

			/* Release the tuple in the lowest parent's dedicated slot. */
			if (slot == myslot)
				ExecClearTuple(myslot);

			MemoryContextSwitchTo(oldcxt);
			ecxt->ecxt_scantuple = ecxt_scantuple_old;
			return rri;
		}
		else
		{
			/*
			 * Partition is a sub-partitioned table; get the PartitionDispatch
			 */
			if (likely(dispatch->indexes[partidx] >= 0))
			{
				/* Already built. */
				Assert(dispatch->indexes[partidx] < proute->num_dispatch);

				/*
				 * Move down to the next partition level and search again
				 * until we find a leaf partition that matches this tuple
				 */
				dispatch = pd[dispatch->indexes[partidx]];
			}
			else
			{
				/* Not yet built. Do that now. */
				PartitionDispatch subdispatch;

				/*
				 * Create the new PartitionDispatch.  We pass the current one
				 * in as the parent PartitionDispatch
				 */
				subdispatch = ExecInitPartitionDispatchInfo(mtstate->ps.state,
															proute,
															partdesc->oids[partidx],
															dispatch, partidx);
				Assert(dispatch->indexes[partidx] >= 0 &&
					   dispatch->indexes[partidx] < proute->num_dispatch);
				dispatch = subdispatch;
			}
		}
	}
}

/*
 * ExecHashSubPlanResultRelsByOid
 *		Build a hash table to allow fast lookups of subplan ResultRelInfos by
 *		partition Oid.  We also populate the subplan ResultRelInfo with an
 *		ri_PartitionRoot.
 */
static void
ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
							   PartitionTupleRouting *proute)
{
	HASHCTL		ctl;
	HTAB	   *htab;
	int			i;

	memset(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(SubplanResultRelHashElem);
	ctl.hcxt = CurrentMemoryContext;

	htab = hash_create("PartitionTupleRouting table", mtstate->mt_nplans,
					   &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
	proute->subplan_resultrel_htab = htab;

	/* Hash all subplans by their Oid */
	for (i = 0; i < mtstate->mt_nplans; i++)
	{
		ResultRelInfo *rri = &mtstate->resultRelInfo[i];
		bool		found;
		Oid			partoid = RelationGetRelid(rri->ri_RelationDesc);
		SubplanResultRelHashElem *elem;

		elem = (SubplanResultRelHashElem *)
			hash_search(htab, &partoid, HASH_ENTER, &found);
		Assert(!found);
		elem->rri = rri;

		/*
		 * This is required in order to convert the partition's tuple to be
		 * compatible with the root partitioned table's tuple descriptor.  When
		 * generating the per-subplan result rels, this was not set.
		 */
		rri->ri_PartitionRoot = proute->partition_root;
	}
}

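/*
 * Illustrative note (an editorial addition, not part of the original file):
 * once this table exists, the reuse test in ExecFindPartition() is a plain
 * dynahash lookup,
 *
 *	elem = hash_search(proute->subplan_resultrel_htab,
 *					   &partoid, HASH_FIND, NULL);
 *
 * so an UPDATE that routes a row into a partition already handled by one of
 * its own subplans borrows that subplan's ResultRelInfo instead of opening
 * the relation and building a new one.
 */
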
/*
 * ExecInitPartitionInfo
 *		Lock the partition and initialize ResultRelInfo.  Also setup other
 *		information for the partition and store it in the next empty slot in
 *		the proute->partitions array.
 *
 * Returns the ResultRelInfo
 */
static ResultRelInfo *
ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
					  PartitionTupleRouting *proute,
					  PartitionDispatch dispatch,
					  ResultRelInfo *rootResultRelInfo,
					  int partidx)
{
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	Relation	rootrel = rootResultRelInfo->ri_RelationDesc,
				partrel;
	Relation	firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
	ResultRelInfo *leaf_part_rri;
	MemoryContext oldcxt;
	AttrMap    *part_attmap = NULL;
	bool		found_whole_row;

	oldcxt = MemoryContextSwitchTo(proute->memcxt);

	partrel = table_open(dispatch->partdesc->oids[partidx], RowExclusiveLock);

	leaf_part_rri = makeNode(ResultRelInfo);
	InitResultRelInfo(leaf_part_rri,
					  partrel,
					  node ? node->rootRelation : 1,
					  rootrel,
					  estate->es_instrument);

	/*
	 * Verify result relation is a valid target for an INSERT.  An UPDATE of a
	 * partition-key becomes a DELETE+INSERT operation, so this check is still
	 * required when the operation is CMD_UPDATE.
	 */
	CheckValidResultRel(leaf_part_rri, CMD_INSERT);

	/*
	 * Open partition indices.  The user may have asked to check for conflicts
	 * within this leaf partition and do "nothing" instead of throwing an
	 * error.  Be prepared in that case by initializing the index information
	 * needed by ExecInsert() to perform speculative insertions.
	 */
	if (partrel->rd_rel->relhasindex &&
		leaf_part_rri->ri_IndexRelationDescs == NULL)
		ExecOpenIndices(leaf_part_rri,
						(node != NULL &&
						 node->onConflictAction != ONCONFLICT_NONE));

	/*
	 * Build WITH CHECK OPTION constraints for the partition.  Note that we
	 * didn't build the withCheckOptionList for partitions within the planner,
	 * but simple translation of varattnos will suffice.  This only occurs for
	 * the INSERT case or in the case of UPDATE tuple routing where we didn't
	 * find a result rel to reuse in ExecSetupPartitionTupleRouting().
	 */
	if (node && node->withCheckOptionLists != NIL)
	{
		List	   *wcoList;
		List	   *wcoExprs = NIL;
		ListCell   *ll;
		int			firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;

		/*
		 * In the case of INSERT on a partitioned table, there is only one
		 * plan.  Likewise, there is only one WCO list, not one per partition.
		 * For UPDATE, there are as many WCO lists as there are plans.
		 */
		Assert((node->operation == CMD_INSERT &&
				list_length(node->withCheckOptionLists) == 1 &&
				list_length(node->plans) == 1) ||
			   (node->operation == CMD_UPDATE &&
				list_length(node->withCheckOptionLists) ==
				list_length(node->plans)));

		/*
		 * Use the WCO list of the first plan as a reference to calculate
		 * attno's for the WCO list of this partition.  In the INSERT case,
		 * that refers to the root partitioned table, whereas in the UPDATE
		 * tuple routing case, that refers to the first partition in the
		 * mtstate->resultRelInfo array.  In any case, both that relation and
		 * this partition should have the same columns, so we should be able
		 * to map attributes successfully.
		 */
		wcoList = linitial(node->withCheckOptionLists);

		/*
		 * Convert Vars in it to contain this partition's attribute numbers.
		 */
		part_attmap =
			build_attrmap_by_name(RelationGetDescr(partrel),
								  RelationGetDescr(firstResultRel));
		wcoList = (List *)
			map_variable_attnos((Node *) wcoList,
								firstVarno, 0,
								part_attmap,
								RelationGetForm(partrel)->reltype,
								&found_whole_row);
		/* We ignore the value of found_whole_row. */

		foreach(ll, wcoList)
		{
			WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll));
			ExprState  *wcoExpr = ExecInitQual(castNode(List, wco->qual),
											   &mtstate->ps);

			wcoExprs = lappend(wcoExprs, wcoExpr);
		}

		leaf_part_rri->ri_WithCheckOptions = wcoList;
		leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
	}

	/*
	 * Build the RETURNING projection for the partition.  Note that we didn't
	 * build the returningList for partitions within the planner, but simple
	 * translation of varattnos will suffice.  This only occurs for the INSERT
	 * case or in the case of UPDATE tuple routing where we didn't find a
	 * result rel to reuse in ExecSetupPartitionTupleRouting().
	 */
	if (node && node->returningLists != NIL)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;
		List	   *returningList;
		int			firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;

		/* See the comment above for WCO lists. */
		Assert((node->operation == CMD_INSERT &&
				list_length(node->returningLists) == 1 &&
				list_length(node->plans) == 1) ||
			   (node->operation == CMD_UPDATE &&
				list_length(node->returningLists) ==
				list_length(node->plans)));

		/*
		 * Use the RETURNING list of the first plan as a reference to
		 * calculate attno's for the RETURNING list of this partition.  See
		 * the comment above for WCO lists for more details on why this is
		 * okay.
		 */
		returningList = linitial(node->returningLists);

		/*
		 * Convert Vars in it to contain this partition's attribute numbers.
		 */
		if (part_attmap == NULL)
			part_attmap =
				build_attrmap_by_name(RelationGetDescr(partrel),
									  RelationGetDescr(firstResultRel));
		returningList = (List *)
			map_variable_attnos((Node *) returningList,
								firstVarno, 0,
								part_attmap,
								RelationGetForm(partrel)->reltype,
								&found_whole_row);
		/* We ignore the value of found_whole_row. */

		leaf_part_rri->ri_returningList = returningList;

		/*
		 * Initialize the projection itself.
		 *
		 * Use the slot and the expression context that would have been set up
		 * in ExecInitModifyTable() for projection's output.
		 */
		Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
		slot = mtstate->ps.ps_ResultTupleSlot;
		Assert(mtstate->ps.ps_ExprContext != NULL);
		econtext = mtstate->ps.ps_ExprContext;
		leaf_part_rri->ri_projectReturning =
			ExecBuildProjectionInfo(returningList, econtext, slot,
									&mtstate->ps, RelationGetDescr(partrel));
	}

	/* Set up information needed for routing tuples to the partition. */
	ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
						leaf_part_rri, partidx);

	/*
	 * If there is an ON CONFLICT clause, initialize state for it.
	 */
	if (node && node->onConflictAction != ONCONFLICT_NONE)
	{
		int			firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
		TupleDesc	partrelDesc = RelationGetDescr(partrel);
		ExprContext *econtext = mtstate->ps.ps_ExprContext;
		ListCell   *lc;
		List	   *arbiterIndexes = NIL;

		/*
		 * If there is a list of arbiter indexes, map it to a list of indexes
		 * in the partition.  We do that by scanning the partition's index
		 * list and searching for ancestry relationships to each index in the
		 * ancestor table.
		 */
		if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) > 0)
		{
			List	   *childIdxs;

			childIdxs = RelationGetIndexList(leaf_part_rri->ri_RelationDesc);

			foreach(lc, childIdxs)
			{
				Oid			childIdx = lfirst_oid(lc);
				List	   *ancestors;
				ListCell   *lc2;

				ancestors = get_partition_ancestors(childIdx);
				foreach(lc2, rootResultRelInfo->ri_onConflictArbiterIndexes)
				{
					if (list_member_oid(ancestors, lfirst_oid(lc2)))
						arbiterIndexes = lappend_oid(arbiterIndexes, childIdx);
				}
				list_free(ancestors);
			}
		}

		/*
		 * If the resulting lists are of unequal length, something is wrong.
		 * (This shouldn't happen, since arbiter index selection should not
		 * pick up an invalid index.)
		 */
		if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
			list_length(arbiterIndexes))
			elog(ERROR, "invalid arbiter index list");
		leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;

		/*
		 * In the DO UPDATE case, we have some more state to initialize.
		 */
		if (node->onConflictAction == ONCONFLICT_UPDATE)
		{
			TupleConversionMap *map;

			map = leaf_part_rri->ri_PartitionInfo->pi_RootToPartitionMap;

			Assert(node->onConflictSet != NIL);
			Assert(rootResultRelInfo->ri_onConflict != NULL);

			leaf_part_rri->ri_onConflict = makeNode(OnConflictSetState);

			/*
			 * Need a separate existing slot for each partition, as the
			 * partition could be of a different AM, even if the tuple
			 * descriptors match.
			 */
			leaf_part_rri->ri_onConflict->oc_Existing =
				table_slot_create(leaf_part_rri->ri_RelationDesc,
								  &mtstate->ps.state->es_tupleTable);

			/*
			 * If the partition's tuple descriptor matches exactly the root
			 * parent (the common case), we can re-use most of the parent's ON
			 * CONFLICT SET state, skipping a bunch of work.  Otherwise, we
			 * need to create state specific to this partition.
			 */
			if (map == NULL)
			{
				/*
				 * It's safe to reuse these from the partition root, as we
				 * only process one tuple at a time (therefore we won't
				 * overwrite needed data in slots), and the results of
				 * projections are independent of the underlying storage.
				 * Projections and where clauses themselves don't store state
				 * / are independent of the underlying storage.
				 */
				leaf_part_rri->ri_onConflict->oc_ProjSlot =
					rootResultRelInfo->ri_onConflict->oc_ProjSlot;
				leaf_part_rri->ri_onConflict->oc_ProjInfo =
					rootResultRelInfo->ri_onConflict->oc_ProjInfo;
				leaf_part_rri->ri_onConflict->oc_WhereClause =
					rootResultRelInfo->ri_onConflict->oc_WhereClause;
			}
			else
			{
				List	   *onconflset;
				TupleDesc	tupDesc;
				bool		found_whole_row;

				/*
				 * Translate expressions in onConflictSet to account for
				 * different attribute numbers.  For that, map partition
				 * varattnos twice: first to catch the EXCLUDED
				 * pseudo-relation (INNER_VAR), and second to handle the main
				 * target relation (firstVarno).
				 */
				onconflset = (List *) copyObject((Node *) node->onConflictSet);
				if (part_attmap == NULL)
					part_attmap =
						build_attrmap_by_name(RelationGetDescr(partrel),
											  RelationGetDescr(firstResultRel));
				onconflset = (List *)
					map_variable_attnos((Node *) onconflset,
										INNER_VAR, 0,
										part_attmap,
										RelationGetForm(partrel)->reltype,
										&found_whole_row);
				/* We ignore the value of found_whole_row. */
				onconflset = (List *)
					map_variable_attnos((Node *) onconflset,
										firstVarno, 0,
										part_attmap,
										RelationGetForm(partrel)->reltype,
										&found_whole_row);
				/* We ignore the value of found_whole_row. */

				/* Finally, adjust this tlist to match the partition. */
				onconflset = adjust_partition_tlist(onconflset, map);

				/* create the tuple slot for the UPDATE SET projection */
				tupDesc = ExecTypeFromTL(onconflset);
				leaf_part_rri->ri_onConflict->oc_ProjSlot =
					ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc,
										   &TTSOpsVirtual);

				/* build UPDATE SET projection state */
				leaf_part_rri->ri_onConflict->oc_ProjInfo =
					ExecBuildProjectionInfo(onconflset, econtext,
											leaf_part_rri->ri_onConflict->oc_ProjSlot,
											&mtstate->ps, partrelDesc);

				/*
				 * If there is a WHERE clause, initialize state where it will
				 * be evaluated, mapping the attribute numbers appropriately.
				 * As with onConflictSet, we need to map partition varattnos
				 * to the partition's tupdesc.
				 */
				if (node->onConflictWhere)
				{
					List	   *clause;

					clause = copyObject((List *) node->onConflictWhere);
					clause = (List *)
						map_variable_attnos((Node *) clause,
											INNER_VAR, 0,
											part_attmap,
											RelationGetForm(partrel)->reltype,
											&found_whole_row);
					/* We ignore the value of found_whole_row. */
					clause = (List *)
						map_variable_attnos((Node *) clause,
											firstVarno, 0,
											part_attmap,
											RelationGetForm(partrel)->reltype,
											&found_whole_row);
					/* We ignore the value of found_whole_row. */
					leaf_part_rri->ri_onConflict->oc_WhereClause =
						ExecInitQual((List *) clause, &mtstate->ps);
				}
			}
		}
	}

	/*
	 * Since we've just initialized this ResultRelInfo, it's not in any list
	 * attached to the estate as yet.  Add it, so that it can be found later.
	 *
	 * Note that the entries in this list appear in no predetermined order,
	 * because partition result rels are initialized as and when they're
	 * needed.
	 */
	MemoryContextSwitchTo(estate->es_query_cxt);
	estate->es_tuple_routing_result_relations =
		lappend(estate->es_tuple_routing_result_relations,
				leaf_part_rri);

	MemoryContextSwitchTo(oldcxt);

	return leaf_part_rri;
}

/*
 * ExecInitRoutingInfo
 *		Set up information needed for translating tuples between root
 *		partitioned table format and partition format, and keep track of it
 *		in PartitionTupleRouting.
 */
static void
ExecInitRoutingInfo(ModifyTableState *mtstate,
					EState *estate,
					PartitionTupleRouting *proute,
					PartitionDispatch dispatch,
					ResultRelInfo *partRelInfo,
					int partidx)
{
	MemoryContext oldcxt;
	PartitionRoutingInfo *partrouteinfo;
	int			rri_index;

	oldcxt = MemoryContextSwitchTo(proute->memcxt);

	partrouteinfo = palloc(sizeof(PartitionRoutingInfo));

	/*
	 * Set up a tuple conversion map to convert a tuple routed to the
	 * partition from the parent's type to the partition's.
	 */
	partrouteinfo->pi_RootToPartitionMap =
		convert_tuples_by_name(RelationGetDescr(partRelInfo->ri_PartitionRoot),
							   RelationGetDescr(partRelInfo->ri_RelationDesc));

	/*
	 * If a partition has a different rowtype than the root parent, initialize
	 * a slot dedicated to storing this partition's tuples.  The slot is used
	 * for various operations that are applied to tuples after routing, such
	 * as checking constraints.
	 */
	if (partrouteinfo->pi_RootToPartitionMap != NULL)
	{
		Relation	partrel = partRelInfo->ri_RelationDesc;

		/*
		 * Initialize the slot itself setting its descriptor to this
		 * partition's TupleDesc; TupleDesc reference will be released at the
		 * end of the command.
		 */
		partrouteinfo->pi_PartitionTupleSlot =
			table_slot_create(partrel, &estate->es_tupleTable);
	}
	else
		partrouteinfo->pi_PartitionTupleSlot = NULL;

	/*
	 * Also, if transition capture is required, store a map to convert tuples
	 * from partition's rowtype to the root partition table's.
	 */
	if (mtstate &&
		(mtstate->mt_transition_capture || mtstate->mt_oc_transition_capture))
	{
		partrouteinfo->pi_PartitionToRootMap =
			convert_tuples_by_name(RelationGetDescr(partRelInfo->ri_RelationDesc),
								   RelationGetDescr(partRelInfo->ri_PartitionRoot));
	}
	else
		partrouteinfo->pi_PartitionToRootMap = NULL;

	/*
	 * If the partition is a foreign table, let the FDW init itself for
	 * routing tuples to the partition.
	 */
	if (partRelInfo->ri_FdwRoutine != NULL &&
		partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
		partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);

	partRelInfo->ri_PartitionInfo = partrouteinfo;
	partRelInfo->ri_CopyMultiInsertBuffer = NULL;

	/*
	 * Keep track of it in the PartitionTupleRouting->partitions array.
	 */
	Assert(dispatch->indexes[partidx] == -1);

	rri_index = proute->num_partitions++;

	/* Allocate or enlarge the array, as needed */
	if (proute->num_partitions >= proute->max_partitions)
	{
		if (proute->max_partitions == 0)
		{
			proute->max_partitions = 8;
			proute->partitions = (ResultRelInfo **)
				palloc(sizeof(ResultRelInfo *) * proute->max_partitions);
		}
		else
		{
			proute->max_partitions *= 2;
			proute->partitions = (ResultRelInfo **)
				repalloc(proute->partitions, sizeof(ResultRelInfo *) *
						 proute->max_partitions);
		}
	}

	proute->partitions[rri_index] = partRelInfo;
	dispatch->indexes[partidx] = rri_index;

	MemoryContextSwitchTo(oldcxt);
}

/*
 * ExecInitPartitionDispatchInfo
 *		Lock the partitioned table (if not locked already) and initialize
 *		PartitionDispatch for a partitioned table and store it in the next
 *		available slot in the proute->partition_dispatch_info array.  Also,
 *		record the index into this array in the parent_pd->indexes[] array in
 *		the partidx element so that we can properly retrieve the newly created
 *		PartitionDispatch later.
 */
static PartitionDispatch
ExecInitPartitionDispatchInfo(EState *estate,
							  PartitionTupleRouting *proute, Oid partoid,
							  PartitionDispatch parent_pd, int partidx)
{
	Relation	rel;
	PartitionDesc partdesc;
	PartitionDispatch pd;
	int			dispatchidx;
	MemoryContext oldcxt;

	if (estate->es_partition_directory == NULL)
		estate->es_partition_directory =
			CreatePartitionDirectory(estate->es_query_cxt);

	oldcxt = MemoryContextSwitchTo(proute->memcxt);

	/*
	 * Only sub-partitioned tables need to be locked here.  The root
	 * partitioned table will already have been locked as it's referenced in
	 * the query's rtable.
	 */
	if (partoid != RelationGetRelid(proute->partition_root))
		rel = table_open(partoid, RowExclusiveLock);
	else
		rel = proute->partition_root;
	partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);

	pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
									partdesc->nparts * sizeof(int));
	pd->reldesc = rel;
	pd->key = RelationGetPartitionKey(rel);
	pd->keystate = NIL;
	pd->partdesc = partdesc;
	if (parent_pd != NULL)
	{
		TupleDesc	tupdesc = RelationGetDescr(rel);

		/*
		 * For sub-partitioned tables where the column order differs from its
		 * direct parent partitioned table, we must store a tuple table slot
		 * initialized with its tuple descriptor and a tuple conversion map to
		 * convert a tuple from its parent's rowtype to its own.  This is to
		 * make sure that we are looking at the correct row using the correct
		 * tuple descriptor when computing its partition key for tuple
		 * routing.
		 */
		pd->tupmap = build_attrmap_by_name_if_req(RelationGetDescr(parent_pd->reldesc),
												  tupdesc);
		pd->tupslot = pd->tupmap ?
			MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
	}
	else
	{
		/* Not required for the root partitioned table */
		pd->tupmap = NULL;
		pd->tupslot = NULL;
	}

	/*
	 * Initialize with -1 to signify that the corresponding partition's
	 * ResultRelInfo or PartitionDispatch has not been created yet.
	 */
	memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);

	/* Track in PartitionTupleRouting for later use */
	dispatchidx = proute->num_dispatch++;

	/* Allocate or enlarge the array, as needed */
	if (proute->num_dispatch >= proute->max_dispatch)
	{
		if (proute->max_dispatch == 0)
		{
			proute->max_dispatch = 4;
			proute->partition_dispatch_info = (PartitionDispatch *)
				palloc(sizeof(PartitionDispatch) * proute->max_dispatch);
		}
		else
		{
			proute->max_dispatch *= 2;
			proute->partition_dispatch_info = (PartitionDispatch *)
				repalloc(proute->partition_dispatch_info,
						 sizeof(PartitionDispatch) * proute->max_dispatch);
		}
	}
	proute->partition_dispatch_info[dispatchidx] = pd;

	/*
	 * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
	 * install a downlink in the parent to allow quick descent.
	 */
	if (parent_pd)
	{
		Assert(parent_pd->indexes[partidx] == -1);
		parent_pd->indexes[partidx] = dispatchidx;
	}

	MemoryContextSwitchTo(oldcxt);

	return pd;
}

/*
 * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
 * routing.
 *
 * Close all the partitioned tables, leaf partitions, and their indices.
 */
void
ExecCleanupTupleRouting(ModifyTableState *mtstate,
						PartitionTupleRouting *proute)
{
	HTAB	   *htab = proute->subplan_resultrel_htab;
	int			i;

	/*
	 * Remember, proute->partition_dispatch_info[0] corresponds to the root
	 * partitioned table, which we must not try to close, because it is the
	 * main target table of the query that will be closed by callers such as
	 * ExecEndPlan() or DoCopy().  Also, tupslot is NULL for the root
	 * partitioned table.
	 */
	for (i = 1; i < proute->num_dispatch; i++)
	{
		PartitionDispatch pd = proute->partition_dispatch_info[i];

		table_close(pd->reldesc, NoLock);

		if (pd->tupslot)
			ExecDropSingleTupleTableSlot(pd->tupslot);
	}

	for (i = 0; i < proute->num_partitions; i++)
	{
		ResultRelInfo *resultRelInfo = proute->partitions[i];

		/* Allow any FDWs to shut down */
		if (resultRelInfo->ri_FdwRoutine != NULL &&
			resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
			resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
														   resultRelInfo);

		/*
		 * Check if this result rel is one belonging to the node's subplans;
		 * if so, let ExecEndPlan() clean it up.
		 */
		if (htab)
		{
			Oid			partoid;
			bool		found;

			partoid = RelationGetRelid(resultRelInfo->ri_RelationDesc);

			(void) hash_search(htab, &partoid, HASH_FIND, &found);
			if (found)
				continue;
		}

		ExecCloseIndices(resultRelInfo);
		table_close(resultRelInfo->ri_RelationDesc, NoLock);
	}
}

/* ----------------
 *		FormPartitionKeyDatum
 *			Construct values[] and isnull[] arrays for the partition key
 *			of a tuple.
 *
 *	pd				Partition dispatch object of the partitioned table
 *	slot			Heap tuple from which to extract partition key
 *	estate			executor state for evaluating any partition key
 *					expressions (must be non-NULL)
 *	values			Array of partition key Datums (output area)
 *	isnull			Array of is-null indicators (output area)
 *
 * the ecxt_scantuple slot of estate's per-tuple expr context must point to
 * the heap tuple passed in.
 * ----------------
 */
static void
FormPartitionKeyDatum(PartitionDispatch pd,
					  TupleTableSlot *slot,
					  EState *estate,
					  Datum *values,
					  bool *isnull)
{
	ListCell   *partexpr_item;
	int			i;

	if (pd->key->partexprs != NIL && pd->keystate == NIL)
	{
		/* Check caller has set up context correctly */
		Assert(estate != NULL &&
			   GetPerTupleExprContext(estate)->ecxt_scantuple == slot);

		/* First time through, set up expression evaluation state */
		pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
	}

	partexpr_item = list_head(pd->keystate);
	for (i = 0; i < pd->key->partnatts; i++)
	{
		AttrNumber	keycol = pd->key->partattrs[i];
		Datum		datum;
		bool		isNull;

		if (keycol != 0)
		{
			/* Plain column; get the value directly from the heap tuple */
			datum = slot_getattr(slot, keycol, &isNull);
		}
		else
		{
			/* Expression; need to evaluate it */
			if (partexpr_item == NULL)
				elog(ERROR, "wrong number of partition key expressions");
			datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
											  GetPerTupleExprContext(estate),
											  &isNull);
			partexpr_item = lnext(pd->keystate, partexpr_item);
		}
		values[i] = datum;
		isnull[i] = isNull;
	}

	if (partexpr_item != NULL)
		elog(ERROR, "wrong number of partition key expressions");
}

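/*
 * Worked example (an editorial addition, not part of the original file):
 * for a table declared PARTITION BY RANGE (a, (a + b)), partattrs[0] is
 * a's attribute number, so values[0] comes straight from slot_getattr();
 * partattrs[1] is 0, so values[1] is produced by evaluating the ExprState
 * built for the expression (a + b).
 */
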
/*
 * get_partition_for_tuple
 *		Finds the partition of the relation which accepts the partition key
 *		specified in values and isnull
 *
 * Return value is the index of the partition (>= 0 and < partdesc->nparts)
 * if one is found, or -1 if none is found.
 */
static int
get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull)
{
	int			bound_offset;
	int			part_index = -1;
	PartitionKey key = pd->key;
	PartitionDesc partdesc = pd->partdesc;
	PartitionBoundInfo boundinfo = partdesc->boundinfo;

	/* Route as appropriate based on partitioning strategy. */
	switch (key->strategy)
	{
		case PARTITION_STRATEGY_HASH:
			{
				int			greatest_modulus;
				uint64		rowHash;

				greatest_modulus = get_hash_partition_greatest_modulus(boundinfo);
				rowHash = compute_partition_hash_value(key->partnatts,
													   key->partsupfunc,
													   key->partcollation,
													   values, isnull);

				part_index = boundinfo->indexes[rowHash % greatest_modulus];
			}
			break;

		case PARTITION_STRATEGY_LIST:
			if (isnull[0])
			{
				if (partition_bound_accepts_nulls(boundinfo))
					part_index = boundinfo->null_index;
			}
			else
			{
				bool		equal = false;

				bound_offset = partition_list_bsearch(key->partsupfunc,
													  key->partcollation,
													  boundinfo,
													  values[0], &equal);
				if (bound_offset >= 0 && equal)
					part_index = boundinfo->indexes[bound_offset];
			}
			break;

		case PARTITION_STRATEGY_RANGE:
			{
				bool		equal = false,
							range_partkey_has_null = false;
				int			i;

				/*
				 * No range includes NULL, so this will be accepted by the
				 * default partition if there is one, and otherwise rejected.
				 */
				for (i = 0; i < key->partnatts; i++)
				{
					if (isnull[i])
					{
						range_partkey_has_null = true;
						break;
					}
				}

				if (!range_partkey_has_null)
				{
					bound_offset = partition_range_datum_bsearch(key->partsupfunc,
																 key->partcollation,
																 boundinfo,
																 key->partnatts,
																 values,
																 &equal);

					/*
					 * The bound at bound_offset is less than or equal to the
					 * tuple value, so the bound at offset+1 is the upper
					 * bound of the partition we're looking for, if there
					 * actually exists one.
					 */
					part_index = boundinfo->indexes[bound_offset + 1];
				}
			}
			break;

		default:
			elog(ERROR, "unexpected partition strategy: %d",
				 (int) key->strategy);
	}

	/*
	 * part_index < 0 means we failed to find a partition of this parent. Use
	 * the default partition, if there is one.
	 */
	if (part_index < 0)
		part_index = boundinfo->default_index;

	return part_index;
}

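/*
 * Worked example (an editorial addition, not part of the original file):
 * for hash partitioning with modulus 4, greatest_modulus is 4, so a tuple
 * whose computed rowHash is 11 is looked up at indexes[11 % 4], i.e.
 * indexes[3].  If no partition covers that remainder the entry is -1, and
 * since hash-partitioned tables cannot have a default partition, the caller
 * then reports the usual "no partition found" error.
 */
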
/*
 * ExecBuildSlotPartitionKeyDescription
 *
 * This works very much like BuildIndexValueDescription() and is currently
 * used for building error messages when ExecFindPartition() fails to find
 * a partition for a row.
 */
static char *
ExecBuildSlotPartitionKeyDescription(Relation rel,
									 Datum *values,
									 bool *isnull,
									 int maxfieldlen)
{
	StringInfoData buf;
	PartitionKey key = RelationGetPartitionKey(rel);
	int			partnatts = get_partition_natts(key);
	int			i;
	Oid			relid = RelationGetRelid(rel);
	AclResult	aclresult;

	if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
		return NULL;

	/* If the user has table-level access, just go build the description. */
	aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
	{
		/*
		 * Step through the columns of the partition key and make sure the
		 * user has SELECT rights on all of them.
		 */
		for (i = 0; i < partnatts; i++)
		{
			AttrNumber	attnum = get_partition_col_attnum(key, i);

			/*
			 * If this partition key column is an expression, we return no
			 * detail rather than try to figure out what column(s) the
			 * expression includes and if the user has SELECT rights on them.
			 */
			if (attnum == InvalidAttrNumber ||
				pg_attribute_aclcheck(relid, attnum, GetUserId(),
									  ACL_SELECT) != ACLCHECK_OK)
				return NULL;
		}
	}

	initStringInfo(&buf);
	appendStringInfo(&buf, "(%s) = (",
					 pg_get_partkeydef_columns(relid, true));

	for (i = 0; i < partnatts; i++)
	{
		char	   *val;
		int			vallen;

		if (isnull[i])
			val = "null";
		else
		{
			Oid			foutoid;
			bool		typisvarlena;

			getTypeOutputInfo(get_partition_col_typid(key, i),
							  &foutoid, &typisvarlena);
			val = OidOutputFunctionCall(foutoid, values[i]);
		}

		if (i > 0)
			appendStringInfoString(&buf, ", ");

		/* truncate if needed */
		vallen = strlen(val);
		if (vallen <= maxfieldlen)
			appendBinaryStringInfo(&buf, val, vallen);
		else
		{
			vallen = pg_mbcliplen(val, vallen, maxfieldlen);
			appendBinaryStringInfo(&buf, val, vallen);
			appendStringInfoString(&buf, "...");
		}
	}

	appendStringInfoChar(&buf, ')');

	return buf.data;
}

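/*
 * Sample output (an editorial addition, not part of the original file):
 * for a table declared PARTITION BY LIST (status), a failing row might
 * yield the description
 *
 *		(status) = (cancelled)
 *
 * Values longer than maxfieldlen bytes are clipped at a multibyte-safe
 * boundary and terminated with "...".
 */
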
/*
 * adjust_partition_tlist
 *		Adjust the targetlist entries for a given partition to account for
 *		attribute differences between parent and the partition
 *
 * The expressions have already been fixed, but here we fix the list to make
 * target resnos match the partition's attribute numbers.  This results in a
 * copy of the original target list in which the entries appear in resno
 * order, including both the existing entries (that may have their resno
 * changed in-place) and the newly added entries for columns that don't exist
 * in the parent.
 *
 * Scribbles on the input tlist, so callers must make sure to make a copy
 * before passing it to us.
 */
static List *
adjust_partition_tlist(List *tlist, TupleConversionMap *map)
{
	List	   *new_tlist = NIL;
	TupleDesc	tupdesc = map->outdesc;
	AttrMap    *attrMap = map->attrMap;
	AttrNumber	attrno;

	Assert(tupdesc->natts == attrMap->maplen);
	for (attrno = 1; attrno <= tupdesc->natts; attrno++)
	{
		Form_pg_attribute att_tup = TupleDescAttr(tupdesc, attrno - 1);
		TargetEntry *tle;

		if (attrMap->attnums[attrno - 1] != InvalidAttrNumber)
		{
			Assert(!att_tup->attisdropped);

			/*
			 * Use the corresponding entry from the parent's tlist, adjusting
			 * the resno to match the partition's attno.
			 */
			tle = (TargetEntry *) list_nth(tlist, attrMap->attnums[attrno - 1] - 1);
			tle->resno = attrno;
		}
		else
		{
			Const	   *expr;

			/*
			 * For a dropped attribute in the partition, generate a dummy
			 * entry with resno matching the partition's attno.
			 */
			Assert(att_tup->attisdropped);
			expr = makeConst(INT4OID,
							 -1,
							 InvalidOid,
							 sizeof(int32),
							 (Datum) 0,
							 true,	/* isnull */
							 true /* byval */ );
			tle = makeTargetEntry((Expr *) expr,
								  attrno,
								  pstrdup(NameStr(att_tup->attname)),
								  false);
		}

		new_tlist = lappend(new_tlist, tle);
	}

	return new_tlist;
}

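/*
 * Worked example (an editorial addition, not part of the original file):
 * suppose the parent has columns (a, b) while the partition, having had a
 * column dropped, has (a, <dropped>, b), so attrMap->attnums is {1, 0, 2}.
 * The result keeps the parent's 'a' entry at resno 1, inserts a null Const
 * placeholder at resno 2 for the dropped column, and renumbers the parent's
 * 'b' entry to resno 3.
 */
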
/*-------------------------------------------------------------------------
 * Run-Time Partition Pruning Support.
 *
 * The following series of functions exists to support the removal of unneeded
 * subplans for queries against partitioned tables.  The supporting functions
 * here are designed to work with any plan type which supports an arbitrary
 * number of subplans, e.g. Append, MergeAppend.
 *
 * When pruning involves comparison of a partition key to a constant, it's
 * done by the planner.  However, if we have a comparison to a non-constant
 * but not volatile expression, that presents an opportunity for run-time
 * pruning by the executor, allowing irrelevant partitions to be skipped
 * dynamically.
 *
 * We must distinguish expressions containing PARAM_EXEC Params from
 * expressions that don't contain those.  Even though a PARAM_EXEC Param is
 * considered to be a stable expression, it can change value from one plan
 * node scan to the next during query execution.  Stable comparison
 * expressions that don't involve such Params allow partition pruning to be
 * done once during executor startup.  Expressions that do involve such Params
 * require us to prune separately for each scan of the parent plan node.
 *
 * Note that pruning away unneeded subplans during executor startup has the
 * added benefit of not having to initialize the unneeded subplans at all.
 *
 *
 * Functions:
 *
 * ExecCreatePartitionPruneState:
 *		Creates the PartitionPruneState required by each of the two pruning
 *		functions.  Details stored include how to map the partition index
 *		returned by the partition pruning code into subplan indexes.
 *
 * ExecFindInitialMatchingSubPlans:
 *		Returns indexes of matching subplans.  Partition pruning is attempted
 *		without any evaluation of expressions containing PARAM_EXEC Params.
 *		This function must be called during executor startup for the parent
 *		plan before the subplans themselves are initialized.  Subplans which
 *		are found not to match by this function must be removed from the
 *		plan's list of subplans during execution, as this function performs a
 *		remap of the partition index to subplan index map and the newly
 *		created map provides indexes only for subplans which remain after
 *		calling this function.
 *
 * ExecFindMatchingSubPlans:
 *		Returns indexes of matching subplans after evaluating all available
 *		expressions.  This function can only be called during execution and
 *		must be called again each time the value of a Param listed in
 *		PartitionPruneState's 'execparamids' changes.
 *-------------------------------------------------------------------------
 */

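/*
 * Example (an editorial addition, not part of the original file): in
 *
 *	SELECT ... FROM parted WHERE key = $1
 *
 * the external Param $1 has one value for the whole execution, so unmatched
 * partitions can be pruned once at startup (initial pruning).  But if
 * 'parted' is scanned on the inside of a nestloop whose comparison value
 * arrives through a PARAM_EXEC Param, pruning must be repeated every time
 * that Param changes (exec pruning).
 */
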
/*
 * ExecCreatePartitionPruneState
 *		Build the data structure required for calling
 *		ExecFindInitialMatchingSubPlans and ExecFindMatchingSubPlans.
 *
 * 'planstate' is the parent plan node's execution state.
 *
 * 'partitionpruneinfo' is a PartitionPruneInfo as generated by
 * make_partition_pruneinfo.  Here we build a PartitionPruneState containing a
 * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
 * partitionpruneinfo->prune_infos), each of which contains a
 * PartitionedRelPruningData for each PartitionedRelPruneInfo appearing in
 * that sublist.  This two-level system is needed to keep from confusing the
 * different hierarchies when a UNION ALL contains multiple partitioned tables
 * as children.  The data stored in each PartitionedRelPruningData can be
 * re-used each time we re-evaluate which partitions match the pruning steps
 * provided in each PartitionedRelPruneInfo.
 */
PartitionPruneState *
ExecCreatePartitionPruneState(PlanState *planstate,
							  PartitionPruneInfo *partitionpruneinfo)
{
	EState	   *estate = planstate->state;
	PartitionPruneState *prunestate;
	int			n_part_hierarchies;
	ListCell   *lc;
	int			i;

	if (estate->es_partition_directory == NULL)
		estate->es_partition_directory =
			CreatePartitionDirectory(estate->es_query_cxt);

	n_part_hierarchies = list_length(partitionpruneinfo->prune_infos);
	Assert(n_part_hierarchies > 0);

	/*
	 * Allocate the data structure
	 */
	prunestate = (PartitionPruneState *)
		palloc(offsetof(PartitionPruneState, partprunedata) +
			   sizeof(PartitionPruningData *) * n_part_hierarchies);

	prunestate->execparamids = NULL;
	/* other_subplans can change at runtime, so we need our own copy */
	prunestate->other_subplans = bms_copy(partitionpruneinfo->other_subplans);
	prunestate->do_initial_prune = false;	/* may be set below */
	prunestate->do_exec_prune = false;	/* may be set below */
	prunestate->num_partprunedata = n_part_hierarchies;

	/*
	 * Create a short-term memory context which we'll use when making calls to
	 * the partition pruning functions.  This avoids possible memory leaks,
	 * since the pruning functions call comparison functions that aren't under
	 * our control.
	 */
	prunestate->prune_context =
		AllocSetContextCreate(CurrentMemoryContext,
							  "Partition Prune",
							  ALLOCSET_DEFAULT_SIZES);

	i = 0;
	foreach(lc, partitionpruneinfo->prune_infos)
	{
		List	   *partrelpruneinfos = lfirst_node(List, lc);
		int			npartrelpruneinfos = list_length(partrelpruneinfos);
		PartitionPruningData *prunedata;
		ListCell   *lc2;
		int			j;

		prunedata = (PartitionPruningData *)
			palloc(offsetof(PartitionPruningData, partrelprunedata) +
				   npartrelpruneinfos * sizeof(PartitionedRelPruningData));
		prunestate->partprunedata[i] = prunedata;
		prunedata->num_partrelprunedata = npartrelpruneinfos;

		j = 0;
		foreach(lc2, partrelpruneinfos)
		{
			PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2);
			PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
			Relation	partrel;
			PartitionDesc partdesc;
			PartitionKey partkey;

			/*
			 * We can rely on the copies of the partitioned table's partition
			 * key and partition descriptor appearing in its relcache entry,
			 * because that entry will be held open and locked for the
			 * duration of this executor run.
			 */
			partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex);
			partkey = RelationGetPartitionKey(partrel);
			partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
												partrel);

			/*
			 * Initialize the subplan_map and subpart_map.  Since detaching a
			 * partition requires AccessExclusiveLock, no partitions can have
			 * disappeared, nor can the bounds for any partition have changed.
			 * However, new partitions may have been added.
			 */
			Assert(partdesc->nparts >= pinfo->nparts);
			pprune->nparts = partdesc->nparts;
			pprune->subplan_map = palloc(sizeof(int) * partdesc->nparts);
			if (partdesc->nparts == pinfo->nparts)
			{
				/*
				 * There are no new partitions, so this is simple.  We can
				 * simply point to the subpart_map from the plan, but we must
				 * copy the subplan_map since we may change it later.
				 */
				pprune->subpart_map = pinfo->subpart_map;
				memcpy(pprune->subplan_map, pinfo->subplan_map,
					   sizeof(int) * pinfo->nparts);

				/*
				 * Double-check that the list of unpruned relations has not
				 * changed.  (Pruned partitions are not in relid_map[].)
				 */
#ifdef USE_ASSERT_CHECKING
				for (int k = 0; k < pinfo->nparts; k++)
				{
					Assert(partdesc->oids[k] == pinfo->relid_map[k] ||
						   pinfo->subplan_map[k] == -1);
				}
#endif
			}
			else
			{
				int			pd_idx = 0;
				int			pp_idx;

				/*
				 * Some new partitions have appeared since plan time, and
				 * those are reflected in our PartitionDesc but were not
				 * present in the one used to construct subplan_map and
				 * subpart_map.  So we must construct new and longer arrays
				 * where the partitions that were originally present map to
				 * the same place, and any added indexes map to -1, as if the
				 * new partitions had been pruned.
				 */
				pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
				for (pp_idx = 0; pp_idx < partdesc->nparts; ++pp_idx)
				{
					if (pinfo->relid_map[pd_idx] != partdesc->oids[pp_idx])
					{
						pprune->subplan_map[pp_idx] = -1;
						pprune->subpart_map[pp_idx] = -1;
					}
					else
					{
						pprune->subplan_map[pp_idx] =
							pinfo->subplan_map[pd_idx];
						pprune->subpart_map[pp_idx] =
							pinfo->subpart_map[pd_idx++];
					}
				}
				Assert(pd_idx == pinfo->nparts);
			}

			/* present_parts is also subject to later modification */
			pprune->present_parts = bms_copy(pinfo->present_parts);

			/*
			 * Initialize pruning contexts as needed.
			 */
			pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
			if (pinfo->initial_pruning_steps)
			{
				ExecInitPruningContext(&pprune->initial_context,
									   pinfo->initial_pruning_steps,
									   partdesc, partkey, planstate);
				/* Record whether initial pruning is needed at any level */
				prunestate->do_initial_prune = true;
			}
			pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
			if (pinfo->exec_pruning_steps)
			{
				ExecInitPruningContext(&pprune->exec_context,
									   pinfo->exec_pruning_steps,
									   partdesc, partkey, planstate);
				/* Record whether exec pruning is needed at any level */
				prunestate->do_exec_prune = true;
			}

			/*
			 * Accumulate the IDs of all PARAM_EXEC Params affecting the
			 * partitioning decisions at this plan node.
			 */
			prunestate->execparamids = bms_add_members(prunestate->execparamids,
													   pinfo->execparamids);

			j++;
		}
		i++;
	}

	return prunestate;
}

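/*
 * Worked example (an editorial addition, not part of the original file):
 * say the plan was made when the table had partitions with OIDs {100, 200},
 * so relid_map = {100, 200} and subplan_map = {0, 1}.  If a partition with
 * OID 150 is attached before execution, partdesc->oids becomes
 * {100, 150, 200} and the remapping loop above yields
 * subplan_map = {0, -1, 1}: the new partition acts as if pruned, because no
 * subplan was created for it.
 */
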
/*
 * Initialize a PartitionPruneContext for the given list of pruning steps.
 */
static void
ExecInitPruningContext(PartitionPruneContext *context,
					   List *pruning_steps,
					   PartitionDesc partdesc,
					   PartitionKey partkey,
					   PlanState *planstate)
{
	int			n_steps;
	int			partnatts;
	ListCell   *lc;

	n_steps = list_length(pruning_steps);

	context->strategy = partkey->strategy;
	context->partnatts = partnatts = partkey->partnatts;
	context->nparts = partdesc->nparts;
	context->boundinfo = partdesc->boundinfo;
	context->partcollation = partkey->partcollation;
	context->partsupfunc = partkey->partsupfunc;

	/* We'll look up type-specific support functions as needed */
	context->stepcmpfuncs = (FmgrInfo *)
		palloc0(sizeof(FmgrInfo) * n_steps * partnatts);

	context->ppccontext = CurrentMemoryContext;
	context->planstate = planstate;

	/* Initialize expression state for each expression we need */
	context->exprstates = (ExprState **)
		palloc0(sizeof(ExprState *) * n_steps * partnatts);
	foreach(lc, pruning_steps)
	{
		PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc);
		ListCell   *lc2;
		int			keyno;

		/* not needed for other step kinds */
		if (!IsA(step, PartitionPruneStepOp))
			continue;

		Assert(list_length(step->exprs) <= partnatts);

		keyno = 0;
		foreach(lc2, step->exprs)
		{
			Expr	   *expr = (Expr *) lfirst(lc2);

			/* not needed for Consts */
			if (!IsA(expr, Const))
			{
				int			stateidx = PruneCxtStateIdx(partnatts,
														step->step.step_id,
														keyno);

				context->exprstates[stateidx] =
					ExecInitExpr(expr, context->planstate);
			}
			keyno++;
		}
	}
}

1797 /*
1798  * ExecFindInitialMatchingSubPlans
1799  * Identify the set of subplans that cannot be eliminated by initial
1800  * pruning, disregarding any pruning constraints involving PARAM_EXEC
1801  * Params.
1802  *
1803  * If additional pruning passes will be required (because of PARAM_EXEC
1804  * Params), we must also update the translation data that allows conversion
1805  * of partition indexes into subplan indexes to account for the unneeded
1806  * subplans having been removed.
1807  *
1808  * Must only be called once per 'prunestate', and only if initial pruning
1809  * is required.
1810  *
1811  * 'nsubplans' must be passed as the total number of unpruned subplans.
1812  */
1813 Bitmapset *
1814 ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubplans)
1815 {
1816  Bitmapset *result = NULL;
1817  MemoryContext oldcontext;
1818  int i;
1819 
1820  /* Caller error if we get here without do_initial_prune */
1821  Assert(prunestate->do_initial_prune);
1822 
1823  /*
1824  * Switch to a temp context to avoid leaking memory in the executor's
1825  * query-lifespan memory context.
1826  */
1827  oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
1828 
1829  /*
1830  * For each hierarchy, do the pruning tests, and add nondeletable
1831  * subplans' indexes to "result".
1832  */
1833  for (i = 0; i < prunestate->num_partprunedata; i++)
1834  {
1835  PartitionPruningData *prunedata;
1836  PartitionedRelPruningData *pprune;
1837 
1838  prunedata = prunestate->partprunedata[i];
1839  pprune = &prunedata->partrelprunedata[0];
1840 
1841  /* Perform pruning without using PARAM_EXEC Params */
1842  find_matching_subplans_recurse(prunedata, pprune, true, &result);
1843 
1844  /* Expression eval may have used space in node's ps_ExprContext too */
1845  if (pprune->initial_pruning_steps)
1846  ResetExprContext(pprune->initial_context.planstate->ps_ExprContext);
1847  }
1848 
1849  /* Add in any subplans that partition pruning didn't account for */
1850  result = bms_add_members(result, prunestate->other_subplans);
1851 
1852  MemoryContextSwitchTo(oldcontext);
1853 
1854  /* Copy result out of the temp context before we reset it */
1855  result = bms_copy(result);
1856 
1857  MemoryContextReset(prunestate->prune_context);
1858 
1859  /*
1860  * If exec-time pruning is required and we pruned subplans above, then we
1861  * must re-sequence the subplan indexes so that ExecFindMatchingSubPlans
1862  * returns indexes that remain valid for the set of subplans left after
1863  * this function has run.
1864  *
1865  * We can safely skip this when !do_exec_prune, even though that leaves
1866  * invalid data in prunestate, because that data won't be consulted again
1867  * (cf initial Assert in ExecFindMatchingSubPlans).
1868  */
1869  if (prunestate->do_exec_prune && bms_num_members(result) < nsubplans)
1870  {
1871  int *new_subplan_indexes;
1872  Bitmapset *new_other_subplans;
1873  int i;
1874  int newidx;
1875 
1876  /*
1877  * First we must build a temporary array which maps old subplan
1878  * indexes to new ones. For convenience of initialization, we use
1879  * 1-based indexes in this array and leave pruned items as 0.
1880  */
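 /*
  * For instance, with nsubplans = 5 and surviving subplans {0, 2, 4},
  * the loop below produces {1, 0, 2, 0, 3}: subtracting one later
  * yields the new 0-based indexes {0, -1, 1, -1, 2}.
  */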
1881  new_subplan_indexes = (int *) palloc0(sizeof(int) * nsubplans);
1882  newidx = 1;
1883  i = -1;
1884  while ((i = bms_next_member(result, i)) >= 0)
1885  {
1886  Assert(i < nsubplans);
1887  new_subplan_indexes[i] = newidx++;
1888  }
1889 
1890  /*
1891  * Now we can update each PartitionedRelPruneInfo's subplan_map with
1892  * new subplan indexes. We must also recompute its present_parts
1893  * bitmap.
1894  */
1895  for (i = 0; i < prunestate->num_partprunedata; i++)
1896  {
1897  PartitionPruningData *prunedata = prunestate->partprunedata[i];
1898  int j;
1899 
1900  /*
1901  * Within each hierarchy, we perform this loop in back-to-front
1902  * order so that we determine present_parts for the lowest-level
1903  * partitioned tables first. This way we can tell whether a
1904  * sub-partitioned table's partitions were entirely pruned so we
1905  * can exclude it from the current level's present_parts.
1906  */
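 /*
  * For example, if the table at partition index k is itself
  * sub-partitioned and all of its subplans were pruned above, its
  * present_parts (already recomputed, thanks to this ordering) is now
  * empty, so the code below excludes k at this level too.
  */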
1907  for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
1908  {
1909  PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
1910  int nparts = pprune->nparts;
1911  int k;
1912 
1913  /* We just rebuild present_parts from scratch */
1914  bms_free(pprune->present_parts);
1915  pprune->present_parts = NULL;
1916 
1917  for (k = 0; k < nparts; k++)
1918  {
1919  int oldidx = pprune->subplan_map[k];
1920  int subidx;
1921 
1922  /*
1923  * If this partition existed as a subplan then change the
1924  * old subplan index to the new subplan index. The new
1925  * index may become -1 if the partition was pruned above,
1926  * or it may move to a lower position because some subplans
1927  * before it in the list were removed. If
1928  * it's a subpartition, add it to present_parts unless
1929  * it's entirely pruned.
1930  */
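 /*
  * Continuing the example above: a partition whose old subplan index
  * was 2 gets new_subplan_indexes[2] - 1 = 1, while one whose old
  * subplan was pruned has a zero entry and so maps to -1 here.
  */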
1931  if (oldidx >= 0)
1932  {
1933  Assert(oldidx < nsubplans);
1934  pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
1935 
1936  if (new_subplan_indexes[oldidx] > 0)
1937  pprune->present_parts =
1938  bms_add_member(pprune->present_parts, k);
1939  }
1940  else if ((subidx = pprune->subpart_map[k]) >= 0)
1941  {
1942  PartitionedRelPruningData *subprune;
1943 
1944  subprune = &prunedata->partrelprunedata[subidx];
1945 
1946  if (!bms_is_empty(subprune->present_parts))
1947  pprune->present_parts =
1948  bms_add_member(pprune->present_parts, k);
1949  }
1950  }
1951  }
1952  }
1953 
1954  /*
1955  * We must also recompute the other_subplans set, since indexes in it
1956  * may change.
1957  */
1958  new_other_subplans = NULL;
1959  i = -1;
1960  while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
1961  new_other_subplans = bms_add_member(new_other_subplans,
1962  new_subplan_indexes[i] - 1);
1963 
1964  bms_free(prunestate->other_subplans);
1965  prunestate->other_subplans = new_other_subplans;
1966 
1967  pfree(new_subplan_indexes);
1968  }
1969 
1970  return result;
1971 }
1972 
1973 /*
1974  * ExecFindMatchingSubPlans
1975  * Determine which subplans match the pruning steps detailed in
1976  * 'prunestate' for the current comparison expression values.
1977  *
1978  * Here we assume we may evaluate PARAM_EXEC Params.
1979  */
1980 Bitmapset *
1981 ExecFindMatchingSubPlans(PartitionPruneState *prunestate)
1982 {
1983  Bitmapset *result = NULL;
1984  MemoryContext oldcontext;
1985  int i;
1986 
1987  /*
1988  * If !do_exec_prune, we've got problems because
1989  * ExecFindInitialMatchingSubPlans will not have bothered to update
1990  * prunestate for whatever pruning it did.
1991  */
1992  Assert(prunestate->do_exec_prune);
1993 
1994  /*
1995  * Switch to a temp context to avoid leaking memory in the executor's
1996  * query-lifespan memory context.
1997  */
1998  oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
1999 
2000  /*
2001  * For each hierarchy, do the pruning tests, and add nondeletable
2002  * subplans' indexes to "result".
2003  */
2004  for (i = 0; i < prunestate->num_partprunedata; i++)
2005  {
2006  PartitionPruningData *prunedata;
2007  PartitionedRelPruningData *pprune;
2008 
2009  prunedata = prunestate->partprunedata[i];
2010  pprune = &prunedata->partrelprunedata[0];
2011 
2012  find_matching_subplans_recurse(prunedata, pprune, false, &result);
2013 
2014  /* Expression eval may have used space in node's ps_ExprContext too */
2015  if (pprune->exec_pruning_steps)
2016  ResetExprContext(pprune->exec_context.planstate->ps_ExprContext);
2017  }
2018 
2019  /* Add in any subplans that partition pruning didn't account for */
2020  result = bms_add_members(result, prunestate->other_subplans);
2021 
2022  MemoryContextSwitchTo(oldcontext);
2023 
2024  /* Copy result out of the temp context before we reset it */
2025  result = bms_copy(result);
2026 
2027  MemoryContextReset(prunestate->prune_context);
2028 
2029  return result;
2030 }
2031 
2032 /*
2033  * find_matching_subplans_recurse
2034  * Recursive worker function for ExecFindMatchingSubPlans and
2035  * ExecFindInitialMatchingSubPlans
2036  *
2037  * Adds valid (non-prunable) subplan IDs to *validsubplans
2038  */
2039 static void
2040 find_matching_subplans_recurse(PartitionPruningData *prunedata,
2041  PartitionedRelPruningData *pprune,
2042  bool initial_prune,
2043  Bitmapset **validsubplans)
2044 {
2045  Bitmapset *partset;
2046  int i;
2047 
2048  /* Guard against stack overflow due to overly deep partition hierarchy. */
2049  check_stack_depth();
2050 
2051  /* Only prune if pruning would be useful at this level. */
2052  if (initial_prune && pprune->initial_pruning_steps)
2053  {
2054  partset = get_matching_partitions(&pprune->initial_context,
2055  pprune->initial_pruning_steps);
2056  }
2057  else if (!initial_prune && pprune->exec_pruning_steps)
2058  {
2059  partset = get_matching_partitions(&pprune->exec_context,
2060  pprune->exec_pruning_steps);
2061  }
2062  else
2063  {
2064  /*
2065  * If no pruning is to be done, just include all partitions at this
2066  * level.
2067  */
2068  partset = pprune->present_parts;
2069  }
2070 
2071  /* Translate partset into subplan indexes */
2072  i = -1;
2073  while ((i = bms_next_member(partset, i)) >= 0)
2074  {
2075  if (pprune->subplan_map[i] >= 0)
2076  *validsubplans = bms_add_member(*validsubplans,
2077  pprune->subplan_map[i]);
2078  else
2079  {
2080  int partidx = pprune->subpart_map[i];
2081 
2082  if (partidx >= 0)
2083  find_matching_subplans_recurse(prunedata,
2084  &prunedata->partrelprunedata[partidx],
2085  initial_prune, validsubplans);
2086  else
2087  {
2088  /*
2089  * We get here if the planner already pruned all the sub-
2090  * partitions for this partition. Silently ignore this
2091  * partition in this case. The end result is the same: we
2092  * would have pruned all partitions just the same, but we
2093  * don't have any pruning steps to execute to verify this.
2094  */
2095  }
2096  }
2097  }
2098 }
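
The old-to-new subplan index translation used in ExecFindInitialMatchingSubPlans is compact but subtle. The following standalone sketch models the same scheme; it is an illustration only, with invented names, and uses no PostgreSQL APIs:

#include <stdio.h>
#include <stdlib.h>

/*
 * Model of the re-sequencing in ExecFindInitialMatchingSubPlans:
 * surviving old subplan indexes get 1-based entries in a temporary
 * map, pruned ones keep calloc's 0, and "entry - 1" then yields the
 * new 0-based index, or -1 for a pruned subplan.
 */
int
main(void)
{
    int     nsubplans = 5;
    int     surviving[] = {0, 2, 4};    /* survivors of initial pruning */
    int    *map = calloc(nsubplans, sizeof(int));
    int     newidx = 1;

    for (int i = 0; i < 3; i++)
        map[surviving[i]] = newidx++;

    for (int old = 0; old < nsubplans; old++)
        printf("old subplan %d -> new subplan %d\n", old, map[old] - 1);

    free(map);
    return 0;
}

This prints -1 for the pruned subplans 1 and 3 and renumbers the survivors to 0, 1, 2, which is exactly the rewrite applied to subplan_map and other_subplans above.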