PostgreSQL source code (git master) — excerpt of src/backend/executor/execPartition.c,
captured from the generated documentation browser. Note: some original source lines
are missing from this capture; compare against the upstream file before relying on it.
1 /*-------------------------------------------------------------------------
2  *
3  * execPartition.c
4  * Support routines for partitioning.
5  *
6  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/executor/execPartition.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/table.h"
17 #include "access/tableam.h"
18 #include "catalog/partition.h"
19 #include "catalog/pg_inherits.h"
20 #include "catalog/pg_type.h"
21 #include "executor/execPartition.h"
22 #include "executor/executor.h"
23 #include "foreign/fdwapi.h"
24 #include "mb/pg_wchar.h"
25 #include "miscadmin.h"
26 #include "nodes/makefuncs.h"
28 #include "partitioning/partdesc.h"
29 #include "partitioning/partprune.h"
30 #include "rewrite/rewriteManip.h"
31 #include "utils/lsyscache.h"
32 #include "utils/partcache.h"
33 #include "utils/rel.h"
34 #include "utils/rls.h"
35 #include "utils/ruleutils.h"
36 
37 
38 /*-----------------------
39  * PartitionTupleRouting - Encapsulates all information required to
40  * route a tuple inserted into a partitioned table to one of its leaf
41  * partitions.
42  *
43  * partition_root
44  * The partitioned table that's the target of the command.
45  *
46  * partition_dispatch_info
47  * Array of 'max_dispatch' elements containing a pointer to a
48  * PartitionDispatch object for every partitioned table touched by tuple
49  * routing. The entry for the target partitioned table is *always*
50  * present in the 0th element of this array. See comment for
51  * PartitionDispatchData->indexes for details on how this array is
52  * indexed.
53  *
54  * num_dispatch
55  * The current number of items stored in the 'partition_dispatch_info'
56  * array. Also serves as the index of the next free array element for
57  * new PartitionDispatch objects that need to be stored.
58  *
59  * max_dispatch
60  * The current allocated size of the 'partition_dispatch_info' array.
61  *
62  * partitions
63  * Array of 'max_partitions' elements containing a pointer to a
64  * ResultRelInfo for every leaf partitions touched by tuple routing.
65  * Some of these are pointers to ResultRelInfos which are borrowed out of
66  * 'subplan_resultrel_htab'. The remainder have been built especially
67  * for tuple routing. See comment for PartitionDispatchData->indexes for
68  * details on how this array is indexed.
69  *
70  * num_partitions
71  * The current number of items stored in the 'partitions' array. Also
72  * serves as the index of the next free array element for new
73  * ResultRelInfo objects that need to be stored.
74  *
75  * max_partitions
76  * The current allocated size of the 'partitions' array.
77  *
78  * subplan_resultrel_htab
79  * Hash table to store subplan ResultRelInfos by Oid. This is used to
80  * cache ResultRelInfos from subplans of an UPDATE ModifyTable node;
81  * NULL in other cases. Some of these may be useful for tuple routing
82  * to save having to build duplicates.
83  *
84  * memcxt
85  * Memory context used to allocate subsidiary structs.
86  *-----------------------
87  */
89 {
99 };
100 
101 /*-----------------------
102  * PartitionDispatch - information about one partitioned table in a partition
103  * hierarchy required to route a tuple to any of its partitions. A
104  * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
105  * struct and stored inside its 'partition_dispatch_info' array.
106  *
107  * reldesc
108  * Relation descriptor of the table
109  *
110  * key
111  * Partition key information of the table
112  *
113  * keystate
114  * Execution state required for expressions in the partition key
115  *
116  * partdesc
117  * Partition descriptor of the table
118  *
119  * tupslot
120  * A standalone TupleTableSlot initialized with this table's tuple
121  * descriptor, or NULL if no tuple conversion between the parent is
122  * required.
123  *
124  * tupmap
125  * TupleConversionMap to convert from the parent's rowtype to this table's
126  * rowtype (when extracting the partition key of a tuple just before
127  * routing it through this table). A NULL value is stored if no tuple
128  * conversion is required.
129  *
130  * indexes
131  * Array of partdesc->nparts elements. For leaf partitions the index
132  * corresponds to the partition's ResultRelInfo in the encapsulating
133  * PartitionTupleRouting's partitions array. For partitioned partitions,
134  * the index corresponds to the PartitionDispatch for it in its
135  * partition_dispatch_info array. -1 indicates we've not yet allocated
136  * anything in PartitionTupleRouting for the partition.
137  *-----------------------
138  */
139 typedef struct PartitionDispatchData
140 {
143  List *keystate; /* list of ExprState */
147  int indexes[FLEXIBLE_ARRAY_MEMBER];
149 
150 /* struct to hold result relations coming from UPDATE subplans */
152 {
153  Oid relid; /* hash key -- must be first */
156 
157 
159  PartitionTupleRouting *proute);
161  EState *estate, PartitionTupleRouting *proute,
162  PartitionDispatch dispatch,
163  ResultRelInfo *rootResultRelInfo,
164  int partidx);
165 static void ExecInitRoutingInfo(ModifyTableState *mtstate,
166  EState *estate,
167  PartitionTupleRouting *proute,
168  PartitionDispatch dispatch,
169  ResultRelInfo *partRelInfo,
170  int partidx);
172  PartitionTupleRouting *proute,
173  Oid partoid, PartitionDispatch parent_pd, int partidx);
175  TupleTableSlot *slot,
176  EState *estate,
177  Datum *values,
178  bool *isnull);
180  bool *isnull);
182  Datum *values,
183  bool *isnull,
184  int maxfieldlen);
185 static List *adjust_partition_tlist(List *tlist, TupleConversionMap *map);
186 static void ExecInitPruningContext(PartitionPruneContext *context,
187  List *pruning_steps,
188  PartitionDesc partdesc,
189  PartitionKey partkey,
190  PlanState *planstate);
193  bool initial_prune,
194  Bitmapset **validsubplans);
195 
196 
197 /*
198  * ExecSetupPartitionTupleRouting - sets up information needed during
199  * tuple routing for partitioned tables, encapsulates it in
200  * PartitionTupleRouting, and returns it.
201  *
202  * Callers must use the returned PartitionTupleRouting during calls to
203  * ExecFindPartition(). The actual ResultRelInfo for a partition is only
204  * allocated when the partition is found for the first time.
205  *
206  * The current memory context is used to allocate this struct and all
207  * subsidiary structs that will be allocated from it later on. Typically
208  * it should be estate->es_query_cxt.
209  */
/*
 * NOTE(review): this excerpt elides the function's return type, name and
 * leading parameter(s) (presumably "PartitionTupleRouting *" and a line
 * introducing the EState/ModifyTableState parameters that the body reads
 * via 'estate' and 'mtstate') -- verify against the upstream file.
 */
212  Relation rel)
213 {
214  PartitionTupleRouting *proute;
215  ModifyTable *node = mtstate ? (ModifyTable *) mtstate->ps.plan : NULL;
216 
217  /*
218  * Here we attempt to expend as little effort as possible in setting up
219  * the PartitionTupleRouting. Each partition's ResultRelInfo is built on
220  * demand, only when we actually need to route a tuple to that partition.
221  * The reason for this is that a common case is for INSERT to insert a
222  * single tuple into a partitioned table and this must be fast.
223  */
/* NOTE(review): the zeroing allocation of 'proute' appears to be elided
 * here (the comment below says "Rest of members initialized by zeroing"). */
225  proute->partition_root = rel;
226  proute->memcxt = CurrentMemoryContext;
227  /* Rest of members initialized by zeroing */
228 
229  /*
230  * Initialize this table's PartitionDispatch object. Here we pass in the
231  * parent as NULL as we don't need to care about any parent of the target
232  * partitioned table.
233  */
234  ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
235  NULL, 0);
236 
237  /*
238  * If performing an UPDATE with tuple routing, we can reuse partition
239  * sub-plan result rels. We build a hash table to map the OIDs of
240  * partitions present in mtstate->resultRelInfo to their ResultRelInfos.
241  * Every time a tuple is routed to a partition that we've yet to set the
242  * ResultRelInfo for, before we go to the trouble of making one, we check
243  * for a pre-made one in the hash table.
244  */
245  if (node && node->operation == CMD_UPDATE)
246  ExecHashSubPlanResultRelsByOid(mtstate, proute);
247 
248  return proute;
249 }
250 
251 /*
252  * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
253  * the tuple contained in *slot should belong to.
254  *
255  * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
256  * one up or reuse one from mtstate's resultRelInfo array. When reusing a
257  * ResultRelInfo from the mtstate we verify that the relation is a valid
258  * target for INSERTs and then set up a PartitionRoutingInfo for it.
259  *
260  * rootResultRelInfo is the relation named in the query.
261  *
262  * estate must be non-NULL; we'll need it to compute any expressions in the
263  * partition keys. Also, its per-tuple contexts are used as evaluation
264  * scratch space.
265  *
266  * If no leaf partition is found, this routine errors out with the appropriate
267  * error message. An error may also be raised if the found target partition
268  * is not a valid target for an INSERT.
269  */
/*
 * NOTE(review): several original lines are elided from this excerpt: the
 * function's return type and name line, the declarations of 'pd' (the
 * partition_dispatch_info array read as pd[0] below) and of the 'values'
 * key array passed to FormPartitionKeyDatum(), the switch to the per-tuple
 * memory context, and parts of the ereport() argument list in the
 * no-partition-found error.  Verify against the upstream file.
 */
272  ResultRelInfo *rootResultRelInfo,
273  PartitionTupleRouting *proute,
274  TupleTableSlot *slot, EState *estate)
275 {
278  bool isnull[PARTITION_MAX_KEYS];
279  Relation rel;
280  PartitionDispatch dispatch;
281  PartitionDesc partdesc;
282  ExprContext *ecxt = GetPerTupleExprContext(estate);
283  TupleTableSlot *ecxt_scantuple_old = ecxt->ecxt_scantuple;
284  TupleTableSlot *myslot = NULL;
285  MemoryContext oldcxt;
286 
287  /* use per-tuple context here to avoid leaking memory */
289 
290  /*
291  * First check the root table's partition constraint, if any. No point in
292  * routing the tuple if it doesn't belong in the root table itself.
293  */
294  if (rootResultRelInfo->ri_PartitionCheck)
295  ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
296 
297  /* start with the root partitioned table */
298  dispatch = pd[0];
299  while (true)
300  {
301  AttrNumber *map = dispatch->tupmap;
302  int partidx = -1;
303 
305 
306  rel = dispatch->reldesc;
307  partdesc = dispatch->partdesc;
308 
309  /*
310  * Convert the tuple to this parent's layout, if different from the
311  * current relation.
312  */
313  myslot = dispatch->tupslot;
314  if (myslot != NULL)
315  {
316  Assert(map != NULL);
317  slot = execute_attr_map_slot(map, slot, myslot);
318  }
319 
320  /*
321  * Extract partition key from tuple. Expression evaluation machinery
322  * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
323  * point to the correct tuple slot. The slot might have changed from
324  * what was used for the parent table if the table of the current
325  * partitioning level has different tuple descriptor from the parent.
326  * So update ecxt_scantuple accordingly.
327  */
328  ecxt->ecxt_scantuple = slot;
329  FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
330 
331  /*
332  * If this partitioned table has no partitions or no partition for
333  * these values, error out.
334  */
335  if (partdesc->nparts == 0 ||
336  (partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
337  {
338  char *val_desc;
339 
/* NOTE(review): the call that builds 'val_desc' (key description helper)
 * is partially elided here -- only its trailing arguments survive. */
341  values, isnull, 64);
343  ereport(ERROR,
344  (errcode(ERRCODE_CHECK_VIOLATION),
345  errmsg("no partition of relation \"%s\" found for row",
347  val_desc ?
348  errdetail("Partition key of the failing row contains %s.",
349  val_desc) : 0));
350  }
351 
352  if (partdesc->is_leaf[partidx])
353  {
354  ResultRelInfo *rri;
355 
356  /*
357  * Look to see if we've already got a ResultRelInfo for this
358  * partition.
359  */
360  if (likely(dispatch->indexes[partidx] >= 0))
361  {
362  /* ResultRelInfo already built */
363  Assert(dispatch->indexes[partidx] < proute->num_partitions);
364  rri = proute->partitions[dispatch->indexes[partidx]];
365  }
366  else
367  {
368  bool found = false;
369 
370  /*
371  * We have not yet set up a ResultRelInfo for this partition,
372  * but if we have a subplan hash table, we might have one
373  * there. If not, we'll have to create one.
374  */
375  if (proute->subplan_resultrel_htab)
376  {
377  Oid partoid = partdesc->oids[partidx];
/* NOTE(review): the declaration of 'elem' appears to be elided here. */
379 
380  elem = hash_search(proute->subplan_resultrel_htab,
381  &partoid, HASH_FIND, NULL);
382  if (elem)
383  {
384  found = true;
385  rri = elem->rri;
386 
387  /* Verify this ResultRelInfo allows INSERTs */
/* NOTE(review): the verification call itself (presumably
 * CheckValidResultRel) appears to be elided here. */
389 
390  /* Set up the PartitionRoutingInfo for it */
391  ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
392  rri, partidx);
393  }
394  }
395 
396  /* We need to create a new one. */
397  if (!found)
398  rri = ExecInitPartitionInfo(mtstate, estate, proute,
399  dispatch,
400  rootResultRelInfo, partidx);
401  }
402 
403  /* Release the tuple in the lowest parent's dedicated slot. */
404  if (slot == myslot)
405  ExecClearTuple(myslot);
406 
407  MemoryContextSwitchTo(oldcxt);
408  ecxt->ecxt_scantuple = ecxt_scantuple_old;
409  return rri;
410  }
411  else
412  {
413  /*
414  * Partition is a sub-partitioned table; get the PartitionDispatch
415  */
416  if (likely(dispatch->indexes[partidx] >= 0))
417  {
418  /* Already built. */
419  Assert(dispatch->indexes[partidx] < proute->num_dispatch);
420 
421  /*
422  * Move down to the next partition level and search again
423  * until we find a leaf partition that matches this tuple
424  */
425  dispatch = pd[dispatch->indexes[partidx]];
426  }
427  else
428  {
429  /* Not yet built. Do that now. */
430  PartitionDispatch subdispatch;
431 
432  /*
433  * Create the new PartitionDispatch. We pass the current one
434  * in as the parent PartitionDispatch
435  */
436  subdispatch = ExecInitPartitionDispatchInfo(mtstate->ps.state,
437  proute,
438  partdesc->oids[partidx],
439  dispatch, partidx);
440  Assert(dispatch->indexes[partidx] >= 0 &&
441  dispatch->indexes[partidx] < proute->num_dispatch);
442  dispatch = subdispatch;
443  }
444  }
445  }
446 }
447 
448 /*
449  * ExecHashSubPlanResultRelsByOid
450  * Build a hash table to allow fast lookups of subplan ResultRelInfos by
451  * partition Oid. We also populate the subplan ResultRelInfo with an
452  * ri_PartitionRoot.
453  */
454 static void
/* NOTE(review): the function-name line of this definition (carrying the
 * ModifyTableState parameter read as 'mtstate' below) is elided in this
 * excerpt -- verify against the upstream file. */
456  PartitionTupleRouting *proute)
457 {
458  HASHCTL ctl;
459  HTAB *htab;
460  int i;
461 
462  memset(&ctl, 0, sizeof(ctl));
463  ctl.keysize = sizeof(Oid);
464  ctl.entrysize = sizeof(SubplanResultRelHashElem);
/* NOTE(review): one HASHCTL member assignment appears elided here; the
 * HASH_CONTEXT flag below implies ctl.hcxt was set -- confirm upstream. */
466 
467  htab = hash_create("PartitionTupleRouting table", mtstate->mt_nplans,
468  &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
469  proute->subplan_resultrel_htab = htab;
470 
471  /* Hash all subplans by their Oid */
472  for (i = 0; i < mtstate->mt_nplans; i++)
473  {
474  ResultRelInfo *rri = &mtstate->resultRelInfo[i];
475  bool found;
476  Oid partoid = RelationGetRelid(rri->ri_RelationDesc);
/* NOTE(review): the declaration of 'elem' (SubplanResultRelHashElem *)
 * appears to be elided here. */
478 
479  elem = (SubplanResultRelHashElem *)
480  hash_search(htab, &partoid, HASH_ENTER, &found);
481  Assert(!found);
482  elem->rri = rri;
483 
484  /*
485  * This is required in order to convert the partition's tuple to be
486  * compatible with the root partitioned table's tuple descriptor. When
487  * generating the per-subplan result rels, this was not set.
488  */
489  rri->ri_PartitionRoot = proute->partition_root;
490  }
491 }
492 
493 /*
494  * ExecInitPartitionInfo
495  * Lock the partition and initialize ResultRelInfo. Also setup other
496  * information for the partition and store it in the next empty slot in
497  * the proute->partitions array.
498  *
499  * Returns the ResultRelInfo
500  */
501 static ResultRelInfo *
/*
 * NOTE(review): this excerpt elides a number of original lines from this
 * function: the name line of the definition (carrying the ModifyTableState
 * and EState parameters read below), one operand of the ExecOpenIndices
 * speculative-insert condition, one line of each WCO/plan-count Assert,
 * the attribute-map builder calls whose trailing arguments survive (the
 * "RelationGetDescr(firstResultRel));" continuations), the declaration of
 * 'wco' in the WCO loop, and the statement that appends the new
 * ResultRelInfo to the estate's routing list near the end.  Verify each
 * gap against the upstream file before modifying.
 */
503  PartitionTupleRouting *proute,
504  PartitionDispatch dispatch,
505  ResultRelInfo *rootResultRelInfo,
506  int partidx)
507 {
508  ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
509  Relation rootrel = rootResultRelInfo->ri_RelationDesc,
510  partrel;
511  Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
512  ResultRelInfo *leaf_part_rri;
513  MemoryContext oldcxt;
514  AttrNumber *part_attnos = NULL;
515  bool found_whole_row;
516 
517  oldcxt = MemoryContextSwitchTo(proute->memcxt);
518 
519  partrel = table_open(dispatch->partdesc->oids[partidx], RowExclusiveLock);
520 
521  leaf_part_rri = makeNode(ResultRelInfo);
522  InitResultRelInfo(leaf_part_rri,
523  partrel,
524  node ? node->rootRelation : 1,
525  rootrel,
526  estate->es_instrument);
527 
528  /*
529  * Verify result relation is a valid target for an INSERT. An UPDATE of a
530  * partition-key becomes a DELETE+INSERT operation, so this check is still
531  * required when the operation is CMD_UPDATE.
532  */
533  CheckValidResultRel(leaf_part_rri, CMD_INSERT);
534 
535  /*
536  * Open partition indices. The user may have asked to check for conflicts
537  * within this leaf partition and do "nothing" instead of throwing an
538  * error. Be prepared in that case by initializing the index information
539  * needed by ExecInsert() to perform speculative insertions.
540  */
541  if (partrel->rd_rel->relhasindex &&
542  leaf_part_rri->ri_IndexRelationDescs == NULL)
543  ExecOpenIndices(leaf_part_rri,
544  (node != NULL &&
/* NOTE(review): the remainder of this condition (presumably testing
 * node->onConflictAction) is elided here. */
546 
547  /*
548  * Build WITH CHECK OPTION constraints for the partition. Note that we
549  * didn't build the withCheckOptionList for partitions within the planner,
550  * but simple translation of varattnos will suffice. This only occurs for
551  * the INSERT case or in the case of UPDATE tuple routing where we didn't
552  * find a result rel to reuse in ExecSetupPartitionTupleRouting().
553  */
554  if (node && node->withCheckOptionLists != NIL)
555  {
556  List *wcoList;
557  List *wcoExprs = NIL;
558  ListCell *ll;
559  int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
560 
561  /*
562  * In the case of INSERT on a partitioned table, there is only one
563  * plan. Likewise, there is only one WCO list, not one per partition.
564  * For UPDATE, there are as many WCO lists as there are plans.
565  */
566  Assert((node->operation == CMD_INSERT &&
567  list_length(node->withCheckOptionLists) == 1 &&
568  list_length(node->plans) == 1) ||
569  (node->operation == CMD_UPDATE &&
571  list_length(node->plans)));
572 
573  /*
574  * Use the WCO list of the first plan as a reference to calculate
575  * attno's for the WCO list of this partition. In the INSERT case,
576  * that refers to the root partitioned table, whereas in the UPDATE
577  * tuple routing case, that refers to the first partition in the
578  * mtstate->resultRelInfo array. In any case, both that relation and
579  * this partition should have the same columns, so we should be able
580  * to map attributes successfully.
581  */
582  wcoList = linitial(node->withCheckOptionLists);
583 
584  /*
585  * Convert Vars in it to contain this partition's attribute numbers.
586  */
587  part_attnos =
589  RelationGetDescr(firstResultRel));
590  wcoList = (List *)
591  map_variable_attnos((Node *) wcoList,
592  firstVarno, 0,
593  part_attnos,
594  RelationGetDescr(firstResultRel)->natts,
595  RelationGetForm(partrel)->reltype,
596  &found_whole_row);
597  /* We ignore the value of found_whole_row. */
598 
599  foreach(ll, wcoList)
600  {
602  ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
603  &mtstate->ps);
604 
605  wcoExprs = lappend(wcoExprs, wcoExpr);
606  }
607 
608  leaf_part_rri->ri_WithCheckOptions = wcoList;
609  leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
610  }
611 
612  /*
613  * Build the RETURNING projection for the partition. Note that we didn't
614  * build the returningList for partitions within the planner, but simple
615  * translation of varattnos will suffice. This only occurs for the INSERT
616  * case or in the case of UPDATE tuple routing where we didn't find a
617  * result rel to reuse in ExecSetupPartitionTupleRouting().
618  */
619  if (node && node->returningLists != NIL)
620  {
621  TupleTableSlot *slot;
622  ExprContext *econtext;
623  List *returningList;
624  int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
625 
626  /* See the comment above for WCO lists. */
627  Assert((node->operation == CMD_INSERT &&
628  list_length(node->returningLists) == 1 &&
629  list_length(node->plans) == 1) ||
630  (node->operation == CMD_UPDATE &&
631  list_length(node->returningLists) ==
632  list_length(node->plans)));
633 
634  /*
635  * Use the RETURNING list of the first plan as a reference to
636  * calculate attno's for the RETURNING list of this partition. See
637  * the comment above for WCO lists for more details on why this is
638  * okay.
639  */
640  returningList = linitial(node->returningLists);
641 
642  /*
643  * Convert Vars in it to contain this partition's attribute numbers.
644  */
645  if (part_attnos == NULL)
646  part_attnos =
648  RelationGetDescr(firstResultRel));
649  returningList = (List *)
650  map_variable_attnos((Node *) returningList,
651  firstVarno, 0,
652  part_attnos,
653  RelationGetDescr(firstResultRel)->natts,
654  RelationGetForm(partrel)->reltype,
655  &found_whole_row);
656  /* We ignore the value of found_whole_row. */
657 
658  leaf_part_rri->ri_returningList = returningList;
659 
660  /*
661  * Initialize the projection itself.
662  *
663  * Use the slot and the expression context that would have been set up
664  * in ExecInitModifyTable() for projection's output.
665  */
666  Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
667  slot = mtstate->ps.ps_ResultTupleSlot;
668  Assert(mtstate->ps.ps_ExprContext != NULL);
669  econtext = mtstate->ps.ps_ExprContext;
670  leaf_part_rri->ri_projectReturning =
671  ExecBuildProjectionInfo(returningList, econtext, slot,
672  &mtstate->ps, RelationGetDescr(partrel));
673  }
674 
675  /* Set up information needed for routing tuples to the partition. */
676  ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
677  leaf_part_rri, partidx);
678 
679  /*
680  * If there is an ON CONFLICT clause, initialize state for it.
681  */
682  if (node && node->onConflictAction != ONCONFLICT_NONE)
683  {
684  int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
685  TupleDesc partrelDesc = RelationGetDescr(partrel);
686  ExprContext *econtext = mtstate->ps.ps_ExprContext;
687  ListCell *lc;
688  List *arbiterIndexes = NIL;
689 
690  /*
691  * If there is a list of arbiter indexes, map it to a list of indexes
692  * in the partition. We do that by scanning the partition's index
693  * list and searching for ancestry relationships to each index in the
694  * ancestor table.
695  */
696  if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) > 0)
697  {
698  List *childIdxs;
699 
700  childIdxs = RelationGetIndexList(leaf_part_rri->ri_RelationDesc);
701 
702  foreach(lc, childIdxs)
703  {
704  Oid childIdx = lfirst_oid(lc);
705  List *ancestors;
706  ListCell *lc2;
707 
708  ancestors = get_partition_ancestors(childIdx);
709  foreach(lc2, rootResultRelInfo->ri_onConflictArbiterIndexes)
710  {
711  if (list_member_oid(ancestors, lfirst_oid(lc2)))
712  arbiterIndexes = lappend_oid(arbiterIndexes, childIdx);
713  }
714  list_free(ancestors);
715  }
716  }
717 
718  /*
719  * If the resulting lists are of inequal length, something is wrong.
720  * (This shouldn't happen, since arbiter index selection should not
721  * pick up an invalid index.)
722  */
723  if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
724  list_length(arbiterIndexes))
725  elog(ERROR, "invalid arbiter index list");
726  leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
727 
728  /*
729  * In the DO UPDATE case, we have some more state to initialize.
730  */
731  if (node->onConflictAction == ONCONFLICT_UPDATE)
732  {
733  TupleConversionMap *map;
734 
735  map = leaf_part_rri->ri_PartitionInfo->pi_RootToPartitionMap;
736 
737  Assert(node->onConflictSet != NIL);
738  Assert(rootResultRelInfo->ri_onConflict != NULL);
739 
740  leaf_part_rri->ri_onConflict = makeNode(OnConflictSetState);
741 
742  /*
743  * Need a separate existing slot for each partition, as the
744  * partition could be of a different AM, even if the tuple
745  * descriptors match.
746  */
747  leaf_part_rri->ri_onConflict->oc_Existing =
748  table_slot_create(leaf_part_rri->ri_RelationDesc,
749  &mtstate->ps.state->es_tupleTable);
750 
751  /*
752  * If the partition's tuple descriptor matches exactly the root
753  * parent (the common case), we can re-use most of the parent's ON
754  * CONFLICT SET state, skipping a bunch of work. Otherwise, we
755  * need to create state specific to this partition.
756  */
757  if (map == NULL)
758  {
759  /*
760  * It's safe to reuse these from the partition root, as we
761  * only process one tuple at a time (therefore we won't
762  * overwrite needed data in slots), and the results of
763  * projections are independent of the underlying storage.
764  * Projections and where clauses themselves don't store state
765  * / are independent of the underlying storage.
766  */
767  leaf_part_rri->ri_onConflict->oc_ProjSlot =
768  rootResultRelInfo->ri_onConflict->oc_ProjSlot;
769  leaf_part_rri->ri_onConflict->oc_ProjInfo =
770  rootResultRelInfo->ri_onConflict->oc_ProjInfo;
771  leaf_part_rri->ri_onConflict->oc_WhereClause =
772  rootResultRelInfo->ri_onConflict->oc_WhereClause;
773  }
774  else
775  {
776  List *onconflset;
777  TupleDesc tupDesc;
778  bool found_whole_row;
779 
780  /*
781  * Translate expressions in onConflictSet to account for
782  * different attribute numbers. For that, map partition
783  * varattnos twice: first to catch the EXCLUDED
784  * pseudo-relation (INNER_VAR), and second to handle the main
785  * target relation (firstVarno).
786  */
787  onconflset = (List *) copyObject((Node *) node->onConflictSet);
788  if (part_attnos == NULL)
789  part_attnos =
791  RelationGetDescr(firstResultRel));
792  onconflset = (List *)
793  map_variable_attnos((Node *) onconflset,
794  INNER_VAR, 0,
795  part_attnos,
796  RelationGetDescr(firstResultRel)->natts,
797  RelationGetForm(partrel)->reltype,
798  &found_whole_row);
799  /* We ignore the value of found_whole_row. */
800  onconflset = (List *)
801  map_variable_attnos((Node *) onconflset,
802  firstVarno, 0,
803  part_attnos,
804  RelationGetDescr(firstResultRel)->natts,
805  RelationGetForm(partrel)->reltype,
806  &found_whole_row);
807  /* We ignore the value of found_whole_row. */
808 
809  /* Finally, adjust this tlist to match the partition. */
810  onconflset = adjust_partition_tlist(onconflset, map);
811 
812  /* create the tuple slot for the UPDATE SET projection */
813  tupDesc = ExecTypeFromTL(onconflset);
814  leaf_part_rri->ri_onConflict->oc_ProjSlot =
815  ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc,
816  &TTSOpsVirtual);
817 
818  /* build UPDATE SET projection state */
819  leaf_part_rri->ri_onConflict->oc_ProjInfo =
820  ExecBuildProjectionInfo(onconflset, econtext,
821  leaf_part_rri->ri_onConflict->oc_ProjSlot,
822  &mtstate->ps, partrelDesc);
823 
824  /*
825  * If there is a WHERE clause, initialize state where it will
826  * be evaluated, mapping the attribute numbers appropriately.
827  * As with onConflictSet, we need to map partition varattnos
828  * to the partition's tupdesc.
829  */
830  if (node->onConflictWhere)
831  {
832  List *clause;
833 
834  clause = copyObject((List *) node->onConflictWhere);
835  clause = (List *)
836  map_variable_attnos((Node *) clause,
837  INNER_VAR, 0,
838  part_attnos,
839  RelationGetDescr(firstResultRel)->natts,
840  RelationGetForm(partrel)->reltype,
841  &found_whole_row);
842  /* We ignore the value of found_whole_row. */
843  clause = (List *)
844  map_variable_attnos((Node *) clause,
845  firstVarno, 0,
846  part_attnos,
847  RelationGetDescr(firstResultRel)->natts,
848  RelationGetForm(partrel)->reltype,
849  &found_whole_row);
850  /* We ignore the value of found_whole_row. */
851  leaf_part_rri->ri_onConflict->oc_WhereClause =
852  ExecInitQual((List *) clause, &mtstate->ps);
853  }
854  }
855  }
856  }
857 
858  /*
859  * Since we've just initialized this ResultRelInfo, it's not in any list
860  * attached to the estate as yet. Add it, so that it can be found later.
861  *
862  * Note that the entries in this list appear in no predetermined order,
863  * because partition result rels are initialized as and when they're
864  * needed.
865  */
/* NOTE(review): the statement that appends 'leaf_part_rri' to the
 * estate's routing result-relation list is elided here -- only its
 * trailing argument line survives below. */
869  leaf_part_rri);
870 
871  MemoryContextSwitchTo(oldcxt);
872 
873  return leaf_part_rri;
874 }
875 
876 /*
877  * ExecInitRoutingInfo
878  * Set up information needed for translating tuples between root
879  * partitioned table format and partition format, and keep track of it
880  * in PartitionTupleRouting.
881  */
882 static void
/* NOTE(review): the function-name line of this definition (carrying the
 * ModifyTableState parameter read as 'mtstate' below) is elided in this
 * excerpt -- verify against the upstream file. */
884  EState *estate,
885  PartitionTupleRouting *proute,
886  PartitionDispatch dispatch,
887  ResultRelInfo *partRelInfo,
888  int partidx)
889 {
890  MemoryContext oldcxt;
891  PartitionRoutingInfo *partrouteinfo;
892  int rri_index;
893 
894  oldcxt = MemoryContextSwitchTo(proute->memcxt);
895 
896  partrouteinfo = palloc(sizeof(PartitionRoutingInfo));
897 
898  /*
899  * Set up a tuple conversion map to convert a tuple routed to the
900  * partition from the parent's type to the partition's.
901  */
902  partrouteinfo->pi_RootToPartitionMap =
/* NOTE(review): the first line of the conversion-map builder call is
 * elided here; only its trailing argument survives below. */
904  RelationGetDescr(partRelInfo->ri_RelationDesc));
905 
906  /*
907  * If a partition has a different rowtype than the root parent, initialize
908  * a slot dedicated to storing this partition's tuples. The slot is used
909  * for various operations that are applied to tuples after routing, such
910  * as checking constraints.
911  */
912  if (partrouteinfo->pi_RootToPartitionMap != NULL)
913  {
914  Relation partrel = partRelInfo->ri_RelationDesc;
915 
916  /*
917  * Initialize the slot itself setting its descriptor to this
918  * partition's TupleDesc; TupleDesc reference will be released at the
919  * end of the command.
920  */
921  partrouteinfo->pi_PartitionTupleSlot =
922  table_slot_create(partrel, &estate->es_tupleTable);
923  }
924  else
925  partrouteinfo->pi_PartitionTupleSlot = NULL;
926 
927  /*
928  * Also, if transition capture is required, store a map to convert tuples
929  * from partition's rowtype to the root partition table's.
930  */
931  if (mtstate &&
932  (mtstate->mt_transition_capture || mtstate->mt_oc_transition_capture))
933  {
934  partrouteinfo->pi_PartitionToRootMap =
/* NOTE(review): the first line of this conversion-map builder call is
 * likewise elided; only its trailing argument survives below. */
936  RelationGetDescr(partRelInfo->ri_PartitionRoot));
937  }
938  else
939  partrouteinfo->pi_PartitionToRootMap = NULL;
940 
941  /*
942  * If the partition is a foreign table, let the FDW init itself for
943  * routing tuples to the partition.
944  */
945  if (partRelInfo->ri_FdwRoutine != NULL &&
946  partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
947  partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
948 
949  partRelInfo->ri_PartitionInfo = partrouteinfo;
950  partRelInfo->ri_CopyMultiInsertBuffer = NULL;
951 
952  /*
953  * Keep track of it in the PartitionTupleRouting->partitions array.
954  */
955  Assert(dispatch->indexes[partidx] == -1);
956 
957  rri_index = proute->num_partitions++;
958 
959  /* Allocate or enlarge the array, as needed */
960  if (proute->num_partitions >= proute->max_partitions)
961  {
962  if (proute->max_partitions == 0)
963  {
964  proute->max_partitions = 8;
965  proute->partitions = (ResultRelInfo **)
966  palloc(sizeof(ResultRelInfo *) * proute->max_partitions);
967  }
968  else
969  {
970  proute->max_partitions *= 2;
971  proute->partitions = (ResultRelInfo **)
972  repalloc(proute->partitions, sizeof(ResultRelInfo *) *
973  proute->max_partitions);
974  }
975  }
976 
977  proute->partitions[rri_index] = partRelInfo;
978  dispatch->indexes[partidx] = rri_index;
979 
980  MemoryContextSwitchTo(oldcxt);
981 }
982 
983 /*
984  * ExecInitPartitionDispatchInfo
985  * Lock the partitioned table (if not locked already) and initialize
986  * PartitionDispatch for a partitioned table and store it in the next
987  * available slot in the proute->partition_dispatch_info array. Also,
988  * record the index into this array in the parent_pd->indexes[] array in
989  * the partidx element so that we can properly retrieve the newly created
990  * PartitionDispatch later.
991  */
992 static PartitionDispatch
/* NOTE(review): the function-name line of this signature (ExecInitPartitionDispatchInfo(EState *estate, ...)
 * is missing from this rendered copy — confirm against the original file. */
994  PartitionTupleRouting *proute, Oid partoid,
995  PartitionDispatch parent_pd, int partidx)
996 {
997  Relation rel;
998  PartitionDesc partdesc;
/* NOTE(review): the declaration of 'pd' (original line 999) is missing from this copy. */
1000  int dispatchidx;
1001  MemoryContext oldcxt;
1002 
/* Lazily create the query-lifespan partition directory on first use, so a
 * consistent PartitionDesc is seen for each relation during this query. */
1003  if (estate->es_partition_directory == NULL)
1004  estate->es_partition_directory =
1006 
/* Everything allocated below must live in the routing-info memory context. */
1007  oldcxt = MemoryContextSwitchTo(proute->memcxt);
1008 
1009  /*
1010  * Only sub-partitioned tables need to be locked here. The root
1011  * partitioned table will already have been locked as it's referenced in
1012  * the query's rtable.
1013  */
1014  if (partoid != RelationGetRelid(proute->partition_root))
1015  rel = table_open(partoid, RowExclusiveLock);
1016  else
1017  rel = proute->partition_root;
1018  partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
1019 
/* NOTE(review): the palloc of 'pd' (original line 1020), whose size includes
 * space for partdesc->nparts 'indexes' entries, is missing from this copy. */
1021  partdesc->nparts * sizeof(int));
1022  pd->reldesc = rel;
1023  pd->key = RelationGetPartitionKey(rel);
1024  pd->keystate = NIL;
1025  pd->partdesc = partdesc;
1026  if (parent_pd != NULL)
1027  {
1028  TupleDesc tupdesc = RelationGetDescr(rel);
1029 
1030  /*
1031  * For sub-partitioned tables where the column order differs from its
1032  * direct parent partitioned table, we must store a tuple table slot
1033  * initialized with its tuple descriptor and a tuple conversion map to
1034  * convert a tuple from its parent's rowtype to its own. This is to
1035  * make sure that we are looking at the correct row using the correct
1036  * tuple descriptor when computing its partition key for tuple
1037  * routing.
1038  */
/* NOTE(review): the call that builds pd->tupmap (original line 1039) is
 * missing from this copy; only its second argument line is visible. */
1040  tupdesc);
1041  pd->tupslot = pd->tupmap ?
1042  MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
1043  }
1044  else
1045  {
1046  /* Not required for the root partitioned table */
1047  pd->tupmap = NULL;
1048  pd->tupslot = NULL;
1049  }
1050 
1051  /*
1052  * Initialize with -1 to signify that the corresponding partition's
1053  * ResultRelInfo or PartitionDispatch has not been created yet.
1054  */
1055  memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
1056 
1057  /* Track in PartitionTupleRouting for later use */
1058  dispatchidx = proute->num_dispatch++;
1059 
1060  /* Allocate or enlarge the array, as needed */
1061  if (proute->num_dispatch >= proute->max_dispatch)
1062  {
1063  if (proute->max_dispatch == 0)
1064  {
/* First allocation: start small; the array doubles on each overflow below. */
1065  proute->max_dispatch = 4;
1067  palloc(sizeof(PartitionDispatch) * proute->max_dispatch);
1068  }
1069  else
1070  {
1071  proute->max_dispatch *= 2;
1074  sizeof(PartitionDispatch) * proute->max_dispatch);
1075  }
1076  }
1077  proute->partition_dispatch_info[dispatchidx] = pd;
1078 
1079  /*
1080  * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
1081  * install a downlink in the parent to allow quick descent.
1082  */
1083  if (parent_pd)
1084  {
1085  Assert(parent_pd->indexes[partidx] == -1);
1086  parent_pd->indexes[partidx] = dispatchidx;
1087  }
1088 
1089  MemoryContextSwitchTo(oldcxt);
1090 
1091  return pd;
1092 }
1093 
1094 /*
1095  * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
1096  * routing.
1097  *
1098  * Close all the partitioned tables, leaf partitions, and their indices.
1099  */
1100 void
/* NOTE(review): the function-name line (ExecCleanupTupleRouting(ModifyTableState *mtstate,)
 * is missing from this rendered copy — confirm against the original file. */
1102  PartitionTupleRouting *proute)
1103 {
1104  HTAB *htab = proute->subplan_resultrel_htab;
1105  int i;
1106 
1107  /*
1108  * Remember, proute->partition_dispatch_info[0] corresponds to the root
1109  * partitioned table, which we must not try to close, because it is the
1110  * main target table of the query that will be closed by callers such as
1111  * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
1112  * partitioned table.
1113  */
1114  for (i = 1; i < proute->num_dispatch; i++)
1115  {
/* NOTE(review): the line fetching 'pd' from partition_dispatch_info[i]
 * (original line 1116) is missing from this copy. */
1117 
1118  table_close(pd->reldesc, NoLock);
1119 
1120  if (pd->tupslot)
/* NOTE(review): the slot-drop call (original line 1121) is missing here. */
1122  }
1123 
1124  for (i = 0; i < proute->num_partitions; i++)
1125  {
1126  ResultRelInfo *resultRelInfo = proute->partitions[i];
1127 
1128  /* Allow any FDWs to shut down */
1129  if (resultRelInfo->ri_FdwRoutine != NULL &&
1130  resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
1131  resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
1132  resultRelInfo);
1133 
1134  /*
1135  * Check if this result rel is one belonging to the node's subplans,
1136  * if so, let ExecEndPlan() clean it up.
1137  */
1138  if (htab)
1139  {
1140  Oid partoid;
1141  bool found;
1142 
1143  partoid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1144 
/* Subplan result rels are owned by the plan tree; skip closing them here. */
1145  (void) hash_search(htab, &partoid, HASH_FIND, &found);
1146  if (found)
1147  continue;
1148  }
1149 
/* This result rel was created just for tuple routing; close it fully. */
1150  ExecCloseIndices(resultRelInfo);
1151  table_close(resultRelInfo->ri_RelationDesc, NoLock);
1152  }
1153 }
1154 
1155 /* ----------------
1156  * FormPartitionKeyDatum
1157  * Construct values[] and isnull[] arrays for the partition key
1158  * of a tuple.
1159  *
1160  * pd Partition dispatch object of the partitioned table
1161  * slot Heap tuple from which to extract partition key
1162  * estate executor state for evaluating any partition key
1163  * expressions (must be non-NULL)
1164  * values Array of partition key Datums (output area)
1165  * isnull Array of is-null indicators (output area)
1166  *
1167  * the ecxt_scantuple slot of estate's per-tuple expr context must point to
1168  * the heap tuple passed in.
1169  * ----------------
1170  */
1171 static void
/* NOTE(review): the function-name line (FormPartitionKeyDatum(PartitionDispatch pd,)
 * is missing from this rendered copy — confirm against the original file. */
1173  TupleTableSlot *slot,
1174  EState *estate,
1175  Datum *values,
1176  bool *isnull)
1177 {
1178  ListCell *partexpr_item;
1179  int i;
1180 
1181  if (pd->key->partexprs != NIL && pd->keystate == NIL)
1182  {
1183  /* Check caller has set up context correctly */
1184  Assert(estate != NULL &&
1185  GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
1186 
1187  /* First time through, set up expression evaluation state */
1188  pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
1189  }
1190 
/* Walk the key columns; expression columns consume keystate entries in order. */
1191  partexpr_item = list_head(pd->keystate);
1192  for (i = 0; i < pd->key->partnatts; i++)
1193  {
1194  AttrNumber keycol = pd->key->partattrs[i];
1195  Datum datum;
1196  bool isNull;
1197 
1198  if (keycol != 0)
1199  {
1200  /* Plain column; get the value directly from the heap tuple */
1201  datum = slot_getattr(slot, keycol, &isNull);
1202  }
1203  else
1204  {
1205  /* Expression; need to evaluate it */
1206  if (partexpr_item == NULL)
1207  elog(ERROR, "wrong number of partition key expressions");
1208  datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
1209  GetPerTupleExprContext(estate),
1210  &isNull);
1211  partexpr_item = lnext(pd->keystate, partexpr_item);
1212  }
1213  values[i] = datum;
1214  isnull[i] = isNull;
1215  }
1216 
/* Cross-check: every prepared expression must have been consumed exactly. */
1217  if (partexpr_item != NULL)
1218  elog(ERROR, "wrong number of partition key expressions");
1219 }
1220 
1221 /*
1222  * get_partition_for_tuple
1223  * Finds partition of relation which accepts the partition key specified
1224  * in values and isnull
1225  *
1226  * Return value is index of the partition (>= 0 and < partdesc->nparts) if one
1227  * found or -1 if none found.
1228  */
1229 static int
/* NOTE(review): the signature line (get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull))
 * is missing from this rendered copy — confirm against the original file. */
1231 {
1232  int bound_offset;
1233  int part_index = -1;
1234  PartitionKey key = pd->key;
1235  PartitionDesc partdesc = pd->partdesc;
1236  PartitionBoundInfo boundinfo = partdesc->boundinfo;
1237 
1238  /* Route as appropriate based on partitioning strategy. */
1239  switch (key->strategy)
1240  {
/* NOTE(review): the case labels for the hash/list/range strategies (original
 * lines 1241, 1256, 1275) are missing from this copy; the bodies below are,
 * in order: hash, list, and range routing. */
1242  {
1243  int greatest_modulus;
1244  uint64 rowHash;
1245 
1246  greatest_modulus = get_hash_partition_greatest_modulus(boundinfo);
1247  rowHash = compute_partition_hash_value(key->partnatts,
1248  key->partsupfunc,
1249  key->partcollation,
1250  values, isnull);
1251 
1252  part_index = boundinfo->indexes[rowHash % greatest_modulus];
1253  }
1254  break;
1255 
1257  if (isnull[0])
1258  {
/* List partitioning: a NULL key goes to the partition that accepts NULLs,
 * if any; otherwise part_index stays -1 and the default partition is used. */
1259  if (partition_bound_accepts_nulls(boundinfo))
1260  part_index = boundinfo->null_index;
1261  }
1262  else
1263  {
1264  bool equal = false;
1265 
1266  bound_offset = partition_list_bsearch(key->partsupfunc,
1267  key->partcollation,
1268  boundinfo,
1269  values[0], &equal);
1270  if (bound_offset >= 0 && equal)
1271  part_index = boundinfo->indexes[bound_offset];
1272  }
1273  break;
1274 
1276  {
1277  bool equal = false,
1278  range_partkey_has_null = false;
1279  int i;
1280 
1281  /*
1282  * No range includes NULL, so this will be accepted by the
1283  * default partition if there is one, and otherwise rejected.
1284  */
1285  for (i = 0; i < key->partnatts; i++)
1286  {
1287  if (isnull[i])
1288  {
1289  range_partkey_has_null = true;
1290  break;
1291  }
1292  }
1293 
1294  if (!range_partkey_has_null)
1295  {
1296  bound_offset = partition_range_datum_bsearch(key->partsupfunc,
1297  key->partcollation,
1298  boundinfo,
1299  key->partnatts,
1300  values,
1301  &equal);
1302 
1303  /*
1304  * The bound at bound_offset is less than or equal to the
1305  * tuple value, so the bound at offset+1 is the upper
1306  * bound of the partition we're looking for, if there
1307  * actually exists one.
1308  */
1309  part_index = boundinfo->indexes[bound_offset + 1];
1310  }
1311  }
1312  break;
1313 
1314  default:
1315  elog(ERROR, "unexpected partition strategy: %d",
1316  (int) key->strategy);
1317  }
1318 
1319  /*
1320  * part_index < 0 means we failed to find a partition of this parent. Use
1321  * the default partition, if there is one.
1322  */
1323  if (part_index < 0)
1324  part_index = boundinfo->default_index;
1325 
1326  return part_index;
1327 }
1328 
1329 /*
1330  * ExecBuildSlotPartitionKeyDescription
1331  *
1332  * This works very much like BuildIndexValueDescription() and is currently
1333  * used for building error messages when ExecFindPartition() fails to find
1334  * partition for a row.
1335  */
1336 static char *
/* NOTE(review): the function-name line (ExecBuildSlotPartitionKeyDescription(Relation rel,)
 * is missing from this rendered copy — confirm against the original file. */
1338  Datum *values,
1339  bool *isnull,
1340  int maxfieldlen)
1341 {
/* NOTE(review): the declarations of 'buf' (StringInfoData) and 'key'
 * (original lines 1342-1343) are missing from this copy. */
1344  int partnatts = get_partition_natts(key);
1345  int i;
1346  Oid relid = RelationGetRelid(rel);
1347  AclResult aclresult;
1348 
/* With row-level security enabled, emit no detail at all, to avoid leaking
 * values from rows the user is not entitled to see. */
1349  if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
1350  return NULL;
1351 
1352  /* If the user has table-level access, just go build the description. */
1353  aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
1354  if (aclresult != ACLCHECK_OK)
1355  {
1356  /*
1357  * Step through the columns of the partition key and make sure the
1358  * user has SELECT rights on all of them.
1359  */
1360  for (i = 0; i < partnatts; i++)
1361  {
/* NOTE(review): the line fetching 'attnum' for key column i (original
 * line 1362) is missing from this copy. */
1363 
1364  /*
1365  * If this partition key column is an expression, we return no
1366  * detail rather than try to figure out what column(s) the
1367  * expression includes and if the user has SELECT rights on them.
1368  */
1369  if (attnum == InvalidAttrNumber ||
1370  pg_attribute_aclcheck(relid, attnum, GetUserId(),
1371  ACL_SELECT) != ACLCHECK_OK)
1372  return NULL;
1373  }
1374  }
1375 
1376  initStringInfo(&buf);
1377  appendStringInfo(&buf, "(%s) = (",
1378  pg_get_partkeydef_columns(relid, true));
1379 
1380  for (i = 0; i < partnatts; i++)
1381  {
1382  char *val;
1383  int vallen;
1384 
1385  if (isnull[i])
1386  val = "null";
1387  else
1388  {
1389  Oid foutoid;
1390  bool typisvarlena;
1391 
/* NOTE(review): the getTypeOutputInfo(...) call line (original line 1392)
 * is missing from this copy; only its output-argument line is visible. */
1393  &foutoid, &typisvarlena);
1394  val = OidOutputFunctionCall(foutoid, values[i]);
1395  }
1396 
1397  if (i > 0)
1398  appendStringInfoString(&buf, ", ");
1399 
1400  /* truncate if needed */
1401  vallen = strlen(val);
1402  if (vallen <= maxfieldlen)
1403  appendBinaryStringInfo(&buf, val, vallen);
1404  else
1405  {
/* Clip at a multibyte-character boundary, then mark the truncation. */
1406  vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1407  appendBinaryStringInfo(&buf, val, vallen);
1408  appendStringInfoString(&buf, "...");
1409  }
1410  }
1411 
1412  appendStringInfoChar(&buf, ')');
1413 
1414  return buf.data;
1415 }
1416 
1417 /*
1418  * adjust_partition_tlist
1419  * Adjust the targetlist entries for a given partition to account for
1420  * attribute differences between parent and the partition
1421  *
1422  * The expressions have already been fixed, but here we fix the list to make
1423  * target resnos match the partition's attribute numbers. This results in a
1424  * copy of the original target list in which the entries appear in resno
1425  * order, including both the existing entries (that may have their resno
1426  * changed in-place) and the newly added entries for columns that don't exist
1427  * in the parent.
1428  *
1429  * Scribbles on the input tlist, so callers must make sure to make a copy
1430  * before passing it to us.
1431  */
1432 static List *
/* NOTE(review): the signature line (adjust_partition_tlist(List *tlist, TupleConversionMap *map))
 * is missing from this rendered copy — confirm against the original file. */
1434 {
1435  List *new_tlist = NIL;
1436  TupleDesc tupdesc = map->outdesc;
1437  AttrNumber *attrMap = map->attrMap;
1438  AttrNumber attrno;
1439 
/* Build the new tlist in the partition's attribute-number order. */
1440  for (attrno = 1; attrno <= tupdesc->natts; attrno++)
1441  {
1442  Form_pg_attribute att_tup = TupleDescAttr(tupdesc, attrno - 1);
1443  TargetEntry *tle;
1444 
1445  if (attrMap[attrno - 1] != InvalidAttrNumber)
1446  {
1447  Assert(!att_tup->attisdropped);
1448 
1449  /*
1450  * Use the corresponding entry from the parent's tlist, adjusting
1451  * the resno the match the partition's attno.
1452  */
1453  tle = (TargetEntry *) list_nth(tlist, attrMap[attrno - 1] - 1);
1454  tle->resno = attrno;
1455  }
1456  else
1457  {
1458  Const *expr;
1459 
1460  /*
1461  * For a dropped attribute in the partition, generate a dummy
1462  * entry with resno matching the partition's attno.
1463  */
1464  Assert(att_tup->attisdropped);
/* A NULL constant of arbitrary type (int4) stands in for the dropped column. */
1465  expr = makeConst(INT4OID,
1466  -1,
1467  InvalidOid,
1468  sizeof(int32),
1469  (Datum) 0,
1470  true, /* isnull */
1471  true /* byval */ );
1472  tle = makeTargetEntry((Expr *) expr,
1473  attrno,
1474  pstrdup(NameStr(att_tup->attname)),
1475  false);
1476  }
1477 
1478  new_tlist = lappend(new_tlist, tle);
1479  }
1480 
1481  return new_tlist;
1482 }
1483 
1484 /*-------------------------------------------------------------------------
1485  * Run-Time Partition Pruning Support.
1486  *
1487  * The following series of functions exist to support the removal of unneeded
1488  * subplans for queries against partitioned tables. The supporting functions
1489  * here are designed to work with any plan type which supports an arbitrary
1490  * number of subplans, e.g. Append, MergeAppend.
1491  *
1492  * When pruning involves comparison of a partition key to a constant, it's
1493  * done by the planner. However, if we have a comparison to a non-constant
1494  * but not volatile expression, that presents an opportunity for run-time
1495  * pruning by the executor, allowing irrelevant partitions to be skipped
1496  * dynamically.
1497  *
1498  * We must distinguish expressions containing PARAM_EXEC Params from
1499  * expressions that don't contain those. Even though a PARAM_EXEC Param is
1500  * considered to be a stable expression, it can change value from one plan
1501  * node scan to the next during query execution. Stable comparison
1502  * expressions that don't involve such Params allow partition pruning to be
1503  * done once during executor startup. Expressions that do involve such Params
1504  * require us to prune separately for each scan of the parent plan node.
1505  *
1506  * Note that pruning away unneeded subplans during executor startup has the
1507  * added benefit of not having to initialize the unneeded subplans at all.
1508  *
1509  *
1510  * Functions:
1511  *
1512  * ExecCreatePartitionPruneState:
1513  * Creates the PartitionPruneState required by each of the two pruning
1514  * functions. Details stored include how to map the partition index
1515  * returned by the partition pruning code into subplan indexes.
1516  *
1517  * ExecFindInitialMatchingSubPlans:
1518  * Returns indexes of matching subplans. Partition pruning is attempted
1519  * without any evaluation of expressions containing PARAM_EXEC Params.
1520  * This function must be called during executor startup for the parent
1521  * plan before the subplans themselves are initialized. Subplans which
1522  * are found not to match by this function must be removed from the
1523  * plan's list of subplans during execution, as this function performs a
1524  * remap of the partition index to subplan index map and the newly
1525  * created map provides indexes only for subplans which remain after
1526  * calling this function.
1527  *
1528  * ExecFindMatchingSubPlans:
1529  * Returns indexes of matching subplans after evaluating all available
1530  * expressions. This function can only be called during execution and
1531  * must be called again each time the value of a Param listed in
1532  * PartitionPruneState's 'execparamids' changes.
1533  *-------------------------------------------------------------------------
1534  */
1535 
1536 /*
1537  * ExecCreatePartitionPruneState
1538  * Build the data structure required for calling
1539  * ExecFindInitialMatchingSubPlans and ExecFindMatchingSubPlans.
1540  *
1541  * 'planstate' is the parent plan node's execution state.
1542  *
1543  * 'partitionpruneinfo' is a PartitionPruneInfo as generated by
1544  * make_partition_pruneinfo. Here we build a PartitionPruneState containing a
1545  * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
1546  * partitionpruneinfo->prune_infos), each of which contains a
1547  * PartitionedRelPruningData for each PartitionedRelPruneInfo appearing in
1548  * that sublist. This two-level system is needed to keep from confusing the
1549  * different hierarchies when a UNION ALL contains multiple partitioned tables
1550  * as children. The data stored in each PartitionedRelPruningData can be
1551  * re-used each time we re-evaluate which partitions match the pruning steps
1552  * provided in each PartitionedRelPruneInfo.
1553  */
/* NOTE(review): the head of this signature (PartitionPruneState * /
 * ExecCreatePartitionPruneState(PlanState *planstate,) — original lines
 * 1554-1555 — is missing from this rendered copy. */
1556  PartitionPruneInfo *partitionpruneinfo)
1557 {
1558  EState *estate = planstate->state;
1559  PartitionPruneState *prunestate;
1560  int n_part_hierarchies;
1561  ListCell *lc;
1562  int i;
1563 
/* Lazily create the query-lifespan partition directory on first use. */
1564  if (estate->es_partition_directory == NULL)
1565  estate->es_partition_directory =
1567 
1568  n_part_hierarchies = list_length(partitionpruneinfo->prune_infos);
1569  Assert(n_part_hierarchies > 0);
1570 
1571  /*
1572  * Allocate the data structure
1573  */
1574  prunestate = (PartitionPruneState *)
1575  palloc(offsetof(PartitionPruneState, partprunedata) +
1576  sizeof(PartitionPruningData *) * n_part_hierarchies);
1577 
1578  prunestate->execparamids = NULL;
1579  /* other_subplans can change at runtime, so we need our own copy */
1580  prunestate->other_subplans = bms_copy(partitionpruneinfo->other_subplans);
1581  prunestate->do_initial_prune = false; /* may be set below */
1582  prunestate->do_exec_prune = false; /* may be set below */
1583  prunestate->num_partprunedata = n_part_hierarchies;
1584 
1585  /*
1586  * Create a short-term memory context which we'll use when making calls to
1587  * the partition pruning functions. This avoids possible memory leaks,
1588  * since the pruning functions call comparison functions that aren't under
1589  * our control.
1590  */
1591  prunestate->prune_context =
1593  "Partition Prune",
1595 
/* Outer loop: one PartitionPruningData per partitioning hierarchy. */
1596  i = 0;
1597  foreach(lc, partitionpruneinfo->prune_infos)
1598  {
1599  List *partrelpruneinfos = lfirst_node(List, lc);
1600  int npartrelpruneinfos = list_length(partrelpruneinfos);
1601  PartitionPruningData *prunedata;
1602  ListCell *lc2;
1603  int j;
1604 
1605  prunedata = (PartitionPruningData *)
1606  palloc(offsetof(PartitionPruningData, partrelprunedata) +
1607  npartrelpruneinfos * sizeof(PartitionedRelPruningData));
1608  prunestate->partprunedata[i] = prunedata;
1609  prunedata->num_partrelprunedata = npartrelpruneinfos;
1610 
/* Inner loop: one PartitionedRelPruningData per partitioned rel. */
1611  j = 0;
1612  foreach(lc2, partrelpruneinfos)
1613  {
/* NOTE(review): the line fetching 'pinfo' from lc2 (original line 1614)
 * is missing from this copy. */
1615  PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
1616  Relation partrel;
1617  PartitionDesc partdesc;
1618  PartitionKey partkey;
1619 
1620  /*
1621  * We can rely on the copies of the partitioned table's partition
1622  * key and partition descriptor appearing in its relcache entry,
1623  * because that entry will be held open and locked for the
1624  * duration of this executor run.
1625  */
1626  partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex);
1627  partkey = RelationGetPartitionKey(partrel);
/* NOTE(review): the PartitionDirectoryLookup(...) call line (original
 * line 1628) is missing from this copy; only its last argument is visible. */
1629  partrel);
1630 
1631  /*
1632  * Initialize the subplan_map and subpart_map. Since detaching a
1633  * partition requires AccessExclusiveLock, no partitions can have
1634  * disappeared, nor can the bounds for any partition have changed.
1635  * However, new partitions may have been added.
1636  */
1637  Assert(partdesc->nparts >= pinfo->nparts);
1638  pprune->nparts = partdesc->nparts;
1639  pprune->subplan_map = palloc(sizeof(int) * partdesc->nparts);
1640  if (partdesc->nparts == pinfo->nparts)
1641  {
1642  /*
1643  * There are no new partitions, so this is simple. We can
1644  * simply point to the subpart_map from the plan, but we must
1645  * copy the subplan_map since we may change it later.
1646  */
1647  pprune->subpart_map = pinfo->subpart_map;
1648  memcpy(pprune->subplan_map, pinfo->subplan_map,
1649  sizeof(int) * pinfo->nparts);
1650 
1651  /*
1652  * Double-check that the list of unpruned relations has not
1653  * changed. (Pruned partitions are not in relid_map[].)
1654  */
1655 #ifdef USE_ASSERT_CHECKING
1656  for (int k = 0; k < pinfo->nparts; k++)
1657  {
1658  Assert(partdesc->oids[k] == pinfo->relid_map[k] ||
1659  pinfo->subplan_map[k] == -1);
1660  }
1661 #endif
1662  }
1663  else
1664  {
1665  int pd_idx = 0;
1666  int pp_idx;
1667 
1668  /*
1669  * Some new partitions have appeared since plan time, and
1670  * those are reflected in our PartitionDesc but were not
1671  * present in the one used to construct subplan_map and
1672  * subpart_map. So we must construct new and longer arrays
1673  * where the partitions that were originally present map to
1674  * the same place, and any added indexes map to -1, as if the
1675  * new partitions had been pruned.
1676  */
1677  pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
1678  for (pp_idx = 0; pp_idx < partdesc->nparts; ++pp_idx)
1679  {
1680  if (pinfo->relid_map[pd_idx] != partdesc->oids[pp_idx])
1681  {
1682  pprune->subplan_map[pp_idx] = -1;
1683  pprune->subpart_map[pp_idx] = -1;
1684  }
1685  else
1686  {
1687  pprune->subplan_map[pp_idx] =
1688  pinfo->subplan_map[pd_idx];
1689  pprune->subpart_map[pp_idx] =
1690  pinfo->subpart_map[pd_idx++];
1691  }
1692  }
1693  Assert(pd_idx == pinfo->nparts);
1694  }
1695 
1696  /* present_parts is also subject to later modification */
1697  pprune->present_parts = bms_copy(pinfo->present_parts);
1698 
1699  /*
1700  * Initialize pruning contexts as needed.
1701  */
/* NOTE(review): the assignment of pprune->initial_pruning_steps (original
 * line 1702) is missing from this copy. */
1703  if (pinfo->initial_pruning_steps)
1704  {
/* NOTE(review): the ExecInitPruningContext(&pprune->initial_context, ...)
 * call line (original line 1705) is missing from this copy. */
1706  pinfo->initial_pruning_steps,
1707  partdesc, partkey, planstate);
1708  /* Record whether initial pruning is needed at any level */
1709  prunestate->do_initial_prune = true;
1710  }
1711  pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
1712  if (pinfo->exec_pruning_steps)
1713  {
/* NOTE(review): the ExecInitPruningContext(&pprune->exec_context, ...)
 * call line (original line 1714) is missing from this copy. */
1715  pinfo->exec_pruning_steps,
1716  partdesc, partkey, planstate);
1717  /* Record whether exec pruning is needed at any level */
1718  prunestate->do_exec_prune = true;
1719  }
1720 
1721  /*
1722  * Accumulate the IDs of all PARAM_EXEC Params affecting the
1723  * partitioning decisions at this plan node.
1724  */
1725  prunestate->execparamids = bms_add_members(prunestate->execparamids,
1726  pinfo->execparamids);
1727 
1728  j++;
1729  }
1730  i++;
1731  }
1732 
1733  return prunestate;
1734 }
1735 
1736 /*
1737  * Initialize a PartitionPruneContext for the given list of pruning steps.
1738  */
1739 static void
/* NOTE(review): the function-name line (ExecInitPruningContext(PartitionPruneContext *context,)
 * is missing from this rendered copy — confirm against the original file. */
1741  List *pruning_steps,
1742  PartitionDesc partdesc,
1743  PartitionKey partkey,
1744  PlanState *planstate)
1745 {
1746  int n_steps;
1747  int partnatts;
1748  ListCell *lc;
1749 
1750  n_steps = list_length(pruning_steps);
1751 
/* Copy the static shape of the partitioned table into the context. */
1752  context->strategy = partkey->strategy;
1753  context->partnatts = partnatts = partkey->partnatts;
1754  context->nparts = partdesc->nparts;
1755  context->boundinfo = partdesc->boundinfo;
1756  context->partcollation = partkey->partcollation;
1757  context->partsupfunc = partkey->partsupfunc;
1758 
1759  /* We'll look up type-specific support functions as needed */
1760  context->stepcmpfuncs = (FmgrInfo *)
1761  palloc0(sizeof(FmgrInfo) * n_steps * partnatts);
1762 
1763  context->ppccontext = CurrentMemoryContext;
1764  context->planstate = planstate;
1765 
1766  /* Initialize expression state for each expression we need */
/* The exprstates array is indexed by (step_id, keyno) via PruneCxtStateIdx;
 * entries for Consts (and non-op steps) stay NULL from palloc0. */
1767  context->exprstates = (ExprState **)
1768  palloc0(sizeof(ExprState *) * n_steps * partnatts);
1769  foreach(lc, pruning_steps)
1770  {
/* NOTE(review): the line fetching 'step' from lc (original line 1771)
 * is missing from this copy. */
1772  ListCell *lc2;
1773  int keyno;
1774 
1775  /* not needed for other step kinds */
1776  if (!IsA(step, PartitionPruneStepOp))
1777  continue;
1778 
1779  Assert(list_length(step->exprs) <= partnatts);
1780 
1781  keyno = 0;
1782  foreach(lc2, step->exprs)
1783  {
1784  Expr *expr = (Expr *) lfirst(lc2);
1785 
1786  /* not needed for Consts */
1787  if (!IsA(expr, Const))
1788  {
1789  int stateidx = PruneCxtStateIdx(partnatts,
1790  step->step.step_id,
1791  keyno);
1792 
1793  context->exprstates[stateidx] =
1794  ExecInitExpr(expr, context->planstate);
1795  }
1796  keyno++;
1797  }
1798  }
1799 }
1800 
1801 /*
1802  * ExecFindInitialMatchingSubPlans
1803  * Identify the set of subplans that cannot be eliminated by initial
1804  * pruning, disregarding any pruning constraints involving PARAM_EXEC
1805  * Params.
1806  *
1807  * If additional pruning passes will be required (because of PARAM_EXEC
1808  * Params), we must also update the translation data that allows conversion
1809  * of partition indexes into subplan indexes to account for the unneeded
1810  * subplans having been removed.
1811  *
1812  * Must only be called once per 'prunestate', and only if initial pruning
1813  * is required.
1814  *
1815  * 'nsubplans' must be passed as the total number of unpruned subplans.
1816  */
1817 Bitmapset *
/* NOTE(review): the signature line (ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubplans))
 * is missing from this rendered copy — confirm against the original file. */
1819 {
1820  Bitmapset *result = NULL;
1821  MemoryContext oldcontext;
1822  int i;
1823 
1824  /* Caller error if we get here without do_initial_prune */
1825  Assert(prunestate->do_initial_prune);
1826 
1827  /*
1828  * Switch to a temp context to avoid leaking memory in the executor's
1829  * query-lifespan memory context.
1830  */
1831  oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
1832 
1833  /*
1834  * For each hierarchy, do the pruning tests, and add nondeletable
1835  * subplans' indexes to "result".
1836  */
1837  for (i = 0; i < prunestate->num_partprunedata; i++)
1838  {
1839  PartitionPruningData *prunedata;
1840  PartitionedRelPruningData *pprune;
1841 
1842  prunedata = prunestate->partprunedata[i];
/* Element 0 is always the hierarchy's topmost partitioned rel. */
1843  pprune = &prunedata->partrelprunedata[0];
1844 
1845  /* Perform pruning without using PARAM_EXEC Params */
1846  find_matching_subplans_recurse(prunedata, pprune, true, &result);
1847 
1848  /* Expression eval may have used space in node's ps_ExprContext too */
1849  if (pprune->initial_pruning_steps)
/* NOTE(review): the expression-context reset call (original line 1850)
 * is missing from this copy. */
1851  }
1852 
1853  /* Add in any subplans that partition pruning didn't account for */
1854  result = bms_add_members(result, prunestate->other_subplans);
1855 
1856  MemoryContextSwitchTo(oldcontext);
1857 
1858  /* Copy result out of the temp context before we reset it */
1859  result = bms_copy(result);
1860 
1861  MemoryContextReset(prunestate->prune_context);
1862 
1863  /*
1864  * If exec-time pruning is required and we pruned subplans above, then we
1865  * must re-sequence the subplan indexes so that ExecFindMatchingSubPlans
1866  * properly returns the indexes from the subplans which will remain after
1867  * execution of this function.
1868  *
1869  * We can safely skip this when !do_exec_prune, even though that leaves
1870  * invalid data in prunestate, because that data won't be consulted again
1871  * (cf initial Assert in ExecFindMatchingSubPlans).
1872  */
1873  if (prunestate->do_exec_prune && bms_num_members(result) < nsubplans)
1874  {
1875  int *new_subplan_indexes;
1876  Bitmapset *new_other_subplans;
1877  int i;
1878  int newidx;
1879 
1880  /*
1881  * First we must build a temporary array which maps old subplan
1882  * indexes to new ones. For convenience of initialization, we use
1883  * 1-based indexes in this array and leave pruned items as 0.
1884  */
1885  new_subplan_indexes = (int *) palloc0(sizeof(int) * nsubplans);
1886  newidx = 1;
1887  i = -1;
1888  while ((i = bms_next_member(result, i)) >= 0)
1889  {
1890  Assert(i < nsubplans);
1891  new_subplan_indexes[i] = newidx++;
1892  }
1893 
1894  /*
1895  * Now we can update each PartitionedRelPruneInfo's subplan_map with
1896  * new subplan indexes. We must also recompute its present_parts
1897  * bitmap.
1898  */
1899  for (i = 0; i < prunestate->num_partprunedata; i++)
1900  {
1901  PartitionPruningData *prunedata = prunestate->partprunedata[i];
1902  int j;
1903 
1904  /*
1905  * Within each hierarchy, we perform this loop in back-to-front
1906  * order so that we determine present_parts for the lowest-level
1907  * partitioned tables first. This way we can tell whether a
1908  * sub-partitioned table's partitions were entirely pruned so we
1909  * can exclude it from the current level's present_parts.
1910  */
1911  for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
1912  {
1913  PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
1914  int nparts = pprune->nparts;
1915  int k;
1916 
1917  /* We just rebuild present_parts from scratch */
1918  bms_free(pprune->present_parts);
1919  pprune->present_parts = NULL;
1920 
1921  for (k = 0; k < nparts; k++)
1922  {
1923  int oldidx = pprune->subplan_map[k];
1924  int subidx;
1925 
1926  /*
1927  * If this partition existed as a subplan then change the
1928  * old subplan index to the new subplan index. The new
1929  * index may become -1 if the partition was pruned above,
1930  * or it may just come earlier in the subplan list due to
1931  * some subplans being removed earlier in the list. If
1932  * it's a subpartition, add it to present_parts unless
1933  * it's entirely pruned.
1934  */
1935  if (oldidx >= 0)
1936  {
1937  Assert(oldidx < nsubplans);
/* Convert back from the 1-based temp numbering to 0-based (-1 = pruned). */
1938  pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
1939 
1940  if (new_subplan_indexes[oldidx] > 0)
1941  pprune->present_parts =
1942  bms_add_member(pprune->present_parts, k);
1943  }
1944  else if ((subidx = pprune->subpart_map[k]) >= 0)
1945  {
1946  PartitionedRelPruningData *subprune;
1947 
1948  subprune = &prunedata->partrelprunedata[subidx];
1949 
1950  if (!bms_is_empty(subprune->present_parts))
1951  pprune->present_parts =
1952  bms_add_member(pprune->present_parts, k);
1953  }
1954  }
1955  }
1956  }
1957 
1958  /*
1959  * We must also recompute the other_subplans set, since indexes in it
1960  * may change.
1961  */
1962  new_other_subplans = NULL;
1963  i = -1;
1964  while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
1965  new_other_subplans = bms_add_member(new_other_subplans,
1966  new_subplan_indexes[i] - 1);
1967 
1968  bms_free(prunestate->other_subplans);
1969  prunestate->other_subplans = new_other_subplans;
1970 
1971  pfree(new_subplan_indexes);
1972  }
1973 
1974  return result;
1975 }
1976 
1977 /*
1978  * ExecFindMatchingSubPlans
1979  * Determine which subplans match the pruning steps detailed in
1980  * 'prunestate' for the current comparison expression values.
1981  *
1982  * Here we assume we may evaluate PARAM_EXEC Params.
1983  */
1984 Bitmapset *
/* NOTE(review): the signature line (ExecFindMatchingSubPlans(PartitionPruneState *prunestate))
 * is missing from this rendered copy — confirm against the original file. */
1986 {
1987  Bitmapset *result = NULL;
1988  MemoryContext oldcontext;
1989  int i;
1990 
1991  /*
1992  * If !do_exec_prune, we've got problems because
1993  * ExecFindInitialMatchingSubPlans will not have bothered to update
1994  * prunestate for whatever pruning it did.
1995  */
1996  Assert(prunestate->do_exec_prune);
1997 
1998  /*
1999  * Switch to a temp context to avoid leaking memory in the executor's
2000  * query-lifespan memory context.
2001  */
2002  oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
2003 
2004  /*
2005  * For each hierarchy, do the pruning tests, and add nondeletable
2006  * subplans' indexes to "result".
2007  */
2008  for (i = 0; i < prunestate->num_partprunedata; i++)
2009  {
2010  PartitionPruningData *prunedata;
2011  PartitionedRelPruningData *pprune;
2012 
2013  prunedata = prunestate->partprunedata[i];
/* Element 0 is always the hierarchy's topmost partitioned rel. */
2014  pprune = &prunedata->partrelprunedata[0];
2015 
/* Unlike initial pruning, here we may evaluate PARAM_EXEC Params. */
2016  find_matching_subplans_recurse(prunedata, pprune, false, &result);
2017 
2018  /* Expression eval may have used space in node's ps_ExprContext too */
2019  if (pprune->exec_pruning_steps)
/* NOTE(review): the expression-context reset call (original line 2020)
 * is missing from this copy. */
2021  }
2022 
2023  /* Add in any subplans that partition pruning didn't account for */
2024  result = bms_add_members(result, prunestate->other_subplans);
2025 
2026  MemoryContextSwitchTo(oldcontext);
2027 
2028  /* Copy result out of the temp context before we reset it */
2029  result = bms_copy(result);
2030 
2031  MemoryContextReset(prunestate->prune_context);
2032 
2033  return result;
2034 }
2035 
2036 /*
2037  * find_matching_subplans_recurse
2038  * Recursive worker function for ExecFindMatchingSubPlans and
2039  * ExecFindInitialMatchingSubPlans
2040  *
2041  * Adds valid (non-prunable) subplan IDs to *validsubplans
2042  */
2043 static void
/* NOTE(review): the function-name line (find_matching_subplans_recurse(PartitionPruningData *prunedata,)
 * is missing from this rendered copy — confirm against the original file. */
2045  PartitionedRelPruningData *pprune,
2046  bool initial_prune,
2047  Bitmapset **validsubplans)
2048 {
2049  Bitmapset *partset;
2050  int i;
2051 
2052  /* Guard against stack overflow due to overly deep partition hierarchy. */
/* NOTE(review): the stack-depth check call (original line 2053) is missing
 * from this copy. */
2054 
2055  /* Only prune if pruning would be useful at this level. */
2056  if (initial_prune && pprune->initial_pruning_steps)
2057  {
2058  partset = get_matching_partitions(&pprune->initial_context,
2059  pprune->initial_pruning_steps);
2060  }
2061  else if (!initial_prune && pprune->exec_pruning_steps)
2062  {
2063  partset = get_matching_partitions(&pprune->exec_context,
2064  pprune->exec_pruning_steps);
2065  }
2066  else
2067  {
2068  /*
2069  * If no pruning is to be done, just include all partitions at this
2070  * level.
2071  */
2072  partset = pprune->present_parts;
2073  }
2074 
2075  /* Translate partset into subplan indexes */
2076  i = -1;
2077  while ((i = bms_next_member(partset, i)) >= 0)
2078  {
/* A non-negative subplan_map entry means a leaf partition's subplan; keep it. */
2079  if (pprune->subplan_map[i] >= 0)
2080  *validsubplans = bms_add_member(*validsubplans,
2081  pprune->subplan_map[i]);
2082  else
2083  {
2084  int partidx = pprune->subpart_map[i];
2085 
2086  if (partidx >= 0)
/* NOTE(review): the recursive find_matching_subplans_recurse(...) call
 * line (original line 2087) is missing from this copy; only its trailing
 * argument lines are visible. */
2088  &prunedata->partrelprunedata[partidx],
2089  initial_prune, validsubplans);
2090  else
2091  {
2092  /*
2093  * We get here if the planner already pruned all the sub-
2094  * partitions for this partition. Silently ignore this
2095  * partition in this case. The end result is the same: we
2096  * would have pruned all partitions just the same, but we
2097  * don't have any pruning steps to execute to verify this.
2098  */
2099  }
2100  }
2101  }
2102 }
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:77
AttrNumber * attrMap
Definition: tupconvert.h:26
#define NIL
Definition: pg_list.h:65
static int get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull)
Definition: fmgr.h:56
struct TransitionCaptureState * mt_oc_transition_capture
Definition: execnodes.h:1189
struct PartitionDispatchData PartitionDispatchData
FmgrInfo * partsupfunc
Definition: partprune.h:55
void InitResultRelInfo(ResultRelInfo *resultRelInfo, Relation resultRelationDesc, Index resultRelationIndex, Relation partition_root, int instrument_options)
Definition: execMain.c:1277
Relation ri_RelationDesc
Definition: execnodes.h:410
Bitmapset * execparamids
Definition: plannodes.h:1141
#define IsA(nodeptr, _type_)
Definition: nodes.h:576
MemoryContext prune_context
#define AllocSetContextCreate
Definition: memutils.h:170
PartitionDesc partdesc
static Datum ExecEvalExprSwitchContext(ExprState *state, ExprContext *econtext, bool *isNull)
Definition: executor.h:300
char * pg_get_partkeydef_columns(Oid relid, bool pretty)
Definition: ruleutils.c:1635
TupleDesc outdesc
Definition: tupconvert.h:25
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:133
#define likely(x)
Definition: c.h:207
void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena)
Definition: lsyscache.c:2674
TupleTableSlot * ExecInitExtraTupleSlot(EState *estate, TupleDesc tupledesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1801
static void ExecInitPruningContext(PartitionPruneContext *context, List *pruning_steps, PartitionDesc partdesc, PartitionKey partkey, PlanState *planstate)
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:74
struct SubplanResultRelHashElem SubplanResultRelHashElem
struct CopyMultiInsertBuffer * ri_CopyMultiInsertBuffer
Definition: execnodes.h:487
#define HASH_CONTEXT
Definition: hsearch.h:93
#define HASH_ELEM
Definition: hsearch.h:87
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
AclResult pg_attribute_aclcheck(Oid table_oid, AttrNumber attnum, Oid roleid, AclMode mode)
Definition: aclchk.c:4515
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:321
MemoryContext hcxt
Definition: hsearch.h:78
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:3011
#define RelationGetDescr(relation)
Definition: rel.h:448
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1208
Oid GetUserId(void)
Definition: miscinit.c:380
FmgrInfo * stepcmpfuncs
Definition: partprune.h:56
List * withCheckOptionLists
Definition: plannodes.h:228
FmgrInfo * partsupfunc
Definition: partcache.h:35
#define castNode(_type_, nodeptr)
Definition: nodes.h:594
BeginForeignInsert_function BeginForeignInsert
Definition: fdwapi.h:215
ResultRelInfo * resultRelInfo
Definition: execnodes.h:1168
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
struct PartitionRoutingInfo * ri_PartitionInfo
Definition: execnodes.h:484
char * pstrdup(const char *in)
Definition: mcxt.c:1186
#define RelationGetForm(relation)
Definition: rel.h:416
Relation ri_PartitionRoot
Definition: execnodes.h:481
ExprContext * ps_ExprContext
Definition: execnodes.h:978
MemoryContext ppccontext
Definition: partprune.h:57
static Oid get_partition_col_typid(PartitionKey key, int col)
Definition: partcache.h:84
PartitionPruningData * partprunedata[FLEXIBLE_ARRAY_MEMBER]
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1043
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:83
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
PartitionDirectory CreatePartitionDirectory(MemoryContext mcxt)
Definition: partdesc.c:242
Size entrysize
Definition: hsearch.h:73
Definition: nodes.h:525
static int get_partition_natts(PartitionKey key)
Definition: partcache.h:63
int errcode(int sqlerrcode)
Definition: elog.c:608
#define PARTITION_MAX_KEYS
TupleTableSlot * execute_attr_map_slot(AttrNumber *attrMap, TupleTableSlot *in_slot, TupleTableSlot *out_slot)
Definition: tupconvert.c:425
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:136
bool * is_leaf
Definition: partdesc.h:26
List * partexprs
Definition: partcache.h:30
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:906
EState * state
Definition: execnodes.h:941
unsigned int Oid
Definition: postgres_ext.h:31
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:297
List * lappend_oid(List *list, Oid datum)
Definition: list.c:358
#define OidIsValid(objectId)
Definition: c.h:645
List * plans
Definition: plannodes.h:227
ResultRelInfo ** partitions
Definition: execPartition.c:94
ExprState * ExecInitQual(List *qual, PlanState *parent)
Definition: execExpr.c:207
Index ri_RangeTableIndex
Definition: execnodes.h:407
signed int int32
Definition: c.h:347
List * onConflictSet
Definition: plannodes.h:236
PartitionBoundInfo boundinfo
Definition: partdesc.h:29
Index rootRelation
Definition: plannodes.h:222
void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
Definition: execIndexing.c:151
static void ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate, PartitionTupleRouting *proute)
#define GetPerTupleExprContext(estate)
Definition: executor.h:501
Definition: dynahash.c:208
int partition_range_datum_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, int nvalues, Datum *values, bool *is_equal)
Definition: partbounds.c:1665
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:977
TupleConversionMap * pi_RootToPartitionMap
Definition: execPartition.h:37
void pfree(void *pointer)
Definition: mcxt.c:1056
MemoryContext es_query_cxt
Definition: execnodes.h:549
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
#define linitial(l)
Definition: pg_list.h:195
#define ERROR
Definition: elog.h:43
static void find_matching_subplans_recurse(PartitionPruningData *prunedata, PartitionedRelPruningData *pprune, bool initial_prune, Bitmapset **validsubplans)
PlanState ps
Definition: execnodes.h:1159
ProjectionInfo * oc_ProjInfo
Definition: execnodes.h:384
void ExecCleanupTupleRouting(ModifyTableState *mtstate, PartitionTupleRouting *proute)
static void * list_nth(const List *list, int n)
Definition: pg_list.h:277
ExprState ** exprstates
Definition: partprune.h:59
int pg_mbcliplen(const char *mbstr, int len, int limit)
Definition: mbutils.c:862
static void FormPartitionKeyDatum(PartitionDispatch pd, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:192
void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
Definition: execMain.c:1076
#define lfirst_node(type, lc)
Definition: pg_list.h:193
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:646
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:176
TupleConversionMap * convert_tuples_by_name(TupleDesc indesc, TupleDesc outdesc)
Definition: tupconvert.c:205
int get_hash_partition_greatest_modulus(PartitionBoundInfo bound)
Definition: partbounds.c:1388
Bitmapset * ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubplans)
struct TransitionCaptureState * mt_transition_capture
Definition: execnodes.h:1186
#define NoLock
Definition: lockdefs.h:34
static char * buf
Definition: pg_test_fsync.c:67
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1224
void check_stack_depth(void)
Definition: postgres.c:3284
#define RowExclusiveLock
Definition: lockdefs.h:38
int errdetail(const char *fmt,...)
Definition: elog.c:955
AttrNumber resno
Definition: primnodes.h:1394
#define RelationGetRelationName(relation)
Definition: rel.h:456
static ListCell * list_head(const List *l)
Definition: pg_list.h:125
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:200
struct FdwRoutine * ri_FdwRoutine
Definition: execnodes.h:439
Bitmapset * present_parts
Definition: plannodes.h:1126
PartitionDispatch * partition_dispatch_info
Definition: execPartition.c:91
MemoryContext CurrentMemoryContext
Definition: mcxt.c:38
Bitmapset * execparamids
int es_instrument
Definition: execnodes.h:556
ExprState * oc_WhereClause
Definition: execnodes.h:385
PartitionPruneStep step
Definition: plannodes.h:1186
PartitionTupleRouting * ExecSetupPartitionTupleRouting(EState *estate, ModifyTableState *mtstate, Relation rel)
#define ereport(elevel, rest)
Definition: elog.h:141
Node * map_variable_attnos(Node *node, int target_varno, int sublevels_up, const AttrNumber *attno_map, int map_length, Oid to_rowtype, bool *found_whole_row)
TargetEntry * makeTargetEntry(Expr *expr, AttrNumber resno, char *resname, bool resjunk)
Definition: makefuncs.c:236
List * ExecPrepareExprList(List *nodes, EState *estate)
Definition: execExpr.c:564
PartitionPruneContext exec_context
Definition: execPartition.h:84
List * lappend(List *list, void *datum)
Definition: list.c:322
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:701
void appendStringInfoChar(StringInfo str, char ch)
Definition: stringinfo.c:188
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
AttrNumber * convert_tuples_by_name_map_if_req(TupleDesc indesc, TupleDesc outdesc)
Definition: tupconvert.c:327
OnConflictSetState * ri_onConflict
Definition: execnodes.h:472
static int16 get_partition_col_attnum(PartitionKey key, int col)
Definition: partcache.h:78
#define HASH_BLOBS
Definition: hsearch.h:88
Oid * partcollation
Definition: partcache.h:38
List * es_tupleTable
Definition: execnodes.h:551
void * palloc0(Size size)
Definition: mcxt.c:980
AclResult
Definition: acl.h:177
static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate, PartitionTupleRouting *proute, Oid partoid, PartitionDispatch parent_pd, int partidx)
AttrNumber * partattrs
Definition: partcache.h:28
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:316
uintptr_t Datum
Definition: postgres.h:367
#define ACL_SELECT
Definition: parsenodes.h:75
TupleTableSlot * tupslot
Size keysize
Definition: hsearch.h:72
TupleConversionMap * pi_PartitionToRootMap
Definition: execPartition.h:43
List * ri_PartitionCheck
Definition: execnodes.h:475
#define PARTITION_STRATEGY_HASH
Definition: parsenodes.h:798
#define partition_bound_accepts_nulls(bi)
Definition: partbounds.h:74
Plan * plan
Definition: execnodes.h:939
int partition_list_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, Datum value, bool *is_equal)
Definition: partbounds.c:1576
uint64 compute_partition_hash_value(int partnatts, FmgrInfo *partsupfunc, Oid *partcollation, Datum *values, bool *isnull)
Definition: partbounds.c:2739
#define InvalidOid
Definition: postgres_ext.h:36
static Datum slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
Definition: tuptable.h:381
List * es_tuple_routing_result_relations
Definition: execnodes.h:537
int16 attnum
Definition: pg_attribute.h:79
TupleTableSlot * oc_ProjSlot
Definition: execnodes.h:383
#define INNER_VAR
Definition: primnodes.h:157
int check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
Definition: rls.c:52
Relation ExecGetRangeTableRelation(EState *estate, Index rti)
Definition: execUtils.c:754
static ResultRelInfo * ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *rootResultRelInfo, int partidx)
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define makeNode(_type_)
Definition: nodes.h:573
bool list_member_oid(const List *list, Oid datum)
Definition: list.c:675
Bitmapset * other_subplans
Definition: plannodes.h:1102
#define Assert(condition)
Definition: c.h:739
#define lfirst(lc)
Definition: pg_list.h:190
OnConflictAction onConflictAction
Definition: plannodes.h:234
static List * adjust_partition_tlist(List *tlist, TupleConversionMap *map)
static int list_length(const List *l)
Definition: pg_list.h:169
TupleDesc ExecTypeFromTL(List *targetList)
Definition: execTuples.c:1908
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:223
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:736
PartitionDirectory es_partition_directory
Definition: execnodes.h:531
#define PARTITION_STRATEGY_LIST
Definition: parsenodes.h:799
List * RelationGetIndexList(Relation relation)
Definition: relcache.c:4347
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1069
#define RelationGetPartitionKey(relation)
Definition: rel.h:603
PartitionedRelPruningData partrelprunedata[FLEXIBLE_ARRAY_MEMBER]
Definition: execPartition.h:97
#define InvalidAttrNumber
Definition: attnum.h:23
#define GetPerTupleMemoryContext(estate)
Definition: executor.h:506
AclResult pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
Definition: aclchk.c:4629
static Datum values[MAXATTR]
Definition: bootstrap.c:167
#define PARTITION_STRATEGY_RANGE
Definition: parsenodes.h:800
static char * ExecBuildSlotPartitionKeyDescription(Relation rel, Datum *values, bool *isnull, int maxfieldlen)
char * OidOutputFunctionCall(Oid functionId, Datum val)
Definition: fmgr.c:1655
void * palloc(Size size)
Definition: mcxt.c:949
PartitionBoundInfo boundinfo
Definition: partprune.h:53
ProjectionInfo * ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent, TupleDesc inputDesc)
Definition: execExpr.c:351
int errmsg(const char *fmt,...)
Definition: elog.c:822
Bitmapset * get_matching_partitions(PartitionPruneContext *context, List *pruning_steps)
Definition: partprune.c:716
CmdType operation
Definition: plannodes.h:219
void list_free(List *list)
Definition: list.c:1377
#define elog(elevel,...)
Definition: elog.h:228
int i
#define NameStr(name)
Definition: c.h:616
bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool emitError)
Definition: execMain.c:1783
List * returningLists
Definition: plannodes.h:229
PartitionDesc PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
Definition: partdesc.c:274
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:121
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
ResultRelInfo * ExecFindPartition(ModifyTableState *mtstate, ResultRelInfo *rootResultRelInfo, PartitionTupleRouting *proute, TupleTableSlot *slot, EState *estate)
PartitionPruneState * ExecCreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *partitionpruneinfo)
MemoryContext memcxt
Definition: execPartition.c:98
#define copyObject(obj)
Definition: nodes.h:641
#define PruneCxtStateIdx(partnatts, step_id, keyno)
Definition: partprune.h:68
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:39
struct PartitionDispatchData * PartitionDispatch
Definition: execPartition.h:22
AttrNumber * convert_tuples_by_name_map(TupleDesc indesc, TupleDesc outdesc)
Definition: tupconvert.c:245
Definition: pg_list.h:50
List * get_partition_ancestors(Oid relid)
Definition: partition.c:115
PartitionPruneContext initial_context
Definition: execPartition.h:83
int16 AttrNumber
Definition: attnum.h:21
static void ExecInitRoutingInfo(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *partRelInfo, int partidx)
#define RelationGetRelid(relation)
Definition: rel.h:422
void appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
Definition: stringinfo.c:227
long val
Definition: informix.c:664
List * ri_onConflictArbiterIndexes
Definition: execnodes.h:469
void ExecCloseIndices(ResultRelInfo *resultRelInfo)
Definition: execIndexing.c:226
#define offsetof(type, field)
Definition: c.h:662
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:793
int indexes[FLEXIBLE_ARRAY_MEMBER]
#define ResetExprContext(econtext)
Definition: executor.h:495
#define lfirst_oid(lc)
Definition: pg_list.h:192
Bitmapset * other_subplans
PlanState * planstate
Definition: partprune.h:58
EndForeignInsert_function EndForeignInsert
Definition: fdwapi.h:216
struct PartitionedRelPruningData PartitionedRelPruningData
Node * onConflictWhere
Definition: plannodes.h:237
TupleTableSlot * pi_PartitionTupleSlot
Definition: execPartition.h:49
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate)