PostgreSQL Source Code git master
Loading...
Searching...
No Matches
execPartition.c File Reference
#include "postgres.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/tupconvert.h"
#include "catalog/index.h"
#include "catalog/partition.h"
#include "executor/execPartition.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "foreign/fdwapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "partitioning/partbounds.h"
#include "partitioning/partdesc.h"
#include "partitioning/partprune.h"
#include "rewrite/rewriteManip.h"
#include "utils/acl.h"
#include "utils/injection_point.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"
Include dependency graph for execPartition.c:

Go to the source code of this file.

Data Structures

struct  PartitionTupleRouting
 
struct  PartitionDispatchData
 

Macros

#define PARTITION_CACHED_FIND_THRESHOLD   16
 

Typedefs

typedef struct PartitionDispatchData PartitionDispatchData
 

Functions

static ResultRelInfo * ExecInitPartitionInfo (ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *rootResultRelInfo, int partidx)
 
static void ExecInitRoutingInfo (ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *partRelInfo, int partidx, bool is_borrowed_rel)
 
static PartitionDispatch ExecInitPartitionDispatchInfo (EState *estate, PartitionTupleRouting *proute, Oid partoid, PartitionDispatch parent_pd, int partidx, ResultRelInfo *rootResultRelInfo)
 
static void FormPartitionKeyDatum (PartitionDispatch pd, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
 
static int get_partition_for_tuple (PartitionDispatch pd, const Datum *values, const bool *isnull)
 
static char * ExecBuildSlotPartitionKeyDescription (Relation rel, const Datum *values, const bool *isnull, int maxfieldlen)
 
static List * adjust_partition_colnos (List *colnos, ResultRelInfo *leaf_part_rri)
 
static List * adjust_partition_colnos_using_map (List *colnos, AttrMap *attrMap)
 
static PartitionPruneState * CreatePartitionPruneState (EState *estate, PartitionPruneInfo *pruneinfo, Bitmapset **all_leafpart_rtis)
 
static void InitPartitionPruneContext (PartitionPruneContext *context, List *pruning_steps, PartitionDesc partdesc, PartitionKey partkey, PlanState *planstate, ExprContext *econtext)
 
static void InitExecPartitionPruneContexts (PartitionPruneState *prunestate, PlanState *parent_plan, Bitmapset *initially_valid_subplans, int n_total_subplans)
 
static void find_matching_subplans_recurse (PartitionPruningData *prunedata, PartitionedRelPruningData *pprune, bool initial_prune, Bitmapset **validsubplans, Bitmapset **validsubplan_rtis)
 
PartitionTupleRouting * ExecSetupPartitionTupleRouting (EState *estate, Relation rel)
 
ResultRelInfo * ExecFindPartition (ModifyTableState *mtstate, ResultRelInfo *rootResultRelInfo, PartitionTupleRouting *proute, TupleTableSlot *slot, EState *estate)
 
static bool IsIndexCompatibleAsArbiter (Relation arbiterIndexRelation, IndexInfo *arbiterIndexInfo, Relation indexRelation, IndexInfo *indexInfo)
 
void ExecCleanupTupleRouting (ModifyTableState *mtstate, PartitionTupleRouting *proute)
 
void ExecDoInitialPruning (EState *estate)
 
PartitionPruneState * ExecInitPartitionExecPruning (PlanState *planstate, int n_total_subplans, int part_prune_index, Bitmapset *relids, Bitmapset **initially_valid_subplans)
 
Bitmapset * ExecFindMatchingSubPlans (PartitionPruneState *prunestate, bool initial_prune, Bitmapset **validsubplan_rtis)
 

Macro Definition Documentation

◆ PARTITION_CACHED_FIND_THRESHOLD

#define PARTITION_CACHED_FIND_THRESHOLD   16

Definition at line 1527 of file execPartition.c.

Typedef Documentation

◆ PartitionDispatchData

Function Documentation

◆ adjust_partition_colnos()

static List * adjust_partition_colnos ( List *colnos,
ResultRelInfo *leaf_part_rri 
)
static

Definition at line 1878 of file execPartition.c.

1879{
1881
1882 Assert(map != NULL);
1883
1885}
#define Assert(condition)
Definition c.h:945
static List * adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
TupleConversionMap * ExecGetChildToRootMap(ResultRelInfo *resultRelInfo)
Definition execUtils.c:1305
static int fb(int x)
AttrMap * attrMap
Definition tupconvert.h:28

References adjust_partition_colnos_using_map(), Assert, TupleConversionMap::attrMap, ExecGetChildToRootMap(), and fb().

Referenced by ExecInitPartitionInfo().

◆ adjust_partition_colnos_using_map()

static List * adjust_partition_colnos_using_map ( List *colnos,
AttrMap *attrMap 
)
static

Definition at line 1895 of file execPartition.c.

1896{
1897 List *new_colnos = NIL;
1898 ListCell *lc;
1899
1900 Assert(attrMap != NULL); /* else we shouldn't be here */
1901
1902 foreach(lc, colnos)
1903 {
1905
1906 if (parentattrno <= 0 ||
1907 parentattrno > attrMap->maplen ||
1908 attrMap->attnums[parentattrno - 1] == 0)
1909 elog(ERROR, "unexpected attno %d in target column list",
1910 parentattrno);
1912 attrMap->attnums[parentattrno - 1]);
1913 }
1914
1915 return new_colnos;
1916}
int16 AttrNumber
Definition attnum.h:21
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
List * lappend_int(List *list, int datum)
Definition list.c:357
#define NIL
Definition pg_list.h:68
#define lfirst_int(lc)
Definition pg_list.h:173
int maplen
Definition attmap.h:37
AttrNumber * attnums
Definition attmap.h:36
Definition pg_list.h:54

References Assert, AttrMap::attnums, elog, ERROR, fb(), lappend_int(), lfirst_int, AttrMap::maplen, and NIL.

Referenced by adjust_partition_colnos(), and ExecInitPartitionInfo().

◆ CreatePartitionPruneState()

static PartitionPruneState * CreatePartitionPruneState ( EState *estate,
PartitionPruneInfo *pruneinfo,
Bitmapset **  all_leafpart_rtis 
)
static

Definition at line 2145 of file execPartition.c.

2147{
2150 ListCell *lc;
2151 int i;
2152
2153 /*
2154 * Expression context that will be used by partkey_datum_from_expr() to
2155 * evaluate expressions for comparison against partition bounds.
2156 */
2157 ExprContext *econtext = CreateExprContext(estate);
2158
2159 /* For data reading, executor always includes detached partitions */
2160 if (estate->es_partition_directory == NULL)
2161 estate->es_partition_directory =
2162 CreatePartitionDirectory(estate->es_query_cxt, false);
2163
2166
2167 /*
2168 * Allocate the data structure
2169 */
2171 palloc(offsetof(PartitionPruneState, partprunedata) +
2173
2174 /* Save ExprContext for use during InitExecPartitionPruneContexts(). */
2175 prunestate->econtext = econtext;
2176 prunestate->execparamids = NULL;
2177 /* other_subplans can change at runtime, so we need our own copy */
2178 prunestate->other_subplans = bms_copy(pruneinfo->other_subplans);
2179 prunestate->do_initial_prune = false; /* may be set below */
2180 prunestate->do_exec_prune = false; /* may be set below */
2181 prunestate->num_partprunedata = n_part_hierarchies;
2182
2183 /*
2184 * Create a short-term memory context which we'll use when making calls to
2185 * the partition pruning functions. This avoids possible memory leaks,
2186 * since the pruning functions call comparison functions that aren't under
2187 * our control.
2188 */
2189 prunestate->prune_context =
2191 "Partition Prune",
2193
2194 i = 0;
2195 foreach(lc, pruneinfo->prune_infos)
2196 {
2200 ListCell *lc2;
2201 int j;
2202
2204 palloc(offsetof(PartitionPruningData, partrelprunedata) +
2206 prunestate->partprunedata[i] = prunedata;
2207 prunedata->num_partrelprunedata = npartrelpruneinfos;
2208
2209 j = 0;
2210 foreach(lc2, partrelpruneinfos)
2211 {
2213 PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2214 Relation partrel;
2215 PartitionDesc partdesc;
2217
2218 /*
2219 * We can rely on the copies of the partitioned table's partition
2220 * key and partition descriptor appearing in its relcache entry,
2221 * because that entry will be held open and locked for the
2222 * duration of this executor run.
2223 */
2224 partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
2225
2226 /* Remember for InitExecPartitionPruneContexts(). */
2227 pprune->partrel = partrel;
2228
2231 partrel);
2232
2233 /*
2234 * Initialize the subplan_map and subpart_map.
2235 *
2236 * The set of partitions that exist now might not be the same that
2237 * existed when the plan was made. The normal case is that it is;
2238 * optimize for that case with a quick comparison, and just copy
2239 * the subplan_map and make subpart_map, leafpart_rti_map point to
2240 * the ones in PruneInfo.
2241 *
2242 * For the case where they aren't identical, we could have more
2243 * partitions on either side; or even exactly the same number of
2244 * them on both but the set of OIDs doesn't match fully. Handle
2245 * this by creating new subplan_map and subpart_map arrays that
2246 * corresponds to the ones in the PruneInfo where the new
2247 * partition descriptor's OIDs match. Any that don't match can be
2248 * set to -1, as if they were pruned. By construction, both
2249 * arrays are in partition bounds order.
2250 */
2251 pprune->nparts = partdesc->nparts;
2252 pprune->subplan_map = palloc_array(int, partdesc->nparts);
2253
2254 if (partdesc->nparts == pinfo->nparts &&
2255 memcmp(partdesc->oids, pinfo->relid_map,
2256 sizeof(int) * partdesc->nparts) == 0)
2257 {
2258 pprune->subpart_map = pinfo->subpart_map;
2259 pprune->leafpart_rti_map = pinfo->leafpart_rti_map;
2260 memcpy(pprune->subplan_map, pinfo->subplan_map,
2261 sizeof(int) * pinfo->nparts);
2262 }
2263 else
2264 {
2265 int pd_idx = 0;
2266 int pp_idx;
2267
2268 /*
2269 * When the partition arrays are not identical, there could be
2270 * some new ones but it's also possible that one was removed;
2271 * we cope with both situations by walking the arrays and
2272 * discarding those that don't match.
2273 *
2274 * If the number of partitions on both sides match, it's still
2275 * possible that one partition has been detached and another
2276 * attached. Cope with that by creating a map that skips any
2277 * mismatches.
2278 */
2279 pprune->subpart_map = palloc_array(int, partdesc->nparts);
2280 pprune->leafpart_rti_map = palloc_array(int, partdesc->nparts);
2281
2282 for (pp_idx = 0; pp_idx < partdesc->nparts; pp_idx++)
2283 {
2284 /* Skip any InvalidOid relid_map entries */
2285 while (pd_idx < pinfo->nparts &&
2286 !OidIsValid(pinfo->relid_map[pd_idx]))
2287 pd_idx++;
2288
2289 recheck:
2290 if (pd_idx < pinfo->nparts &&
2291 pinfo->relid_map[pd_idx] == partdesc->oids[pp_idx])
2292 {
2293 /* match... */
2294 pprune->subplan_map[pp_idx] =
2295 pinfo->subplan_map[pd_idx];
2296 pprune->subpart_map[pp_idx] =
2297 pinfo->subpart_map[pd_idx];
2298 pprune->leafpart_rti_map[pp_idx] =
2299 pinfo->leafpart_rti_map[pd_idx];
2300 pd_idx++;
2301 continue;
2302 }
2303
2304 /*
2305 * There isn't an exact match in the corresponding
2306 * positions of both arrays. Peek ahead in
2307 * pinfo->relid_map to see if we have a match for the
2308 * current partition in partdesc. Normally if a match
2309 * exists it's just one element ahead, and it means the
2310 * planner saw one extra partition that we no longer see
2311 * now (its concurrent detach finished just in between);
2312 * so we skip that one by updating pd_idx to the new
2313 * location and jumping above. We can then continue to
2314 * match the rest of the elements after skipping the OID
2315 * with no match; no future matches are tried for the
2316 * element that was skipped, because we know the arrays to
2317 * be in the same order.
2318 *
2319 * If we don't see a match anywhere in the rest of the
2320 * pinfo->relid_map array, that means we see an element
2321 * now that the planner didn't see, so mark that one as
2322 * pruned and move on.
2323 */
2324 for (int pd_idx2 = pd_idx + 1; pd_idx2 < pinfo->nparts; pd_idx2++)
2325 {
2326 if (pd_idx2 >= pinfo->nparts)
2327 break;
2328 if (pinfo->relid_map[pd_idx2] == partdesc->oids[pp_idx])
2329 {
2330 pd_idx = pd_idx2;
2331 goto recheck;
2332 }
2333 }
2334
2335 pprune->subpart_map[pp_idx] = -1;
2336 pprune->subplan_map[pp_idx] = -1;
2337 pprune->leafpart_rti_map[pp_idx] = 0;
2338 }
2339 }
2340
2341 /* present_parts is also subject to later modification */
2343
2344 /*
2345 * Only initial_context is initialized here. exec_context is
2346 * initialized during ExecInitPartitionExecPruning() when the
2347 * parent plan's PlanState is available.
2348 *
2349 * Note that we must skip execution-time (both "init" and "exec")
2350 * partition pruning in EXPLAIN (GENERIC_PLAN), since parameter
2351 * values may be missing.
2352 */
2353 pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
2354 if (pinfo->initial_pruning_steps &&
2356 {
2357 InitPartitionPruneContext(&pprune->initial_context,
2358 pprune->initial_pruning_steps,
2359 partdesc, partkey, NULL,
2360 econtext);
2361 /* Record whether initial pruning is needed at any level */
2362 prunestate->do_initial_prune = true;
2363 }
2364 pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
2365 if (pinfo->exec_pruning_steps &&
2367 {
2368 /* Record whether exec pruning is needed at any level */
2369 prunestate->do_exec_prune = true;
2370 }
2371
2372 /*
2373 * Accumulate the IDs of all PARAM_EXEC Params affecting the
2374 * partitioning decisions at this plan node.
2375 */
2376 prunestate->execparamids = bms_add_members(prunestate->execparamids,
2377 pinfo->execparamids);
2378
2379 /*
2380 * Return all leaf partition indexes if we're skipping pruning in
2381 * the EXPLAIN (GENERIC_PLAN) case.
2382 */
2383 if (pinfo->initial_pruning_steps && !prunestate->do_initial_prune)
2384 {
2385 int part_index = -1;
2386
2387 while ((part_index = bms_next_member(pprune->present_parts,
2388 part_index)) >= 0)
2389 {
2390 Index rtindex = pprune->leafpart_rti_map[part_index];
2391
2392 if (rtindex)
2394 rtindex);
2395 }
2396 }
2397
2398 j++;
2399 }
2400 i++;
2401 }
2402
2403 return prunestate;
2404}
int bms_next_member(const Bitmapset *a, int prevbit)
Definition bitmapset.c:1290
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:901
Bitmapset * bms_copy(const Bitmapset *a)
Definition bitmapset.c:122
unsigned int Index
Definition c.h:700
#define OidIsValid(objectId)
Definition c.h:860
static void InitPartitionPruneContext(PartitionPruneContext *context, List *pruning_steps, PartitionDesc partdesc, PartitionKey partkey, PlanState *planstate, ExprContext *econtext)
Relation ExecGetRangeTableRelation(EState *estate, Index rti, bool isResultRel)
Definition execUtils.c:830
ExprContext * CreateExprContext(EState *estate)
Definition execUtils.c:312
#define EXEC_FLAG_EXPLAIN_GENERIC
Definition executor.h:68
#define palloc_array(type, count)
Definition fe_memutils.h:76
int j
Definition isn.c:78
int i
Definition isn.c:77
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
PartitionKey RelationGetPartitionKey(Relation rel)
Definition partcache.c:51
PartitionDirectory CreatePartitionDirectory(MemoryContext mcxt, bool omit_detached)
Definition partdesc.c:423
PartitionDesc PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
Definition partdesc.c:456
#define lfirst_node(type, lc)
Definition pg_list.h:176
static int list_length(const List *l)
Definition pg_list.h:152
int es_top_eflags
Definition execnodes.h:731
MemoryContext es_query_cxt
Definition execnodes.h:722
PartitionDirectory es_partition_directory
Definition execnodes.h:704
struct EState * ecxt_estate
Definition execnodes.h:326
Bitmapset * present_parts
Definition plannodes.h:1700

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert, bms_add_member(), bms_add_members(), bms_copy(), bms_next_member(), CreateExprContext(), CreatePartitionDirectory(), CurrentMemoryContext, ExprContext::ecxt_estate, EState::es_partition_directory, EState::es_query_cxt, EState::es_top_eflags, EXEC_FLAG_EXPLAIN_GENERIC, PartitionedRelPruneInfo::exec_pruning_steps, ExecGetRangeTableRelation(), PartitionedRelPruneInfo::execparamids, fb(), i, PartitionedRelPruneInfo::initial_pruning_steps, InitPartitionPruneContext(), j, lfirst_node, list_length(), PartitionedRelPruneInfo::nparts, PartitionDescData::nparts, OidIsValid, PartitionDescData::oids, palloc(), palloc_array, PartitionDirectoryLookup(), PartitionedRelPruneInfo::present_parts, RelationGetPartitionKey(), and PartitionedRelPruneInfo::rtindex.

Referenced by ExecDoInitialPruning().

◆ ExecBuildSlotPartitionKeyDescription()

static char * ExecBuildSlotPartitionKeyDescription ( Relation  rel,
const Datum *values,
const bool *isnull,
int  maxfieldlen 
)
static

Definition at line 1790 of file execPartition.c.

1794{
1797 int partnatts = get_partition_natts(key);
1798 int i;
1799 Oid relid = RelationGetRelid(rel);
1801
1802 if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
1803 return NULL;
1804
1805 /* If the user has table-level access, just go build the description. */
1807 if (aclresult != ACLCHECK_OK)
1808 {
1809 /*
1810 * Step through the columns of the partition key and make sure the
1811 * user has SELECT rights on all of them.
1812 */
1813 for (i = 0; i < partnatts; i++)
1814 {
1816
1817 /*
1818 * If this partition key column is an expression, we return no
1819 * detail rather than try to figure out what column(s) the
1820 * expression includes and if the user has SELECT rights on them.
1821 */
1822 if (attnum == InvalidAttrNumber ||
1825 return NULL;
1826 }
1827 }
1828
1830 appendStringInfo(&buf, "(%s) = (",
1831 pg_get_partkeydef_columns(relid, true));
1832
1833 for (i = 0; i < partnatts; i++)
1834 {
1835 char *val;
1836 int vallen;
1837
1838 if (isnull[i])
1839 val = "null";
1840 else
1841 {
1842 Oid foutoid;
1843 bool typisvarlena;
1844
1846 &foutoid, &typisvarlena);
1848 }
1849
1850 if (i > 0)
1852
1853 /* truncate if needed */
1854 vallen = strlen(val);
1855 if (vallen <= maxfieldlen)
1856 appendBinaryStringInfo(&buf, val, vallen);
1857 else
1858 {
1859 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1860 appendBinaryStringInfo(&buf, val, vallen);
1861 appendStringInfoString(&buf, "...");
1862 }
1863 }
1864
1866
1867 return buf.data;
1868}
AclResult
Definition acl.h:183
@ ACLCHECK_OK
Definition acl.h:184
AclResult pg_attribute_aclcheck(Oid table_oid, AttrNumber attnum, Oid roleid, AclMode mode)
Definition aclchk.c:3911
AclResult pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
Definition aclchk.c:4082
#define InvalidAttrNumber
Definition attnum.h:23
static Datum values[MAXATTR]
Definition bootstrap.c:188
char * OidOutputFunctionCall(Oid functionId, Datum val)
Definition fmgr.c:1764
long val
Definition informix.c:689
void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena)
Definition lsyscache.c:3129
int pg_mbcliplen(const char *mbstr, int len, int limit)
Definition mbutils.c:1211
Oid GetUserId(void)
Definition miscinit.c:470
#define ACL_SELECT
Definition parsenodes.h:77
static int16 get_partition_col_attnum(PartitionKey key, int col)
Definition partcache.h:80
static int get_partition_natts(PartitionKey key)
Definition partcache.h:65
static Oid get_partition_col_typid(PartitionKey key, int col)
Definition partcache.h:86
int16 attnum
static char buf[DEFAULT_XLOG_SEG_SIZE]
#define InvalidOid
unsigned int Oid
#define RelationGetRelid(relation)
Definition rel.h:514
int check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
Definition rls.c:52
@ RLS_ENABLED
Definition rls.h:45
char * pg_get_partkeydef_columns(Oid relid, bool pretty)
Definition ruleutils.c:2280
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition stringinfo.c:145
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition stringinfo.c:281
void appendStringInfoString(StringInfo str, const char *s)
Definition stringinfo.c:230
void appendStringInfoChar(StringInfo str, char ch)
Definition stringinfo.c:242
void initStringInfo(StringInfo str)
Definition stringinfo.c:97

References ACL_SELECT, ACLCHECK_OK, appendBinaryStringInfo(), appendStringInfo(), appendStringInfoChar(), appendStringInfoString(), attnum, buf, check_enable_rls(), fb(), get_partition_col_attnum(), get_partition_col_typid(), get_partition_natts(), getTypeOutputInfo(), GetUserId(), i, initStringInfo(), InvalidAttrNumber, InvalidOid, OidOutputFunctionCall(), pg_attribute_aclcheck(), pg_class_aclcheck(), pg_get_partkeydef_columns(), pg_mbcliplen(), RelationGetPartitionKey(), RelationGetRelid, RLS_ENABLED, val, and values.

Referenced by ExecFindPartition().

◆ ExecCleanupTupleRouting()

void ExecCleanupTupleRouting ( ModifyTableState *mtstate,
PartitionTupleRouting *proute 
)

Definition at line 1412 of file execPartition.c.

1414{
1415 int i;
1416
1417 /*
1418 * Remember, proute->partition_dispatch_info[0] corresponds to the root
1419 * partitioned table, which we must not try to close, because it is the
1420 * main target table of the query that will be closed by callers such as
1421 * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
1422 * partitioned table.
1423 */
1424 for (i = 1; i < proute->num_dispatch; i++)
1425 {
1427
1429
1430 if (pd->tupslot)
1432 }
1433
1434 for (i = 0; i < proute->num_partitions; i++)
1435 {
1436 ResultRelInfo *resultRelInfo = proute->partitions[i];
1437
1438 /* Allow any FDWs to shut down */
1439 if (resultRelInfo->ri_FdwRoutine != NULL &&
1440 resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
1441 resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
1442 resultRelInfo);
1443
1444 /*
1445 * Close it if it's not one of the result relations borrowed from the
1446 * owning ModifyTableState; those will be closed by ExecEndPlan().
1447 */
1448 if (proute->is_borrowed_rel[i])
1449 continue;
1450
1451 ExecCloseIndices(resultRelInfo);
1452 table_close(resultRelInfo->ri_RelationDesc, NoLock);
1453 }
1454}
void ExecCloseIndices(ResultRelInfo *resultRelInfo)
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
#define NoLock
Definition lockdefs.h:34
EndForeignInsert_function EndForeignInsert
Definition fdwapi.h:239
TupleTableSlot * tupslot
PartitionDispatch * partition_dispatch_info
ResultRelInfo ** partitions
EState * state
Definition execnodes.h:1179
Relation ri_RelationDesc
Definition execnodes.h:492
struct FdwRoutine * ri_FdwRoutine
Definition execnodes.h:545
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126

References FdwRoutine::EndForeignInsert, ExecCloseIndices(), ExecDropSingleTupleTableSlot(), fb(), i, PartitionTupleRouting::is_borrowed_rel, NoLock, PartitionTupleRouting::num_dispatch, PartitionTupleRouting::num_partitions, PartitionTupleRouting::partition_dispatch_info, PartitionTupleRouting::partitions, ModifyTableState::ps, PartitionDispatchData::reldesc, ResultRelInfo::ri_FdwRoutine, ResultRelInfo::ri_RelationDesc, PlanState::state, table_close(), and PartitionDispatchData::tupslot.

Referenced by CopyFrom(), ExecEndModifyTable(), and finish_edata().

◆ ExecDoInitialPruning()

void ExecDoInitialPruning ( EState *estate)

Definition at line 1995 of file execPartition.c.

1996{
1997 ListCell *lc;
1998
1999 foreach(lc, estate->es_part_prune_infos)
2000 {
2006
2007 /* Create and save the PartitionPruneState. */
2011 prunestate);
2012
2013 /*
2014 * Perform initial pruning steps, if any, and save the result
2015 * bitmapset or NULL as described in the header comment.
2016 */
2017 if (prunestate->do_initial_prune)
2020 else
2022
2027 }
2028}
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate, bool initial_prune, Bitmapset **validsubplan_rtis)
static PartitionPruneState * CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo, Bitmapset **all_leafpart_rtis)
List * lappend(List *list, void *datum)
Definition list.c:339
List * es_part_prune_infos
Definition execnodes.h:682
Bitmapset * es_unpruned_relids
Definition execnodes.h:685
List * es_part_prune_states
Definition execnodes.h:683
List * es_part_prune_results
Definition execnodes.h:684

References bms_add_members(), CreatePartitionPruneState(), EState::es_part_prune_infos, EState::es_part_prune_results, EState::es_part_prune_states, EState::es_unpruned_relids, ExecFindMatchingSubPlans(), fb(), lappend(), and lfirst_node.

Referenced by InitPlan().

◆ ExecFindMatchingSubPlans()

Bitmapset * ExecFindMatchingSubPlans ( PartitionPruneState *prunestate,
bool  initial_prune,
Bitmapset **  validsubplan_rtis 
)

Definition at line 2667 of file execPartition.c.

2670{
2671 Bitmapset *result = NULL;
2672 MemoryContext oldcontext;
2673 int i;
2674
2675 /*
2676 * Either we're here on the initial prune done during pruning
2677 * initialization, or we're at a point where PARAM_EXEC Params can be
2678 * evaluated *and* there are steps in which to do so.
2679 */
2680 Assert(initial_prune || prunestate->do_exec_prune);
2682
2683 /*
2684 * Switch to a temp context to avoid leaking memory in the executor's
2685 * query-lifespan memory context.
2686 */
2687 oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
2688
2689 /*
2690 * For each hierarchy, do the pruning tests, and add nondeletable
2691 * subplans' indexes to "result".
2692 */
2693 for (i = 0; i < prunestate->num_partprunedata; i++)
2694 {
2695 PartitionPruningData *prunedata = prunestate->partprunedata[i];
2697
2698 /*
2699 * We pass the zeroth item, belonging to the root table of the
2700 * hierarchy, and find_matching_subplans_recurse() takes care of
2701 * recursing to other (lower-level) parents as needed.
2702 */
2703 pprune = &prunedata->partrelprunedata[0];
2705 &result, validsubplan_rtis);
2706
2707 /*
2708 * Expression eval may have used space in ExprContext too. Avoid
2709 * accessing exec_context during initial pruning, as it is not valid
2710 * at that stage.
2711 */
2712 if (!initial_prune && pprune->exec_pruning_steps)
2713 ResetExprContext(pprune->exec_context.exprcontext);
2714 }
2715
2716 /* Add in any subplans that partition pruning didn't account for */
2717 result = bms_add_members(result, prunestate->other_subplans);
2718
2719 MemoryContextSwitchTo(oldcontext);
2720
2721 /* Copy result out of the temp context before we reset it */
2722 result = bms_copy(result);
2725
2726 MemoryContextReset(prunestate->prune_context);
2727
2728 return result;
2729}
static void find_matching_subplans_recurse(PartitionPruningData *prunedata, PartitionedRelPruningData *pprune, bool initial_prune, Bitmapset **validsubplans, Bitmapset **validsubplan_rtis)
#define ResetExprContext(econtext)
Definition executor.h:654
void MemoryContextReset(MemoryContext context)
Definition mcxt.c:403
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124

References Assert, bms_add_members(), bms_copy(), fb(), find_matching_subplans_recurse(), i, MemoryContextReset(), MemoryContextSwitchTo(), and ResetExprContext.

Referenced by choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), choose_next_subplan_locally(), ExecAppendAsyncBegin(), ExecDoInitialPruning(), and ExecMergeAppend().

◆ ExecFindPartition()

ResultRelInfo * ExecFindPartition ( ModifyTableState *mtstate,
ResultRelInfo *rootResultRelInfo,
PartitionTupleRouting *proute,
TupleTableSlot *slot,
EState *estate 
)

Definition at line 268 of file execPartition.c.

272{
275 bool isnull[PARTITION_MAX_KEYS];
276 Relation rel;
278 PartitionDesc partdesc;
280 TupleTableSlot *ecxt_scantuple_saved = ecxt->ecxt_scantuple;
281 TupleTableSlot *rootslot = slot;
285
286 /* use per-tuple context here to avoid leaking memory */
288
289 /*
290 * First check the root table's partition constraint, if any. No point in
291 * routing the tuple if it doesn't belong in the root table itself.
292 */
293 if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition)
294 ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
295
296 /* start with the root partitioned table */
297 dispatch = pd[0];
298 while (dispatch != NULL)
299 {
300 int partidx = -1;
301 bool is_leaf;
302
304
305 rel = dispatch->reldesc;
306 partdesc = dispatch->partdesc;
307
308 /*
309 * Extract partition key from tuple. Expression evaluation machinery
310 * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
311 * point to the correct tuple slot. The slot might have changed from
312 * what was used for the parent table if the table of the current
313 * partitioning level has different tuple descriptor from the parent.
314 * So update ecxt_scantuple accordingly.
315 */
316 ecxt->ecxt_scantuple = slot;
317 FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
318
319 /*
320 * If this partitioned table has no partitions or no partition for
321 * these values, error out.
322 */
323 if (partdesc->nparts == 0 ||
325 {
326 char *val_desc;
327
329 values, isnull, 64);
333 errmsg("no partition of relation \"%s\" found for row",
335 val_desc ?
336 errdetail("Partition key of the failing row contains %s.",
337 val_desc) : 0,
338 errtable(rel)));
339 }
340
341 is_leaf = partdesc->is_leaf[partidx];
342 if (is_leaf)
343 {
344 /*
345 * We've reached the leaf -- hurray, we're done. Look to see if
346 * we've already got a ResultRelInfo for this partition.
347 */
348 if (likely(dispatch->indexes[partidx] >= 0))
349 {
350 /* ResultRelInfo already built */
351 Assert(dispatch->indexes[partidx] < proute->num_partitions);
352 rri = proute->partitions[dispatch->indexes[partidx]];
353 }
354 else
355 {
356 /*
357 * If the partition is known in the owning ModifyTableState
358 * node, we can re-use that ResultRelInfo instead of creating
359 * a new one with ExecInitPartitionInfo().
360 */
362 partdesc->oids[partidx],
363 true, false);
364 if (rri)
365 {
366 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
367
368 /* Verify this ResultRelInfo allows INSERTs */
370 node ? node->onConflictAction : ONCONFLICT_NONE,
371 NIL);
372
373 /*
374 * Initialize information needed to insert this and
375 * subsequent tuples routed to this partition.
376 */
377 ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
378 rri, partidx, true);
379 }
380 else
381 {
382 /* We need to create a new one. */
383 rri = ExecInitPartitionInfo(mtstate, estate, proute,
384 dispatch,
385 rootResultRelInfo, partidx);
386 }
387 }
388 Assert(rri != NULL);
389
390 /* Signal to terminate the loop */
391 dispatch = NULL;
392 }
393 else
394 {
395 /*
396 * Partition is a sub-partitioned table; get the PartitionDispatch
397 */
398 if (likely(dispatch->indexes[partidx] >= 0))
399 {
400 /* Already built. */
401 Assert(dispatch->indexes[partidx] < proute->num_dispatch);
402
403 rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
404
405 /*
406 * Move down to the next partition level and search again
407 * until we find a leaf partition that matches this tuple
408 */
409 dispatch = pd[dispatch->indexes[partidx]];
410 }
411 else
412 {
413 /* Not yet built. Do that now. */
415
416 /*
417 * Create the new PartitionDispatch. We pass the current one
418 * in as the parent PartitionDispatch
419 */
421 proute,
422 partdesc->oids[partidx],
424 mtstate->rootResultRelInfo);
425 Assert(dispatch->indexes[partidx] >= 0 &&
426 dispatch->indexes[partidx] < proute->num_dispatch);
427
428 rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
430 }
431
432 /*
433 * Convert the tuple to the new parent's layout, if different from
434 * the previous parent.
435 */
436 if (dispatch->tupslot)
437 {
438 AttrMap *map = dispatch->tupmap;
440
441 myslot = dispatch->tupslot;
442 slot = execute_attr_map_slot(map, slot, myslot);
443
444 if (tempslot != NULL)
446 }
447 }
448
449 /*
450 * If this partition is the default one, we must check its partition
451 * constraint now, which may have changed concurrently due to
452 * partitions being added to the parent.
453 *
454 * (We do this here, and do not rely on ExecInsert doing it, because
455 * we don't want to miss doing it for non-leaf partitions.)
456 */
457 if (partidx == partdesc->boundinfo->default_index)
458 {
459 /*
460 * The tuple must match the partition's layout for the constraint
461 * expression to be evaluated successfully. If the partition is
462 * sub-partitioned, that would already be the case due to the code
463 * above, but for a leaf partition the tuple still matches the
464 * parent's layout.
465 *
466 * Note that we have a map to convert from root to current
467 * partition, but not from immediate parent to current partition.
468 * So if we have to convert, do it from the root slot; if not, use
469 * the root slot as-is.
470 */
471 if (is_leaf)
472 {
474
475 if (map)
477 rri->ri_PartitionTupleSlot);
478 else
479 slot = rootslot;
480 }
481
482 ExecPartitionCheck(rri, slot, estate, true);
483 }
484 }
485
486 /* Release the tuple in the lowest parent's dedicated slot. */
487 if (myslot != NULL)
489 /* and restore ecxt's scantuple */
490 ecxt->ecxt_scantuple = ecxt_scantuple_saved;
492
493 return rri;
494}
#define likely(x)
Definition c.h:431
int errcode(int sqlerrcode)
Definition elog.c:874
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define ereport(elevel,...)
Definition elog.h:150
void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation, OnConflictAction onConflictAction, List *mergeActions)
Definition execMain.c:1056
bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool emitError)
Definition execMain.c:1875
static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate, PartitionTupleRouting *proute, Oid partoid, PartitionDispatch parent_pd, int partidx, ResultRelInfo *rootResultRelInfo)
static ResultRelInfo * ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *rootResultRelInfo, int partidx)
static void ExecInitRoutingInfo(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *partRelInfo, int partidx, bool is_borrowed_rel)
static char * ExecBuildSlotPartitionKeyDescription(Relation rel, const Datum *values, const bool *isnull, int maxfieldlen)
static void FormPartitionKeyDatum(PartitionDispatch pd, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
static int get_partition_for_tuple(PartitionDispatch pd, const Datum *values, const bool *isnull)
TupleConversionMap * ExecGetRootToChildMap(ResultRelInfo *resultRelInfo, EState *estate)
Definition execUtils.c:1331
#define GetPerTupleExprContext(estate)
Definition executor.h:660
#define GetPerTupleMemoryContext(estate)
Definition executor.h:665
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
ResultRelInfo * ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache)
@ ONCONFLICT_NONE
Definition nodes.h:428
@ CMD_INSERT
Definition nodes.h:277
static char * errmsg
#define PARTITION_MAX_KEYS
uint64_t Datum
Definition postgres.h:70
#define RelationGetRelationName(relation)
Definition rel.h:548
int errtable(Relation rel)
Definition relcache.c:6062
ResultRelInfo * rootResultRelInfo
Definition execnodes.h:1428
PartitionBoundInfo boundinfo
Definition partdesc.h:38
ResultRelInfo ** nonleaf_partitions
Plan * plan
Definition execnodes.h:1177
Form_pg_class rd_rel
Definition rel.h:111
TupleTableSlot * execute_attr_map_slot(AttrMap *attrMap, TupleTableSlot *in_slot, TupleTableSlot *out_slot)
Definition tupconvert.c:193
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition tuptable.h:476

References Assert, TupleConversionMap::attrMap, PartitionDescData::boundinfo, CHECK_FOR_INTERRUPTS, CheckValidResultRel(), CMD_INSERT, PartitionBoundInfoData::default_index, ereport, errcode(), errdetail(), errmsg, ERROR, errtable(), ExecBuildSlotPartitionKeyDescription(), ExecClearTuple(), ExecGetRootToChildMap(), ExecInitPartitionDispatchInfo(), ExecInitPartitionInfo(), ExecInitRoutingInfo(), ExecLookupResultRelByOid(), ExecPartitionCheck(), execute_attr_map_slot(), fb(), FormPartitionKeyDatum(), get_partition_for_tuple(), GetPerTupleExprContext, GetPerTupleMemoryContext, PartitionDescData::is_leaf, likely, MemoryContextSwitchTo(), NIL, PartitionTupleRouting::nonleaf_partitions, PartitionDescData::nparts, PartitionTupleRouting::num_dispatch, PartitionTupleRouting::num_partitions, OidIsValid, PartitionDescData::oids, ONCONFLICT_NONE, ModifyTable::onConflictAction, PartitionTupleRouting::partition_dispatch_info, PARTITION_MAX_KEYS, PartitionTupleRouting::partitions, PlanState::plan, ModifyTableState::ps, RelationData::rd_rel, RelationGetRelationName, RelationGetRelid, ResultRelInfo::ri_RelationDesc, ModifyTableState::rootResultRelInfo, and values.

Referenced by apply_handle_tuple_routing(), CopyFrom(), and ExecPrepareTupleRouting().

◆ ExecInitPartitionDispatchInfo()

static PartitionDispatch ExecInitPartitionDispatchInfo ( EState estate,
PartitionTupleRouting proute,
Oid  partoid,
PartitionDispatch  parent_pd,
int  partidx,
ResultRelInfo rootResultRelInfo 
)
static

Definition at line 1275 of file execPartition.c.

1279{
1280 Relation rel;
1281 PartitionDesc partdesc;
1283 int dispatchidx;
1285
1286 /*
1287 * For data modification, it is better that executor does not include
1288 * partitions being detached, except when running in snapshot-isolation
1289 * mode. This means that a read-committed transaction immediately gets a
1290 * "no partition for tuple" error when a tuple is inserted into a
1291 * partition that's being detached concurrently, but a transaction in
1292 * repeatable-read mode can still use such a partition.
1293 */
1294 if (estate->es_partition_directory == NULL)
1295 estate->es_partition_directory =
1298
1300
1301 /*
1302 * Only sub-partitioned tables need to be locked here. The root
1303 * partitioned table will already have been locked as it's referenced in
1304 * the query's rtable.
1305 */
1306 if (partoid != RelationGetRelid(proute->partition_root))
1307 rel = table_open(partoid, RowExclusiveLock);
1308 else
1309 rel = proute->partition_root;
1310 partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
1311
1313 partdesc->nparts * sizeof(int));
1314 pd->reldesc = rel;
1315 pd->key = RelationGetPartitionKey(rel);
1316 pd->keystate = NIL;
1317 pd->partdesc = partdesc;
1318 if (parent_pd != NULL)
1319 {
1320 TupleDesc tupdesc = RelationGetDescr(rel);
1321
1322 /*
1323 * For sub-partitioned tables where the column order differs from its
1324 * direct parent partitioned table, we must store a tuple table slot
1325 * initialized with its tuple descriptor and a tuple conversion map to
1326 * convert a tuple from its parent's rowtype to its own. This is to
1327 * make sure that we are looking at the correct row using the correct
1328 * tuple descriptor when computing its partition key for tuple
1329 * routing.
1330 */
1332 tupdesc,
1333 false);
1334 pd->tupslot = pd->tupmap ?
1336 }
1337 else
1338 {
1339 /* Not required for the root partitioned table */
1340 pd->tupmap = NULL;
1341 pd->tupslot = NULL;
1342 }
1343
1344 /*
1345 * Initialize with -1 to signify that the corresponding partition's
1346 * ResultRelInfo or PartitionDispatch has not been created yet.
1347 */
1348 memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
1349
1350 /* Track in PartitionTupleRouting for later use */
1351 dispatchidx = proute->num_dispatch++;
1352
1353 /* Allocate or enlarge the array, as needed */
1354 if (proute->num_dispatch >= proute->max_dispatch)
1355 {
1356 if (proute->max_dispatch == 0)
1357 {
1358 proute->max_dispatch = 4;
1361 }
1362 else
1363 {
1364 proute->max_dispatch *= 2;
1367 sizeof(PartitionDispatch) * proute->max_dispatch);
1368 proute->nonleaf_partitions = (ResultRelInfo **)
1370 sizeof(ResultRelInfo *) * proute->max_dispatch);
1371 }
1372 }
1374
1375 /*
1376 * If setting up a PartitionDispatch for a sub-partitioned table, we may
1377 * also need a minimally valid ResultRelInfo for checking the partition
1378 * constraint later; set that up now.
1379 */
1380 if (parent_pd)
1381 {
1383
1384 InitResultRelInfo(rri, rel, 0, rootResultRelInfo, 0);
1386 }
1387 else
1389
1390 /*
1391 * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
1392 * install a downlink in the parent to allow quick descent.
1393 */
1394 if (parent_pd)
1395 {
1396 Assert(parent_pd->indexes[partidx] == -1);
1397 parent_pd->indexes[partidx] = dispatchidx;
1398 }
1399
1401
1402 return pd;
1403}
AttrMap * build_attrmap_by_name_if_req(TupleDesc indesc, TupleDesc outdesc, bool missing_ok)
Definition attmap.c:261
void InitResultRelInfo(ResultRelInfo *resultRelInfo, Relation resultRelationDesc, Index resultRelationIndex, ResultRelInfo *partition_root_rri, int instrument_options)
Definition execMain.c:1262
struct PartitionDispatchData * PartitionDispatch
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
const TupleTableSlotOps TTSOpsVirtual
Definition execTuples.c:84
#define RowExclusiveLock
Definition lockdefs.h:38
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
#define makeNode(_type_)
Definition nodes.h:161
#define RelationGetDescr(relation)
Definition rel.h:540
int indexes[FLEXIBLE_ARRAY_MEMBER]
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40
#define IsolationUsesXactSnapshot()
Definition xact.h:52

References Assert, build_attrmap_by_name_if_req(), CreatePartitionDirectory(), EState::es_partition_directory, EState::es_query_cxt, fb(), PartitionDispatchData::indexes, InitResultRelInfo(), IsolationUsesXactSnapshot, PartitionDispatchData::key, PartitionDispatchData::keystate, makeNode, MakeSingleTupleTableSlot(), PartitionTupleRouting::max_dispatch, PartitionTupleRouting::memcxt, MemoryContextSwitchTo(), NIL, PartitionTupleRouting::nonleaf_partitions, PartitionDescData::nparts, PartitionTupleRouting::num_dispatch, palloc(), palloc_array, PartitionDispatchData::partdesc, PartitionTupleRouting::partition_dispatch_info, PartitionTupleRouting::partition_root, PartitionDirectoryLookup(), RelationGetDescr, RelationGetPartitionKey(), RelationGetRelid, PartitionDispatchData::reldesc, repalloc(), RowExclusiveLock, table_open(), TTSOpsVirtual, PartitionDispatchData::tupmap, and PartitionDispatchData::tupslot.

Referenced by ExecFindPartition(), and ExecSetupPartitionTupleRouting().

◆ ExecInitPartitionExecPruning()

PartitionPruneState * ExecInitPartitionExecPruning ( PlanState planstate,
int  n_total_subplans,
int  part_prune_index,
Bitmapset relids,
Bitmapset **  initially_valid_subplans 
)

Definition at line 2051 of file execPartition.c.

2056{
2058 EState *estate = planstate->state;
2060
2061 /* Obtain the pruneinfo we need. */
2063 part_prune_index);
2064
2065 /* Its relids better match the plan node's or the planner messed up. */
2066 if (!bms_equal(relids, pruneinfo->relids))
2067 elog(ERROR, "wrong pruneinfo with relids=%s found at part_prune_index=%d contained in plan node with relids=%s",
2068 bmsToString(pruneinfo->relids), part_prune_index,
2069 bmsToString(relids));
2070
2071 /*
2072 * The PartitionPruneState would have been created by
2073 * ExecDoInitialPruning() and stored as the part_prune_index'th element of
2074 * EState.es_part_prune_states.
2075 */
2076 prunestate = list_nth(estate->es_part_prune_states, part_prune_index);
2078
2079 /* Use the result of initial pruning done by ExecDoInitialPruning(). */
2080 if (prunestate->do_initial_prune)
2082 estate->es_part_prune_results,
2083 part_prune_index);
2084 else
2085 {
2086 /* No pruning, so we'll need to initialize all subplans */
2089 n_total_subplans - 1);
2090 }
2091
2092 /*
2093 * The exec pruning state must also be initialized, if needed, before it
2094 * can be used for pruning during execution.
2095 *
2096 * This also re-sequences subplan indexes contained in prunestate to
2097 * account for any that were removed due to initial pruning; refer to the
2098 * condition in InitExecPartitionPruneContexts() that is used to determine
2099 * whether to do this. If no exec pruning needs to be done, we would thus
2100 * leave the maps to be in an invalid state, but that's ok since that data
2101 * won't be consulted again (cf initial Assert in
2102 * ExecFindMatchingSubPlans).
2103 */
2104 if (prunestate->do_exec_prune)
2108
2109 return prunestate;
2110}
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:142
Bitmapset * bms_add_range(Bitmapset *a, int lower, int upper)
Definition bitmapset.c:1003
static void InitExecPartitionPruneContexts(PartitionPruneState *prunestate, PlanState *parent_plan, Bitmapset *initially_valid_subplans, int n_total_subplans)
char * bmsToString(const Bitmapset *bms)
Definition outfuncs.c:828
static void * list_nth(const List *list, int n)
Definition pg_list.h:299
#define list_nth_node(type, list, n)
Definition pg_list.h:327

References Assert, bms_add_range(), bms_equal(), bmsToString(), elog, ERROR, EState::es_part_prune_infos, EState::es_part_prune_results, EState::es_part_prune_states, fb(), InitExecPartitionPruneContexts(), list_nth(), list_nth_node, and PlanState::state.

Referenced by ExecInitAppend(), and ExecInitMergeAppend().

◆ ExecInitPartitionInfo()

static ResultRelInfo * ExecInitPartitionInfo ( ModifyTableState mtstate,
EState estate,
PartitionTupleRouting proute,
PartitionDispatch  dispatch,
ResultRelInfo rootResultRelInfo,
int  partidx 
)
static

Definition at line 564 of file execPartition.c.

569{
570 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
571 Oid partOid = dispatch->partdesc->oids[partidx];
572 Relation partrel;
578 bool found_whole_row;
579
581
583
586 partrel,
587 0,
588 rootResultRelInfo,
589 estate->es_instrument);
590
591 /*
592 * Verify result relation is a valid target for an INSERT. An UPDATE of a
593 * partition-key becomes a DELETE+INSERT operation, so this check is still
594 * required when the operation is CMD_UPDATE.
595 */
597 node ? node->onConflictAction : ONCONFLICT_NONE, NIL);
598
599 /*
600 * Open partition indices. The user may have asked to check for conflicts
601 * within this leaf partition and do "nothing" instead of throwing an
602 * error. Be prepared in that case by initializing the index information
603 * needed by ExecInsert() to perform speculative insertions.
604 */
605 if (partrel->rd_rel->relhasindex &&
606 leaf_part_rri->ri_IndexRelationDescs == NULL)
608 (node != NULL &&
609 node->onConflictAction != ONCONFLICT_NONE));
610
611 /*
612 * Build WITH CHECK OPTION constraints for the partition. Note that we
613 * didn't build the withCheckOptionList for partitions within the planner,
614 * but simple translation of varattnos will suffice. This only occurs for
615 * the INSERT case or in the case of UPDATE/MERGE tuple routing where we
616 * didn't find a result rel to reuse.
617 */
618 if (node && node->withCheckOptionLists != NIL)
619 {
620 List *wcoList;
621 List *wcoExprs = NIL;
622 ListCell *ll;
623
624 /*
625 * In the case of INSERT on a partitioned table, there is only one
626 * plan. Likewise, there is only one WCO list, not one per partition.
627 * For UPDATE/MERGE, there are as many WCO lists as there are plans.
628 */
629 Assert((node->operation == CMD_INSERT &&
630 list_length(node->withCheckOptionLists) == 1 &&
631 list_length(node->resultRelations) == 1) ||
632 (node->operation == CMD_UPDATE &&
633 list_length(node->withCheckOptionLists) ==
634 list_length(node->resultRelations)) ||
635 (node->operation == CMD_MERGE &&
636 list_length(node->withCheckOptionLists) ==
637 list_length(node->resultRelations)));
638
639 /*
640 * Use the WCO list of the first plan as a reference to calculate
641 * attno's for the WCO list of this partition. In the INSERT case,
642 * that refers to the root partitioned table, whereas in the UPDATE
643 * tuple routing case, that refers to the first partition in the
644 * mtstate->resultRelInfo array. In any case, both that relation and
645 * this partition should have the same columns, so we should be able
646 * to map attributes successfully.
647 */
648 wcoList = linitial(node->withCheckOptionLists);
649
650 /*
651 * Convert Vars in it to contain this partition's attribute numbers.
652 */
656 false);
657 wcoList = (List *)
659 firstVarno, 0,
661 RelationGetForm(partrel)->reltype,
662 &found_whole_row);
663 /* We ignore the value of found_whole_row. */
664
665 foreach(ll, wcoList)
666 {
669 &mtstate->ps);
670
672 }
673
674 leaf_part_rri->ri_WithCheckOptions = wcoList;
675 leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
676 }
677
678 /*
679 * Build the RETURNING projection for the partition. Note that we didn't
680 * build the returningList for partitions within the planner, but simple
681 * translation of varattnos will suffice. This only occurs for the INSERT
682 * case or in the case of UPDATE/MERGE tuple routing where we didn't find
683 * a result rel to reuse.
684 */
685 if (node && node->returningLists != NIL)
686 {
687 TupleTableSlot *slot;
688 ExprContext *econtext;
689 List *returningList;
690
691 /* See the comment above for WCO lists. */
692 Assert((node->operation == CMD_INSERT &&
693 list_length(node->returningLists) == 1 &&
694 list_length(node->resultRelations) == 1) ||
695 (node->operation == CMD_UPDATE &&
696 list_length(node->returningLists) ==
697 list_length(node->resultRelations)) ||
698 (node->operation == CMD_MERGE &&
699 list_length(node->returningLists) ==
700 list_length(node->resultRelations)));
701
702 /*
703 * Use the RETURNING list of the first plan as a reference to
704 * calculate attno's for the RETURNING list of this partition. See
705 * the comment above for WCO lists for more details on why this is
706 * okay.
707 */
708 returningList = linitial(node->returningLists);
709
710 /*
711 * Convert Vars in it to contain this partition's attribute numbers.
712 */
713 if (part_attmap == NULL)
717 false);
718 returningList = (List *)
719 map_variable_attnos((Node *) returningList,
720 firstVarno, 0,
722 RelationGetForm(partrel)->reltype,
723 &found_whole_row);
724 /* We ignore the value of found_whole_row. */
725
726 leaf_part_rri->ri_returningList = returningList;
727
728 /*
729 * Initialize the projection itself.
730 *
731 * Use the slot and the expression context that would have been set up
732 * in ExecInitModifyTable() for projection's output.
733 */
734 Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
735 slot = mtstate->ps.ps_ResultTupleSlot;
736 Assert(mtstate->ps.ps_ExprContext != NULL);
737 econtext = mtstate->ps.ps_ExprContext;
738 leaf_part_rri->ri_projectReturning =
739 ExecBuildProjectionInfo(returningList, econtext, slot,
740 &mtstate->ps, RelationGetDescr(partrel));
741 }
742
743 /* Set up information needed for routing tuples to the partition. */
744 ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
745 leaf_part_rri, partidx, false);
746
747 /*
748 * If there is an ON CONFLICT clause, initialize state for it.
749 */
750 if (node && node->onConflictAction != ONCONFLICT_NONE)
751 {
753 ExprContext *econtext = mtstate->ps.ps_ExprContext;
754 List *arbiterIndexes = NIL;
755 int additional_arbiters = 0;
756
757 /*
758 * If there is a list of arbiter indexes, map it to a list of indexes
759 * in the partition. We also add any "identical indexes" to any of
760 * those, to cover the case where one of them is concurrently being
761 * reindexed.
762 */
763 if (rootResultRelInfo->ri_onConflictArbiterIndexes != NIL)
764 {
768
769 for (int listidx = 0; listidx < leaf_part_rri->ri_NumIndices; listidx++)
770 {
771 Oid indexoid;
772 List *ancestors;
773
774 /*
775 * If one of this index's ancestors is in the root's arbiter
776 * list, then use this index as arbiter for this partition.
777 * Otherwise, if this index has no parent, track it for later,
778 * in case REINDEX CONCURRENTLY is working on one of the
779 * arbiters.
780 *
781 * However, if two indexes appear to have the same parent,
782 * treat the second of these as if it had no parent. This
783 * sounds counterintuitive, but it can happen if a transaction
784 * running REINDEX CONCURRENTLY commits right between those
785 * two indexes are checked by another process in this loop.
786 * This will have the effect of also treating that second
787 * index as arbiter.
788 *
789 * XXX get_partition_ancestors scans pg_inherits, which is not
790 * only slow, but also means the catalog snapshot can get
791 * invalidated each time through the loop (cf.
792 * GetNonHistoricCatalogSnapshot). Consider a syscache or
793 * some other way to cache?
794 */
795 indexoid = RelationGetRelid(leaf_part_rri->ri_IndexRelationDescs[listidx]);
796 ancestors = get_partition_ancestors(indexoid);
797 INJECTION_POINT("exec-init-partition-after-get-partition-ancestors", NULL);
798
799 if (ancestors != NIL &&
801 {
803 {
804 if (list_member_oid(ancestors, parent_idx))
805 {
807 arbiterIndexes = lappend_oid(arbiterIndexes, indexoid);
809 break;
810 }
811 }
812 }
813 else
815
816 list_free(ancestors);
817 }
818
819 /*
820 * If we found any indexes with no ancestors, it's possible that
821 * some arbiter index is undergoing concurrent reindex. Match all
822 * unparented indexes against arbiters; add unparented matching
823 * ones as "additional arbiters".
824 *
825 * This is critical so that all concurrent transactions use the
826 * same set as arbiters during REINDEX CONCURRENTLY, to avoid
827 * spurious "duplicate key" errors.
828 */
829 if (unparented_idxs && arbiterIndexes)
830 {
832 {
835
836 unparented_rel = leaf_part_rri->ri_IndexRelationDescs[unparented_i];
837 unparented_ii = leaf_part_rri->ri_IndexRelationInfo[unparented_i];
838
839 Assert(!list_member_oid(arbiterIndexes,
840 unparented_rel->rd_index->indexrelid));
841
842 /* Ignore indexes not ready */
843 if (!unparented_ii->ii_ReadyForInserts)
844 continue;
845
847 {
850
851 arbiter_rel = leaf_part_rri->ri_IndexRelationDescs[arbiter_i];
852 arbiter_ii = leaf_part_rri->ri_IndexRelationInfo[arbiter_i];
853
854 /*
855 * If the non-ancestor index is compatible with the
856 * arbiter, use the non-ancestor as arbiter too.
857 */
862 {
863 arbiterIndexes = lappend_oid(arbiterIndexes,
864 unparented_rel->rd_index->indexrelid);
866 break;
867 }
868 }
869 }
870 }
874 }
875
876 /*
877 * We expect to find as many arbiter indexes on this partition as the
878 * root has, plus however many "additional arbiters" (to wit: those
879 * being concurrently rebuilt) we found.
880 */
881 if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
882 list_length(arbiterIndexes) - additional_arbiters)
883 elog(ERROR, "invalid arbiter index list");
884 leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
885
886 /*
887 * In the DO UPDATE and DO SELECT cases, we have some more state to
888 * initialize.
889 */
890 if (node->onConflictAction == ONCONFLICT_UPDATE ||
891 node->onConflictAction == ONCONFLICT_SELECT)
892 {
895
897
898 Assert(node->onConflictSet != NIL ||
899 node->onConflictAction == ONCONFLICT_SELECT);
900 Assert(rootResultRelInfo->ri_onConflict != NULL);
901
902 leaf_part_rri->ri_onConflict = onconfl;
903
904 /* Lock strength for DO SELECT [FOR UPDATE/SHARE] */
905 onconfl->oc_LockStrength =
906 rootResultRelInfo->ri_onConflict->oc_LockStrength;
907
908 /*
909 * Need a separate existing slot for each partition, as the
910 * partition could be of a different AM, even if the tuple
911 * descriptors match.
912 */
913 onconfl->oc_Existing =
914 table_slot_create(leaf_part_rri->ri_RelationDesc,
915 &mtstate->ps.state->es_tupleTable);
916
917 /*
918 * If the partition's tuple descriptor matches exactly the root
919 * parent (the common case), we can re-use most of the parent's ON
920 * CONFLICT action state, skipping a bunch of work. Otherwise, we
921 * need to create state specific to this partition.
922 */
923 if (map == NULL)
924 {
925 /*
926 * It's safe to reuse these from the partition root, as we
927 * only process one tuple at a time (therefore we won't
928 * overwrite needed data in slots), and the results of any
929 * projections are independent of the underlying storage.
930 * Projections and where clauses themselves don't store state
931 * / are independent of the underlying storage.
932 */
933 onconfl->oc_ProjSlot =
934 rootResultRelInfo->ri_onConflict->oc_ProjSlot;
935 onconfl->oc_ProjInfo =
936 rootResultRelInfo->ri_onConflict->oc_ProjInfo;
937 onconfl->oc_WhereClause =
938 rootResultRelInfo->ri_onConflict->oc_WhereClause;
939 }
940 else
941 {
942 /*
943 * For ON CONFLICT DO UPDATE, translate expressions in
944 * onConflictSet to account for different attribute numbers.
945 * For that, map partition varattnos twice: first to catch the
946 * EXCLUDED pseudo-relation (INNER_VAR), and second to handle
947 * the main target relation (firstVarno).
948 */
949 if (node->onConflictAction == ONCONFLICT_UPDATE)
950 {
953
954 onconflset = copyObject(node->onConflictSet);
955 if (part_attmap == NULL)
959 false);
960 onconflset = (List *)
962 INNER_VAR, 0,
964 RelationGetForm(partrel)->reltype,
965 &found_whole_row);
966 /* We ignore the value of found_whole_row. */
967 onconflset = (List *)
969 firstVarno, 0,
971 RelationGetForm(partrel)->reltype,
972 &found_whole_row);
973 /* We ignore the value of found_whole_row. */
974
975 /*
976 * Finally, adjust the target colnos to match the
977 * partition.
978 */
979 onconflcols = adjust_partition_colnos(node->onConflictCols,
981
982 /* create the tuple slot for the UPDATE SET projection */
983 onconfl->oc_ProjSlot =
984 table_slot_create(partrel,
985 &mtstate->ps.state->es_tupleTable);
986
987 /* build UPDATE SET projection state */
988 onconfl->oc_ProjInfo =
990 true,
993 econtext,
994 onconfl->oc_ProjSlot,
995 &mtstate->ps);
996 }
997
998 /*
999 * For both ON CONFLICT DO UPDATE and ON CONFLICT DO SELECT,
1000 * there may be a WHERE clause. If so, initialize state where
1001 * it will be evaluated, mapping the attribute numbers
1002 * appropriately. As with onConflictSet, we need to map
1003 * partition varattnos twice, to catch both the EXCLUDED
1004 * pseudo-relation (INNER_VAR), and the main target relation
1005 * (firstVarno).
1006 */
1007 if (node->onConflictWhere)
1008 {
1009 List *clause;
1010
1011 if (part_attmap == NULL)
1012 part_attmap =
1015 false);
1016
1017 clause = copyObject((List *) node->onConflictWhere);
1018 clause = (List *)
1019 map_variable_attnos((Node *) clause,
1020 INNER_VAR, 0,
1022 RelationGetForm(partrel)->reltype,
1023 &found_whole_row);
1024 /* We ignore the value of found_whole_row. */
1025 clause = (List *)
1026 map_variable_attnos((Node *) clause,
1027 firstVarno, 0,
1029 RelationGetForm(partrel)->reltype,
1030 &found_whole_row);
1031 /* We ignore the value of found_whole_row. */
1032 onconfl->oc_WhereClause =
1033 ExecInitQual(clause, &mtstate->ps);
1034 }
1035 }
1036 }
1037 }
1038
1039 /*
1040 * Since we've just initialized this ResultRelInfo, it's not in any list
1041 * attached to the estate as yet. Add it, so that it can be found later.
1042 *
1043 * Note that the entries in this list appear in no predetermined order,
1044 * because partition result rels are initialized as and when they're
1045 * needed.
1046 */
1051
1052 /*
1053 * Initialize information about this partition that's needed to handle
1054 * MERGE. We take the "first" result relation's mergeActionList as
1055 * reference and make copy for this relation, converting stuff that
1056 * references attribute numbers to match this relation's.
1057 *
1058 * This duplicates much of the logic in ExecInitMerge(), so if something
1059 * changes there, look here too.
1060 */
1061 if (node && node->operation == CMD_MERGE)
1062 {
1063 List *firstMergeActionList = linitial(node->mergeActionLists);
1064 ListCell *lc;
1065 ExprContext *econtext = mtstate->ps.ps_ExprContext;
1066 Node *joinCondition;
1067
1068 if (part_attmap == NULL)
1069 part_attmap =
1072 false);
1073
1074 if (unlikely(!leaf_part_rri->ri_projectNewInfoValid))
1076
1077 /* Initialize state for join condition checking. */
1078 joinCondition =
1079 map_variable_attnos(linitial(node->mergeJoinConditions),
1080 firstVarno, 0,
1082 RelationGetForm(partrel)->reltype,
1083 &found_whole_row);
1084 /* We ignore the value of found_whole_row. */
1085 leaf_part_rri->ri_MergeJoinCondition =
1086 ExecInitQual((List *) joinCondition, &mtstate->ps);
1087
1088 foreach(lc, firstMergeActionList)
1089 {
1090 /* Make a copy for this relation to be safe. */
1092 MergeActionState *action_state;
1093
1094 /* Generate the action's state for this relation */
1095 action_state = makeNode(MergeActionState);
1096 action_state->mas_action = action;
1097
1098 /* And put the action in the appropriate list */
1099 leaf_part_rri->ri_MergeActions[action->matchKind] =
1100 lappend(leaf_part_rri->ri_MergeActions[action->matchKind],
1101 action_state);
1102
1103 switch (action->commandType)
1104 {
1105 case CMD_INSERT:
1106
1107 /*
1108 * ExecCheckPlanOutput() already done on the targetlist
1109 * when "first" result relation initialized and it is same
1110 * for all result relations.
1111 */
1112 action_state->mas_proj =
1113 ExecBuildProjectionInfo(action->targetList, econtext,
1114 leaf_part_rri->ri_newTupleSlot,
1115 &mtstate->ps,
1116 RelationGetDescr(partrel));
1117 break;
1118 case CMD_UPDATE:
1119
1120 /*
1121 * Convert updateColnos from "first" result relation
1122 * attribute numbers to this result rel's.
1123 */
1124 if (part_attmap)
1125 action->updateColnos =
1127 part_attmap);
1128 action_state->mas_proj =
1130 true,
1131 action->updateColnos,
1132 RelationGetDescr(leaf_part_rri->ri_RelationDesc),
1133 econtext,
1134 leaf_part_rri->ri_newTupleSlot,
1135 NULL);
1136 break;
1137 case CMD_DELETE:
1138 case CMD_NOTHING:
1139 /* Nothing to do */
1140 break;
1141
1142 default:
1143 elog(ERROR, "unknown action in MERGE WHEN clause");
1144 }
1145
1146 /* found_whole_row intentionally ignored. */
1147 action->qual =
1149 firstVarno, 0,
1151 RelationGetForm(partrel)->reltype,
1152 &found_whole_row);
1153 action_state->mas_whenqual =
1154 ExecInitQual((List *) action->qual, &mtstate->ps);
1155 }
1156 }
1158
1159 return leaf_part_rri;
1160}
AttrMap * build_attrmap_by_name(TupleDesc indesc, TupleDesc outdesc, bool missing_ok)
Definition attmap.c:175
#define unlikely(x)
Definition c.h:432
ProjectionInfo * ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent, TupleDesc inputDesc)
Definition execExpr.c:391
ExprState * ExecInitQual(List *qual, PlanState *parent)
Definition execExpr.c:250
ProjectionInfo * ExecBuildUpdateProjection(List *targetList, bool evalTargetList, List *targetColnos, TupleDesc relDesc, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent)
Definition execExpr.c:568
void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
static bool IsIndexCompatibleAsArbiter(Relation arbiterIndexRelation, IndexInfo *arbiterIndexInfo, Relation indexRelation, IndexInfo *indexInfo)
static List * adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
#define INJECTION_POINT(name, arg)
List * lappend_oid(List *list, Oid datum)
Definition list.c:375
void list_free(List *list)
Definition list.c:1546
bool list_member_oid(const List *list, Oid datum)
Definition list.c:722
void ExecInitMergeTupleSlots(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
#define copyObject(obj)
Definition nodes.h:232
@ ONCONFLICT_SELECT
Definition nodes.h:431
@ ONCONFLICT_UPDATE
Definition nodes.h:430
@ CMD_MERGE
Definition nodes.h:279
@ CMD_DELETE
Definition nodes.h:278
@ CMD_UPDATE
Definition nodes.h:276
@ CMD_NOTHING
Definition nodes.h:282
#define castNode(_type_, nodeptr)
Definition nodes.h:182
List * get_partition_ancestors(Oid relid)
Definition partition.c:134
#define lfirst(lc)
Definition pg_list.h:172
#define linitial(l)
Definition pg_list.h:178
#define foreach_oid(var, lst)
Definition pg_list.h:471
#define linitial_oid(l)
Definition pg_list.h:180
#define foreach_int(var, lst)
Definition pg_list.h:470
#define INNER_VAR
Definition primnodes.h:243
#define RelationGetForm(relation)
Definition rel.h:508
Node * map_variable_attnos(Node *node, int target_varno, int sublevels_up, const AttrMap *attno_map, Oid to_rowtype, bool *found_whole_row)
List * es_tuple_routing_result_relations
Definition execnodes.h:710
int es_instrument
Definition execnodes.h:732
List * es_tupleTable
Definition execnodes.h:724
MergeAction * mas_action
Definition execnodes.h:461
ProjectionInfo * mas_proj
Definition execnodes.h:462
ExprState * mas_whenqual
Definition execnodes.h:464
ResultRelInfo * resultRelInfo
Definition execnodes.h:1420
Definition nodes.h:135
ExprState * oc_WhereClause
Definition execnodes.h:448
ProjectionInfo * oc_ProjInfo
Definition execnodes.h:446
TupleTableSlot * oc_ProjSlot
Definition execnodes.h:445
LockClauseStrength oc_LockStrength
Definition execnodes.h:447
ExprContext * ps_ExprContext
Definition execnodes.h:1216
TupleTableSlot * ps_ResultTupleSlot
Definition execnodes.h:1215
OnConflictActionState * ri_onConflict
Definition execnodes.h:595
List * ri_onConflictArbiterIndexes
Definition execnodes.h:592
Index ri_RangeTableIndex
Definition execnodes.h:489
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition tableam.c:92

References adjust_partition_colnos(), adjust_partition_colnos_using_map(), Assert, build_attrmap_by_name(), castNode, CheckValidResultRel(), CMD_DELETE, CMD_INSERT, CMD_MERGE, CMD_NOTHING, CMD_UPDATE, copyObject, elog, ERROR, EState::es_instrument, EState::es_query_cxt, EState::es_tuple_routing_result_relations, EState::es_tupleTable, ExecBuildProjectionInfo(), ExecBuildUpdateProjection(), ExecGetRootToChildMap(), ExecInitMergeTupleSlots(), ExecInitQual(), ExecInitRoutingInfo(), ExecOpenIndices(), fb(), foreach_int, foreach_oid, get_partition_ancestors(), InitResultRelInfo(), INJECTION_POINT, INNER_VAR, IsIndexCompatibleAsArbiter(), lappend(), lappend_int(), lappend_oid(), lfirst, lfirst_node, linitial, linitial_oid, list_free(), list_length(), list_member_oid(), makeNode, map_variable_attnos(), MergeActionState::mas_action, MergeActionState::mas_proj, MergeActionState::mas_whenqual, PartitionTupleRouting::memcxt, MemoryContextSwitchTo(), ModifyTable::mergeActionLists, ModifyTable::mergeJoinConditions, NIL, OnConflictActionState::oc_LockStrength, OnConflictActionState::oc_ProjInfo, OnConflictActionState::oc_ProjSlot, OnConflictActionState::oc_WhereClause, ONCONFLICT_NONE, ONCONFLICT_SELECT, ONCONFLICT_UPDATE, ModifyTable::onConflictAction, ModifyTable::onConflictCols, ModifyTable::onConflictSet, ModifyTable::onConflictWhere, ModifyTable::operation, PlanState::plan, ModifyTableState::ps, PlanState::ps_ExprContext, PlanState::ps_ResultTupleSlot, RelationData::rd_rel, RelationGetDescr, RelationGetForm, RelationGetRelid, ModifyTable::resultRelations, ModifyTableState::resultRelInfo, ModifyTable::returningLists, ResultRelInfo::ri_onConflict, ResultRelInfo::ri_onConflictArbiterIndexes, ResultRelInfo::ri_RangeTableIndex, ResultRelInfo::ri_RelationDesc, RowExclusiveLock, PlanState::state, table_open(), table_slot_create(), unlikely, and ModifyTable::withCheckOptionLists.

Referenced by ExecFindPartition().

◆ ExecInitRoutingInfo()

static void ExecInitRoutingInfo ( ModifyTableState *mtstate,
EState *estate,
PartitionTupleRouting *proute,
PartitionDispatch  dispatch,
ResultRelInfo *partRelInfo,
int  partidx,
bool  is_borrowed_rel 
)
static

Definition at line 1169 of file execPartition.c.

/*
 * ExecInitRoutingInfo
 *		Finish setting up partRelInfo for receiving routed tuples, and
 *		record it in proute->partitions (growing that array as needed).
 *
 * NOTE(review): this rendering elided a few hyperlinked source lines —
 * the local MemoryContext variable, the MemoryContextSwitchTo() calls
 * into and out of proute->memcxt, and the palloc_array() that allocates
 * proute->partitions — confirm against the original execPartition.c.
 */
1176{
1178 int rri_index;
1179
1181
 /* (elided: switch into proute->memcxt so allocations below live there) */
1182 /*
1183 * Set up tuple conversion between root parent and the partition if the
1184 * two have different rowtypes. If conversion is indeed required, also
1185 * initialize a slot dedicated to storing this partition's converted
1186 * tuples. Various operations that are applied to tuples after routing,
1187 * such as checking constraints, will refer to this slot.
1188 */
1189 if (ExecGetRootToChildMap(partRelInfo, estate) != NULL)
1190 {
1191 Relation partrel = partRelInfo->ri_RelationDesc;
1192
1193 /*
1194 * This pins the partition's TupleDesc, which will be released at the
1195 * end of the command.
1196 */
1197 partRelInfo->ri_PartitionTupleSlot =
1198 table_slot_create(partrel, &estate->es_tupleTable);
1199 }
1200 else
1201 partRelInfo->ri_PartitionTupleSlot = NULL;
1202
1203 /*
1204 * If the partition is a foreign table, let the FDW init itself for
1205 * routing tuples to the partition.
1206 */
1207 if (partRelInfo->ri_FdwRoutine != NULL &&
1208 partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
1209 partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
1210
1211 /*
1212 * Determine if the FDW supports batch insert and determine the batch size
1213 * (a FDW may support batching, but it may be disabled for the
1214 * server/table or for this particular query).
1215 *
1216 * If the FDW does not support batching, we set the batch size to 1.
1217 */
1218 if (partRelInfo->ri_FdwRoutine != NULL &&
1219 partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
1220 partRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
1221 partRelInfo->ri_BatchSize =
1222 partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(partRelInfo);
1223 else
1224 partRelInfo->ri_BatchSize = 1;
1225
1226 Assert(partRelInfo->ri_BatchSize >= 1);
1227
1228 partRelInfo->ri_CopyMultiInsertBuffer = NULL;
1229
1230 /*
1231 * Keep track of it in the PartitionTupleRouting->partitions array.
1232 */
1233 Assert(dispatch->indexes[partidx] == -1);
1234
1235 rri_index = proute->num_partitions++;
1236
1237 /* Allocate or enlarge the array, as needed */
1238 if (proute->num_partitions >= proute->max_partitions)
1239 {
1240 if (proute->max_partitions == 0)
1241 {
1242 proute->max_partitions = 8;
 /* (elided: proute->partitions = palloc_array(ResultRelInfo *, ...)) */
1244 proute->is_borrowed_rel = palloc_array(bool, proute->max_partitions);
1245 }
1246 else
1247 {
1248 proute->max_partitions *= 2;
1249 proute->partitions = (ResultRelInfo **)
1250 repalloc(proute->partitions, sizeof(ResultRelInfo *) *
1251 proute->max_partitions);
1252 proute->is_borrowed_rel = (bool *)
1253 repalloc(proute->is_borrowed_rel, sizeof(bool) *
1254 proute->max_partitions);
1255 }
1256 }
1257
1258 proute->partitions[rri_index] = partRelInfo;
1259 proute->is_borrowed_rel[rri_index] = is_borrowed_rel;
1260 dispatch->indexes[partidx] = rri_index;
1261
 /* (elided: restore the caller's memory context) */
1263}

References Assert, EState::es_tupleTable, ExecGetRootToChildMap(), fb(), PartitionTupleRouting::is_borrowed_rel, PartitionTupleRouting::max_partitions, PartitionTupleRouting::memcxt, MemoryContextSwitchTo(), PartitionTupleRouting::num_partitions, palloc_array, PartitionTupleRouting::partitions, repalloc(), and table_slot_create().

Referenced by ExecFindPartition(), and ExecInitPartitionInfo().

◆ ExecSetupPartitionTupleRouting()

PartitionTupleRouting * ExecSetupPartitionTupleRouting ( EState *estate,
Relation  rel 
)

Definition at line 221 of file execPartition.c.

/*
 * ExecSetupPartitionTupleRouting
 *		Build the minimal PartitionTupleRouting state for routing tuples
 *		into partitions of "rel"; per-partition state is created lazily.
 *
 * NOTE(review): this rendering elided the palloc0_object() allocation of
 * "proute" (and recording of CurrentMemoryContext in proute->memcxt) and
 * the ExecInitPartitionDispatchInfo() call line; see the References list
 * in the surrounding documentation.
 */
222{
223 PartitionTupleRouting *proute;
224
225 /*
226 * Here we attempt to expend as little effort as possible in setting up
227 * the PartitionTupleRouting. Each partition's ResultRelInfo is built on
228 * demand, only when we actually need to route a tuple to that partition.
229 * The reason for this is that a common case is for INSERT to insert a
230 * single tuple into a partitioned table and this must be fast.
231 */
233 proute->partition_root = rel;
235 /* Rest of members initialized by zeroing */
236
237 /*
238 * Initialize this table's PartitionDispatch object. Here we pass in the
239 * parent as NULL as we don't need to care about any parent of the target
240 * partitioned table.
241 */
243 NULL, 0, NULL);
244
245 return proute;
246}
#define palloc0_object(type)
Definition fe_memutils.h:75

References CurrentMemoryContext, ExecInitPartitionDispatchInfo(), fb(), PartitionTupleRouting::memcxt, palloc0_object, PartitionTupleRouting::partition_root, and RelationGetRelid.

Referenced by apply_handle_tuple_routing(), CopyFrom(), ExecCrossPartitionUpdate(), ExecInitMerge(), and ExecInitModifyTable().

◆ find_matching_subplans_recurse()

static void find_matching_subplans_recurse ( PartitionPruningData *prunedata,
PartitionedRelPruningData *pprune,
bool  initial_prune,
Bitmapset **  validsubplans,
Bitmapset **  validsubplan_rtis 
)
static

Definition at line 2740 of file execPartition.c.

/*
 * find_matching_subplans_recurse
 *		Recursive worker: apply the pruning steps for one partitioned-table
 *		level and accumulate surviving subplan indexes (and, optionally,
 *		their leaf-partition RT indexes) into the output bitmapsets.
 *
 * NOTE(review): this rendering elided the "Bitmapset *partset" declaration,
 * the check_stack_depth() call, the bms_add_member() accumulation lines,
 * and the head of the recursive self-call; confirm against the original.
 */
2745{
2747 int i;
2748
2749 /* Guard against stack overflow due to overly deep partition hierarchy. */
2751
2752 /*
2753 * Prune as appropriate, if we have pruning steps matching the current
2754 * execution context. Otherwise just include all partitions at this
2755 * level.
2756 */
2757 if (initial_prune && pprune->initial_pruning_steps)
2758 partset = get_matching_partitions(&pprune->initial_context,
2759 pprune->initial_pruning_steps);
2760 else if (!initial_prune && pprune->exec_pruning_steps)
2761 partset = get_matching_partitions(&pprune->exec_context,
2762 pprune->exec_pruning_steps);
2763 else
2764 partset = pprune->present_parts;
2765
2766 /* Translate partset into subplan indexes */
2767 i = -1;
2768 while ((i = bms_next_member(partset, i)) >= 0)
2769 {
2770 if (pprune->subplan_map[i] >= 0)
2771 {
 /* (elided: *validsubplans = bms_add_member(*validsubplans, ...) */
2773 pprune->subplan_map[i]);
2774
2775 /*
2776 * Only report leaf partitions. Non-leaf partitions may appear
2777 * here when they use an unflattened Append or MergeAppend.
2778 */
2779 if (validsubplan_rtis && pprune->leafpart_rti_map[i])
2781 pprune->leafpart_rti_map[i]);
2782 }
2783 else
2784 {
2785 int partidx = pprune->subpart_map[i];
2786
2787 if (partidx >= 0)
 /* (elided: recursive find_matching_subplans_recurse(...) call) */
2789 &prunedata->partrelprunedata[partidx],
2792 else
2793 {
2794 /*
2795 * We get here if the planner already pruned all the sub-
2796 * partitions for this partition. Silently ignore this
2797 * partition in this case. The end result is the same: we
2798 * would have pruned all partitions just the same, but we
2799 * don't have any pruning steps to execute to verify this.
2800 */
2801 }
2802 }
2803 }
2804}
Bitmapset * get_matching_partitions(PartitionPruneContext *context, List *pruning_steps)
Definition partprune.c:845
void check_stack_depth(void)
Definition stack_depth.c:95

References bms_add_member(), bms_next_member(), check_stack_depth(), fb(), find_matching_subplans_recurse(), get_matching_partitions(), and i.

Referenced by ExecFindMatchingSubPlans(), and find_matching_subplans_recurse().

◆ FormPartitionKeyDatum()

static void FormPartitionKeyDatum ( PartitionDispatch  pd,
TupleTableSlot *slot,
EState *estate,
Datum *values,
bool *isnull 
)
static

Definition at line 1473 of file execPartition.c.

/*
 * FormPartitionKeyDatum
 *		Fill values[]/isnull[] with the partition-key values of the tuple
 *		in "slot".  Plain-column keys come straight from the slot;
 *		expression keys are evaluated in the per-tuple expression context.
 *
 * NOTE(review): this rendering elided the "ListCell *partexpr_item"
 * declaration and its list_head()/lnext() initialization and advance,
 * plus the "AttrNumber keycol = key->partattrs[i]" line and the head of
 * the ExecEvalExprSwitchContext() call; confirm against the original.
 */
1478{
1480 int i;
1481
1482 if (pd->key->partexprs != NIL && pd->keystate == NIL)
1483 {
1484 /* Check caller has set up context correctly */
1485 Assert(estate != NULL &&
1486 GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
1487
1488 /* First time through, set up expression evaluation state */
1489 pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
1490 }
1491
1493 for (i = 0; i < pd->key->partnatts; i++)
1494 {
1496 Datum datum;
1497 bool isNull;
1498
1499 if (keycol != 0)
1500 {
1501 /* Plain column; get the value directly from the heap tuple */
1502 datum = slot_getattr(slot, keycol, &isNull);
1503 }
1504 else
1505 {
1506 /* Expression; need to evaluate it */
1507 if (partexpr_item == NULL)
1508 elog(ERROR, "wrong number of partition key expressions");
1510 GetPerTupleExprContext(estate),
1511 &isNull);
1513 }
1514 values[i] = datum;
1515 isnull[i] = isNull;
1516 }
1517
 /* Having leftover expression state means the key list was mismatched. */
1518 if (partexpr_item != NULL)
1519 elog(ERROR, "wrong number of partition key expressions");
1520}
List * ExecPrepareExprList(List *nodes, EState *estate)
Definition execExpr.c:872
static Datum ExecEvalExprSwitchContext(ExprState *state, ExprContext *econtext, bool *isNull)
Definition executor.h:439
static ListCell * list_head(const List *l)
Definition pg_list.h:128
static ListCell * lnext(const List *l, const ListCell *c)
Definition pg_list.h:343
AttrNumber * partattrs
Definition partcache.h:29
static Datum slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
Definition tuptable.h:417

References Assert, elog, ERROR, ExecEvalExprSwitchContext(), ExecPrepareExprList(), fb(), GetPerTupleExprContext, i, PartitionDispatchData::key, PartitionDispatchData::keystate, lfirst, list_head(), lnext(), NIL, PartitionKeyData::partattrs, PartitionKeyData::partexprs, PartitionKeyData::partnatts, slot_getattr(), and values.

Referenced by ExecFindPartition().

◆ get_partition_for_tuple()

static int get_partition_for_tuple ( PartitionDispatch  pd,
const Datum *values,
const bool *isnull 
)
static

Definition at line 1570 of file execPartition.c.

/*
 * get_partition_for_tuple
 *		Given the partition-key values/isnull arrays, return the index of
 *		the partition of pd's table that should receive the tuple, or the
 *		default partition's index (possibly -1) when no bound matches.
 *		Maintains the PartitionDesc's last-found cache fields for LIST
 *		and RANGE strategies.
 *
 * NOTE(review): this rendering elided several hyperlinked lines,
 * including the three "case PARTITION_STRATEGY_*" labels, the rowHash
 * declaration/compute_partition_hash_value() head, the cache-threshold
 * conditions, and the heads of the bsearch calls; confirm against the
 * original execPartition.c.
 */
1571{
1572 int bound_offset = -1;
1573 int part_index = -1;
1574 PartitionKey key = pd->key;
1575 PartitionDesc partdesc = pd->partdesc;
1576 PartitionBoundInfo boundinfo = partdesc->boundinfo;
1577
1578 /*
1579 * In the switch statement below, when we perform a cached lookup for
1580 * RANGE and LIST partitioned tables, if we find that the last found
1581 * partition matches the 'values', we return the partition index right
1582 * away. We do this instead of breaking out of the switch as we don't
1583 * want to execute the code about the DEFAULT partition or do any updates
1584 * for any of the cache-related fields. That would be a waste of effort
1585 * as we already know it's not the DEFAULT partition and have no need to
1586 * increment the number of times we found the same partition any higher
1587 * than PARTITION_CACHED_FIND_THRESHOLD.
1588 */
1589
1590 /* Route as appropriate based on partitioning strategy. */
1591 switch (key->strategy)
1592 {
 /* (elided: case PARTITION_STRATEGY_HASH: label) */
1594 {
1596
1597 /* hash partitioning is too cheap to bother caching */
1599 key->partsupfunc,
1600 key->partcollation,
1601 values, isnull);
1602
1603 /*
1604 * HASH partitions can't have a DEFAULT partition and we don't
1605 * do any caching work for them, so just return the part index
1606 */
1607 return boundinfo->indexes[rowHash % boundinfo->nindexes];
1608 }
1609
 /* (elided: case PARTITION_STRATEGY_LIST: label) */
1611 if (isnull[0])
1612 {
1613 /* this is far too cheap to bother doing any caching */
1614 if (partition_bound_accepts_nulls(boundinfo))
1615 {
1616 /*
1617 * When there is a NULL partition we just return that
1618 * directly. We don't have a bound_offset so it's not
1619 * valid to drop into the code after the switch which
1620 * checks and updates the cache fields. We perhaps should
1621 * be invalidating the details of the last cached
1622 * partition but there's no real need to. Keeping those
1623 * fields set gives a chance at matching to the cached
1624 * partition on the next lookup.
1625 */
1626 return boundinfo->null_index;
1627 }
1628 }
1629 else
1630 {
1631 bool equal;
1632
 /* (elided: cached-find threshold check and last_datum_offset decl) */
1634 {
1636 Datum lastDatum = boundinfo->datums[last_datum_offset][0];
1637 int32 cmpval;
1638
1639 /* does the last found datum index match this datum? */
1640 cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
1641 key->partcollation[0],
1642 lastDatum,
1643 values[0]));
1644
1645 if (cmpval == 0)
1646 return boundinfo->indexes[last_datum_offset];
1647
1648 /* fall-through and do a manual lookup */
1649 }
1650
 /* (elided: bound_offset = partition_list_bsearch(key->partsupfunc, ...) */
1652 key->partcollation,
1653 boundinfo,
1654 values[0], &equal);
1655 if (bound_offset >= 0 && equal)
1656 part_index = boundinfo->indexes[bound_offset];
1657 }
1658 break;
1659
 /* (elided: case PARTITION_STRATEGY_RANGE: label) */
1661 {
1662 bool equal = false,
1663 range_partkey_has_null = false;
1664 int i;
1665
1666 /*
1667 * No range includes NULL, so this will be accepted by the
1668 * default partition if there is one, and otherwise rejected.
1669 */
1670 for (i = 0; i < key->partnatts; i++)
1671 {
1672 if (isnull[i])
1673 {
1675 break;
1676 }
1677 }
1678
1679 /* NULLs belong in the DEFAULT partition */
1681 break;
1682
 /* (elided: cached-find threshold check and lastDatums/kind decls) */
1684 {
1688 int32 cmpval;
1689
1690 /* check if the value is >= to the lower bound */
1691 cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1692 key->partcollation,
1693 lastDatums,
1694 kind,
1695 values,
1696 key->partnatts);
1697
1698 /*
1699 * If it's equal to the lower bound then no need to check
1700 * the upper bound.
1701 */
1702 if (cmpval == 0)
1703 return boundinfo->indexes[last_datum_offset + 1];
1704
1705 if (cmpval < 0 && last_datum_offset + 1 < boundinfo->ndatums)
1706 {
1707 /* check if the value is below the upper bound */
1708 lastDatums = boundinfo->datums[last_datum_offset + 1];
1709 kind = boundinfo->kind[last_datum_offset + 1];
1710 cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1711 key->partcollation,
1712 lastDatums,
1713 kind,
1714 values,
1715 key->partnatts);
1716
1717 if (cmpval > 0)
1718 return boundinfo->indexes[last_datum_offset + 1];
1719 }
1720 /* fall-through and do a manual lookup */
1721 }
1722
 /* (elided: bound_offset = partition_range_datum_bsearch(key->partsupfunc, ...) */
1724 key->partcollation,
1725 boundinfo,
1726 key->partnatts,
1727 values,
1728 &equal);
1729
1730 /*
1731 * The bound at bound_offset is less than or equal to the
1732 * tuple value, so the bound at offset+1 is the upper bound of
1733 * the partition we're looking for, if there actually exists
1734 * one.
1735 */
1736 part_index = boundinfo->indexes[bound_offset + 1];
1737 }
1738 break;
1739
1740 default:
1741 elog(ERROR, "unexpected partition strategy: %d",
1742 (int) key->strategy);
1743 }
1744
1745 /*
1746 * part_index < 0 means we failed to find a partition of this parent. Use
1747 * the default partition, if there is one.
1748 */
1749 if (part_index < 0)
1750 {
1751 /*
1752 * No need to reset the cache fields here. The next set of values
1753 * might end up belonging to the cached partition, so leaving the
1754 * cache alone improves the chances of a cache hit on the next lookup.
1755 */
1756 return boundinfo->default_index;
1757 }
1758
1759 /* we should only make it here when the code above set bound_offset */
1760 Assert(bound_offset >= 0);
1761
1762 /*
1763 * Attend to the cache fields. If the bound_offset matches the last
1764 * cached bound offset then we've found the same partition as last time,
1765 * so bump the count by one. If all goes well, we'll eventually reach
1766 * PARTITION_CACHED_FIND_THRESHOLD and try the cache path next time
1767 * around. Otherwise, we'll reset the cache count back to 1 to mark that
1768 * we've found this partition for the first time.
1769 */
1770 if (bound_offset == partdesc->last_found_datum_index)
1771 partdesc->last_found_count++;
1772 else
1773 {
1774 partdesc->last_found_count = 1;
 /* (elided: last_found_datum_index / last_found_part_index updates) */
1777 }
1778
1779 return part_index;
1780}
int32_t int32
Definition c.h:614
uint64_t uint64
Definition c.h:619
bool equal(const void *a, const void *b)
Definition equalfuncs.c:223
#define PARTITION_CACHED_FIND_THRESHOLD
Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
Definition fmgr.c:1151
@ PARTITION_STRATEGY_HASH
Definition parsenodes.h:916
@ PARTITION_STRATEGY_LIST
Definition parsenodes.h:914
@ PARTITION_STRATEGY_RANGE
Definition parsenodes.h:915
PartitionRangeDatumKind
Definition parsenodes.h:965
int32 partition_rbound_datum_cmp(FmgrInfo *partsupfunc, Oid *partcollation, const Datum *rb_datums, PartitionRangeDatumKind *rb_kind, const Datum *tuple_datums, int n_tuple_datums)
uint64 compute_partition_hash_value(int partnatts, FmgrInfo *partsupfunc, const Oid *partcollation, const Datum *values, const bool *isnull)
int partition_range_datum_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, int nvalues, const Datum *values, bool *is_equal)
int partition_list_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, Datum value, bool *is_equal)
#define partition_bound_accepts_nulls(bi)
Definition partbounds.h:98
static int32 DatumGetInt32(Datum X)
Definition postgres.h:202
PartitionRangeDatumKind ** kind
Definition partbounds.h:84
int last_found_datum_index
Definition partdesc.h:46
int last_found_part_index
Definition partdesc.h:52

References Assert, PartitionDescData::boundinfo, compute_partition_hash_value(), DatumGetInt32(), PartitionBoundInfoData::datums, PartitionBoundInfoData::default_index, elog, equal(), ERROR, fb(), FunctionCall2Coll(), i, PartitionBoundInfoData::indexes, PartitionDispatchData::key, PartitionBoundInfoData::kind, PartitionDescData::last_found_count, PartitionDescData::last_found_datum_index, PartitionDescData::last_found_part_index, PartitionBoundInfoData::ndatums, PartitionBoundInfoData::nindexes, PartitionBoundInfoData::null_index, PartitionDispatchData::partdesc, partition_bound_accepts_nulls, PARTITION_CACHED_FIND_THRESHOLD, partition_list_bsearch(), partition_range_datum_bsearch(), partition_rbound_datum_cmp(), PARTITION_STRATEGY_HASH, PARTITION_STRATEGY_LIST, PARTITION_STRATEGY_RANGE, and values.

Referenced by ExecFindPartition().

◆ InitExecPartitionPruneContexts()

static void InitExecPartitionPruneContexts ( PartitionPruneState *prunestate,
PlanState *parent_plan,
Bitmapset *initially_valid_subplans,
int  n_total_subplans 
)
static

Definition at line 2510 of file execPartition.c.

/*
 * InitExecPartitionPruneContexts
 *		Prepare the PartitionPruneState for exec-time pruning: initialize
 *		each level's exec-pruning context and, if initial pruning removed
 *		subplans, remap subplan indexes and rebuild present_parts and
 *		other_subplans accordingly.
 *
 * NOTE(review): this rendering elided several hyperlinked lines — the
 * declarations of the temporary "new_subplan_indexes"/"new_other_subplans"
 * variables, their palloc0_array()/fill lines, the partkey lookup via
 * RelationGetPartitionKey()/PartitionDirectoryLookup(), and the trailing
 * pfree(); confirm against the original execPartition.c.
 */
2514{
2515 EState *estate;
2518 int i;
2519 int newidx;
2520 bool fix_subplan_map = false;
2521
2522 Assert(prunestate->do_exec_prune);
2524 estate = parent_plan->state;
2525
2526 /*
2527 * No need to fix subplans maps if initial pruning didn't eliminate any
2528 * subplans.
2529 */
2531 {
2532 fix_subplan_map = true;
2533
2534 /*
2535 * First we must build a temporary array which maps old subplan
2536 * indexes to new ones. For convenience of initialization, we use
2537 * 1-based indexes in this array and leave pruned items as 0.
2538 */
2540 newidx = 1;
2541 i = -1;
2542 while ((i = bms_next_member(initially_valid_subplans, i)) >= 0)
2543 {
2546 }
2547 }
2548
2549 /*
2550 * Now we can update each PartitionedRelPruneInfo's subplan_map with new
2551 * subplan indexes. We must also recompute its present_parts bitmap.
2552 */
2553 for (i = 0; i < prunestate->num_partprunedata; i++)
2554 {
2555 PartitionPruningData *prunedata = prunestate->partprunedata[i];
2556 int j;
2557
2558 /*
2559 * Within each hierarchy, we perform this loop in back-to-front order
2560 * so that we determine present_parts for the lowest-level partitioned
2561 * tables first. This way we can tell whether a sub-partitioned
2562 * table's partitions were entirely pruned so we can exclude it from
2563 * the current level's present_parts.
2564 */
2565 for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
2566 {
2567 PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2568 int nparts = pprune->nparts;
2569 int k;
2570
2571 /* Initialize PartitionPruneContext for exec pruning, if needed. */
2572 if (pprune->exec_pruning_steps != NIL)
2573 {
2575 PartitionDesc partdesc;
2576
2577 /*
2578 * See the comment in CreatePartitionPruneState() regarding
2579 * the usage of partdesc and partkey.
2580 */
2583 pprune->partrel);
2584
2585 InitPartitionPruneContext(&pprune->exec_context,
2586 pprune->exec_pruning_steps,
2587 partdesc, partkey, parent_plan,
2588 prunestate->econtext);
2589 }
2590
2591 if (!fix_subplan_map)
2592 continue;
2593
2594 /* We just rebuild present_parts from scratch */
2595 bms_free(pprune->present_parts);
2596 pprune->present_parts = NULL;
2597
2598 for (k = 0; k < nparts; k++)
2599 {
2600 int oldidx = pprune->subplan_map[k];
2601 int subidx;
2602
2603 /*
2604 * If this partition existed as a subplan then change the old
2605 * subplan index to the new subplan index. The new index may
2606 * become -1 if the partition was pruned above, or it may just
2607 * come earlier in the subplan list due to some subplans being
2608 * removed earlier in the list. If it's a subpartition, add
2609 * it to present_parts unless it's entirely pruned.
2610 */
2611 if (oldidx >= 0)
2612 {
2614 pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
2615
2616 if (new_subplan_indexes[oldidx] > 0)
2617 pprune->present_parts =
2618 bms_add_member(pprune->present_parts, k);
2619 }
2620 else if ((subidx = pprune->subpart_map[k]) >= 0)
2621 {
2623
2624 subprune = &prunedata->partrelprunedata[subidx];
2625
2626 if (!bms_is_empty(subprune->present_parts))
2628 bms_add_member(pprune->present_parts, k);
2629 }
2630 }
2631 }
2632 }
2633
2634 /*
2635 * If we fixed subplan maps, we must also recompute the other_subplans
2636 * set, since indexes in it may change.
2637 */
2638 if (fix_subplan_map)
2639 {
2641 i = -1;
2642 while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
2644 new_subplan_indexes[i] - 1);
2645
2646 bms_free(prunestate->other_subplans);
2647 prunestate->other_subplans = new_other_subplans;
2648
 /* (elided: pfree(new_subplan_indexes)) */
2650 }
2651}
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
int bms_num_members(const Bitmapset *a)
Definition bitmapset.c:744
#define bms_is_empty(a)
Definition bitmapset.h:118
#define palloc0_array(type, count)
Definition fe_memutils.h:77
void pfree(void *pointer)
Definition mcxt.c:1616

References Assert, bms_add_member(), bms_free(), bms_is_empty, bms_next_member(), bms_num_members(), EState::es_partition_directory, fb(), i, InitPartitionPruneContext(), j, NIL, PartitionedRelPruningData::nparts, palloc0_array, PartitionDirectoryLookup(), pfree(), PartitionedRelPruningData::present_parts, and RelationGetPartitionKey().

Referenced by ExecInitPartitionExecPruning().

◆ InitPartitionPruneContext()

static void InitPartitionPruneContext ( PartitionPruneContext *context,
List *pruning_steps,
PartitionDesc  partdesc,
PartitionKey  partkey,
PlanState *planstate,
ExprContext *econtext 
)
static

Definition at line 2410 of file execPartition.c.

/*
 * InitPartitionPruneContext
 *		Fill a PartitionPruneContext from the partition key/descriptor and
 *		set up ExprStates for the non-Const step expressions so they can be
 *		evaluated at pruning time.
 *
 * NOTE(review): this rendering elided a few hyperlinked lines — the
 * "n_steps = list_length(pruning_steps)" assignment, the
 * "context->ppccontext = CurrentMemoryContext" assignment, the
 * "PartitionPruneStepOp *step" loop variable, and the head of the
 * ExecInitExprWithParams() call; confirm against the original.
 */
2416{
2417 int n_steps;
2418 int partnatts;
2419 ListCell *lc;
2420
2422
2423 context->strategy = partkey->strategy;
2424 context->partnatts = partnatts = partkey->partnatts;
2425 context->nparts = partdesc->nparts;
2426 context->boundinfo = partdesc->boundinfo;
2427 context->partcollation = partkey->partcollation;
2428 context->partsupfunc = partkey->partsupfunc;
2429
2430 /* We'll look up type-specific support functions as needed */
2431 context->stepcmpfuncs = palloc0_array(FmgrInfo, n_steps * partnatts);
2432
2434 context->planstate = planstate;
2435 context->exprcontext = econtext;
2436
2437 /* Initialize expression state for each expression we need */
2438 context->exprstates = palloc0_array(ExprState *, n_steps * partnatts);
2439 foreach(lc, pruning_steps)
2440 {
2442 ListCell *lc2 = list_head(step->exprs);
2443 int keyno;
2444
2445 /* not needed for other step kinds */
2446 if (!IsA(step, PartitionPruneStepOp))
2447 continue;
2448
2449 Assert(list_length(step->exprs) <= partnatts);
2450
2451 for (keyno = 0; keyno < partnatts; keyno++)
2452 {
2453 if (bms_is_member(keyno, step->nullkeys))
2454 continue;
2455
2456 if (lc2 != NULL)
2457 {
2458 Expr *expr = lfirst(lc2);
2459
2460 /* not needed for Consts */
2461 if (!IsA(expr, Const))
2462 {
2463 int stateidx = PruneCxtStateIdx(partnatts,
2464 step->step.step_id,
2465 keyno);
2466
2467 /*
2468 * When planstate is NULL, pruning_steps is known not to
2469 * contain any expressions that depend on the parent plan.
2470 * Information of any available EXTERN parameters must be
2471 * passed explicitly in that case, which the caller must
2472 * have made available via econtext.
2473 */
2474 if (planstate == NULL)
2475 context->exprstates[stateidx] =
2477 econtext->ecxt_param_list_info);
2478 else
2479 context->exprstates[stateidx] =
2480 ExecInitExpr(expr, context->planstate);
2481 }
2482 lc2 = lnext(step->exprs, lc2);
2483 }
2484 }
2485 }
2486}
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition execExpr.c:143
ExprState * ExecInitExprWithParams(Expr *node, ParamListInfo ext_params)
Definition execExpr.c:201
#define IsA(nodeptr, _type_)
Definition nodes.h:164
#define PruneCxtStateIdx(partnatts, step_id, keyno)
Definition partprune.h:70
ParamListInfo ecxt_param_list_info
Definition execnodes.h:296
FmgrInfo * partsupfunc
Definition partprune.h:56
ExprContext * exprcontext
Definition partprune.h:60
MemoryContext ppccontext
Definition partprune.h:58
PartitionBoundInfo boundinfo
Definition partprune.h:54
PlanState * planstate
Definition partprune.h:59
FmgrInfo * stepcmpfuncs
Definition partprune.h:57
ExprState ** exprstates
Definition partprune.h:61
PartitionPruneStep step
Definition plannodes.h:1775
Bitmapset * nullkeys
Definition plannodes.h:1780

References Assert, bms_is_member(), PartitionDescData::boundinfo, PartitionPruneContext::boundinfo, CurrentMemoryContext, ExprContext::ecxt_param_list_info, ExecInitExpr(), ExecInitExprWithParams(), PartitionPruneContext::exprcontext, PartitionPruneStepOp::exprs, PartitionPruneContext::exprstates, fb(), IsA, lfirst, list_head(), list_length(), lnext(), PartitionDescData::nparts, PartitionPruneContext::nparts, PartitionPruneStepOp::nullkeys, palloc0_array, PartitionPruneContext::partcollation, PartitionPruneContext::partnatts, PartitionPruneContext::partsupfunc, PartitionPruneContext::planstate, PartitionPruneContext::ppccontext, PruneCxtStateIdx, PartitionPruneStepOp::step, PartitionPruneStep::step_id, PartitionPruneContext::stepcmpfuncs, and PartitionPruneContext::strategy.

Referenced by CreatePartitionPruneState(), and InitExecPartitionPruneContexts().

◆ IsIndexCompatibleAsArbiter()

static bool IsIndexCompatibleAsArbiter ( Relation  arbiterIndexRelation,
IndexInfo *arbiterIndexInfo,
Relation  indexRelation,
IndexInfo *indexInfo 
)
static

Definition at line 504 of file execPartition.c.

/*
 * IsIndexCompatibleAsArbiter
 *		Return true if the candidate index on the same table could serve as
 *		an ON CONFLICT arbiter equivalent to the given arbiter index:
 *		uniqueness, nulls-not-distinct setting, key-column count, per-column
 *		collation/opfamily/attnum, and index expressions/predicate all match.
 *		Exclusion indexes are never considered compatible.
 *
 * NOTE(review): this rendering elided the opening lines of the two
 * list_difference(RelationGetIndexExpressions/Predicate(arbiterIndexRelation), ...)
 * calls; confirm against the original execPartition.c.
 */
508{
509 Assert(arbiterIndexRelation->rd_index->indrelid == indexRelation->rd_index->indrelid);
510
511 /* must match whether they're unique */
512 if (arbiterIndexInfo->ii_Unique != indexInfo->ii_Unique)
513 return false;
514
515 /* No support currently for comparing exclusion indexes. */
516 if (arbiterIndexInfo->ii_ExclusionOps != NULL ||
517 indexInfo->ii_ExclusionOps != NULL)
518 return false;
519
520 /* the "nulls not distinct" criterion must match */
521 if (arbiterIndexInfo->ii_NullsNotDistinct !=
522 indexInfo->ii_NullsNotDistinct)
523 return false;
524
525 /* number of key attributes must match */
526 if (arbiterIndexInfo->ii_NumIndexKeyAttrs !=
527 indexInfo->ii_NumIndexKeyAttrs)
528 return false;
529
 /* Compare each key column's collation, opfamily and table attnum. */
530 for (int i = 0; i < arbiterIndexInfo->ii_NumIndexKeyAttrs; i++)
531 {
532 if (arbiterIndexRelation->rd_indcollation[i] !=
533 indexRelation->rd_indcollation[i])
534 return false;
535
536 if (arbiterIndexRelation->rd_opfamily[i] !=
537 indexRelation->rd_opfamily[i])
538 return false;
539
540 if (arbiterIndexRelation->rd_index->indkey.values[i] !=
541 indexRelation->rd_index->indkey.values[i])
542 return false;
543 }
544
546 RelationGetIndexExpressions(indexRelation)) != NIL)
547 return false;
548
550 RelationGetIndexPredicate(indexRelation)) != NIL)
551 return false;
552 return true;
553}
List * list_difference(const List *list1, const List *list2)
Definition list.c:1237
List * RelationGetIndexPredicate(Relation relation)
Definition relcache.c:5200
List * RelationGetIndexExpressions(Relation relation)
Definition relcache.c:5087
bool ii_Unique
Definition execnodes.h:211
Oid * ii_ExclusionOps
Definition execnodes.h:199
bool ii_NullsNotDistinct
Definition execnodes.h:213
int ii_NumIndexKeyAttrs
Definition execnodes.h:180
Form_pg_index rd_index
Definition rel.h:192
Oid * rd_opfamily
Definition rel.h:207
Oid * rd_indcollation
Definition rel.h:217

References Assert, fb(), i, IndexInfo::ii_ExclusionOps, IndexInfo::ii_NullsNotDistinct, IndexInfo::ii_NumIndexKeyAttrs, IndexInfo::ii_Unique, list_difference(), NIL, RelationData::rd_indcollation, RelationData::rd_index, RelationData::rd_opfamily, RelationGetIndexExpressions(), and RelationGetIndexPredicate().

Referenced by ExecInitPartitionInfo().