PostgreSQL Source Code  git master
selfuncs.c File Reference
#include "postgres.h"
#include <ctype.h>
#include <math.h>
#include "access/brin.h"
#include "access/brin_page.h"
#include "access/gin.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/visibilitymap.h"
#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_statistic_ext.h"
#include "executor/nodeAgg.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "statistics/statistics.h"
#include "storage/bufmgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/date.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/index_selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
#include "utils/syscache.h"
#include "utils/timestamp.h"
#include "utils/typcache.h"
Include dependency graph for selfuncs.c:

Go to the source code of this file.

Data Structures

struct  GroupVarInfo
 
struct  GinQualCounts
 

Functions

static double eqsel_internal (PG_FUNCTION_ARGS, bool negate)
 
static double eqjoinsel_inner (Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2)
 
static double eqjoinsel_semi (Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2, RelOptInfo *inner_rel)
 
static bool estimate_multivariate_ndistinct (PlannerInfo *root, RelOptInfo *rel, List **varinfos, double *ndistinct)
 
static bool convert_to_scalar (Datum value, Oid valuetypid, Oid collid, double *scaledvalue, Datum lobound, Datum hibound, Oid boundstypid, double *scaledlobound, double *scaledhibound)
 
static double convert_numeric_to_scalar (Datum value, Oid typid, bool *failure)
 
static void convert_string_to_scalar (char *value, double *scaledvalue, char *lobound, double *scaledlobound, char *hibound, double *scaledhibound)
 
static void convert_bytea_to_scalar (Datum value, double *scaledvalue, Datum lobound, double *scaledlobound, Datum hibound, double *scaledhibound)
 
static double convert_one_string_to_scalar (char *value, int rangelo, int rangehi)
 
static double convert_one_bytea_to_scalar (unsigned char *value, int valuelen, int rangelo, int rangehi)
 
static char * convert_string_datum (Datum value, Oid typid, Oid collid, bool *failure)
 
static double convert_timevalue_to_scalar (Datum value, Oid typid, bool *failure)
 
static void examine_simple_variable (PlannerInfo *root, Var *var, VariableStatData *vardata)
 
static bool get_variable_range (PlannerInfo *root, VariableStatData *vardata, Oid sortop, Oid collation, Datum *min, Datum *max)
 
static void get_stats_slot_range (AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc, Oid collation, int16 typLen, bool typByVal, Datum *min, Datum *max, bool *p_have_data)
 
static bool get_actual_variable_range (PlannerInfo *root, VariableStatData *vardata, Oid sortop, Oid collation, Datum *min, Datum *max)
 
static bool get_actual_variable_endpoint (Relation heapRel, Relation indexRel, ScanDirection indexscandir, ScanKey scankeys, int16 typLen, bool typByVal, TupleTableSlot *tableslot, MemoryContext outercontext, Datum *endpointDatum)
 
static RelOptInfo * find_join_input_rel (PlannerInfo *root, Relids relids)
 
Datum eqsel (PG_FUNCTION_ARGS)
 
double var_eq_const (VariableStatData *vardata, Oid operator, Oid collation, Datum constval, bool constisnull, bool varonleft, bool negate)
 
double var_eq_non_const (VariableStatData *vardata, Oid operator, Oid collation, Node *other, bool varonleft, bool negate)
 
Datum neqsel (PG_FUNCTION_ARGS)
 
static double scalarineqsel (PlannerInfo *root, Oid operator, bool isgt, bool iseq, Oid collation, VariableStatData *vardata, Datum constval, Oid consttype)
 
double mcv_selectivity (VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, double *sumcommonp)
 
double histogram_selectivity (VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, int min_hist_size, int n_skip, int *hist_size)
 
double generic_restriction_selectivity (PlannerInfo *root, Oid oproid, Oid collation, List *args, int varRelid, double default_selectivity)
 
double ineq_histogram_selectivity (PlannerInfo *root, VariableStatData *vardata, Oid opoid, FmgrInfo *opproc, bool isgt, bool iseq, Oid collation, Datum constval, Oid consttype)
 
static Datum scalarineqsel_wrapper (PG_FUNCTION_ARGS, bool isgt, bool iseq)
 
Datum scalarltsel (PG_FUNCTION_ARGS)
 
Datum scalarlesel (PG_FUNCTION_ARGS)
 
Datum scalargtsel (PG_FUNCTION_ARGS)
 
Datum scalargesel (PG_FUNCTION_ARGS)
 
Selectivity boolvarsel (PlannerInfo *root, Node *arg, int varRelid)
 
Selectivity booltestsel (PlannerInfo *root, BoolTestType booltesttype, Node *arg, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
Selectivity nulltestsel (PlannerInfo *root, NullTestType nulltesttype, Node *arg, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
static Node * strip_array_coercion (Node *node)
 
Selectivity scalararraysel (PlannerInfo *root, ScalarArrayOpExpr *clause, bool is_join_clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
int estimate_array_length (Node *arrayexpr)
 
Selectivity rowcomparesel (PlannerInfo *root, RowCompareExpr *clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
Datum eqjoinsel (PG_FUNCTION_ARGS)
 
Datum neqjoinsel (PG_FUNCTION_ARGS)
 
Datum scalarltjoinsel (PG_FUNCTION_ARGS)
 
Datum scalarlejoinsel (PG_FUNCTION_ARGS)
 
Datum scalargtjoinsel (PG_FUNCTION_ARGS)
 
Datum scalargejoinsel (PG_FUNCTION_ARGS)
 
void mergejoinscansel (PlannerInfo *root, Node *clause, Oid opfamily, int strategy, bool nulls_first, Selectivity *leftstart, Selectivity *leftend, Selectivity *rightstart, Selectivity *rightend)
 
Datum matchingsel (PG_FUNCTION_ARGS)
 
Datum matchingjoinsel (PG_FUNCTION_ARGS)
 
static List * add_unique_group_var (PlannerInfo *root, List *varinfos, Node *var, VariableStatData *vardata)
 
double estimate_num_groups (PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
 
double estimate_num_groups_incremental (PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo, List **cache_varinfos, int prevNExprs)
 
void estimate_hash_bucket_stats (PlannerInfo *root, Node *hashkey, double nbuckets, Selectivity *mcv_freq, Selectivity *bucketsize_frac)
 
double estimate_hashagg_tablesize (PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
 
bool get_restriction_variable (PlannerInfo *root, List *args, int varRelid, VariableStatData *vardata, Node **other, bool *varonleft)
 
void get_join_variables (PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo, VariableStatData *vardata1, VariableStatData *vardata2, bool *join_is_reversed)
 
static void ReleaseDummy (HeapTuple tuple)
 
void examine_variable (PlannerInfo *root, Node *node, int varRelid, VariableStatData *vardata)
 
bool statistic_proc_security_check (VariableStatData *vardata, Oid func_oid)
 
double get_variable_numdistinct (VariableStatData *vardata, bool *isdefault)
 
List * get_quals_from_indexclauses (List *indexclauses)
 
Cost index_other_operands_eval_cost (PlannerInfo *root, List *indexquals)
 
void genericcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
 
List * add_predicate_to_index_quals (IndexOptInfo *index, List *indexQuals)
 
void btcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
static bool gincost_pattern (IndexOptInfo *index, int indexcol, Oid clause_op, Datum query, GinQualCounts *counts)
 
static bool gincost_opexpr (PlannerInfo *root, IndexOptInfo *index, int indexcol, OpExpr *clause, GinQualCounts *counts)
 
static bool gincost_scalararrayopexpr (PlannerInfo *root, IndexOptInfo *index, int indexcol, ScalarArrayOpExpr *clause, double numIndexEntries, GinQualCounts *counts)
 
void gincostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void brincostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

Variables

get_relation_stats_hook_type get_relation_stats_hook = NULL
 
get_index_stats_hook_type get_index_stats_hook = NULL
 

Function Documentation

◆ add_predicate_to_index_quals()

List* add_predicate_to_index_quals ( IndexOptInfo *  index,
List *  indexQuals 
)

Definition at line 6618 of file selfuncs.c.

6619 {
6620  List *predExtraQuals = NIL;
6621  ListCell *lc;
6622 
6623  if (index->indpred == NIL)
6624  return indexQuals;
6625 
6626  foreach(lc, index->indpred)
6627  {
6628  Node *predQual = (Node *) lfirst(lc);
6629  List *oneQual = list_make1(predQual);
6630 
6631  if (!predicate_implied_by(oneQual, indexQuals, false))
6632  predExtraQuals = list_concat(predExtraQuals, oneQual);
6633  }
6634  return list_concat(predExtraQuals, indexQuals);
6635 }
List * list_concat(List *list1, const List *list2)
Definition: list.c:540
#define lfirst(lc)
Definition: pg_list.h:169
#define NIL
Definition: pg_list.h:65
#define list_make1(x1)
Definition: pg_list.h:206
bool predicate_implied_by(List *predicate_list, List *clause_list, bool weak)
Definition: predtest.c:151
Definition: pg_list.h:51
Definition: nodes.h:574
Definition: type.h:90

References lfirst, list_concat(), list_make1, NIL, and predicate_implied_by().

Referenced by btcostestimate(), genericcostestimate(), and gincostestimate().

◆ add_unique_group_var()

static List* add_unique_group_var ( PlannerInfo *  root,
List *  varinfos,
Node *  var,
VariableStatData *  vardata 
)
static

Definition at line 3248 of file selfuncs.c.

3250 {
3251  GroupVarInfo *varinfo;
3252  double ndistinct;
3253  bool isdefault;
3254  ListCell *lc;
3255 
3256  ndistinct = get_variable_numdistinct(vardata, &isdefault);
3257 
3258  foreach(lc, varinfos)
3259  {
3260  varinfo = (GroupVarInfo *) lfirst(lc);
3261 
3262  /* Drop exact duplicates */
3263  if (equal(var, varinfo->var))
3264  return varinfos;
3265 
3266  /*
3267  * Drop known-equal vars, but only if they belong to different
3268  * relations (see comments for estimate_num_groups)
3269  */
3270  if (vardata->rel != varinfo->rel &&
3271  exprs_known_equal(root, var, varinfo->var))
3272  {
3273  if (varinfo->ndistinct <= ndistinct)
3274  {
3275  /* Keep older item, forget new one */
3276  return varinfos;
3277  }
3278  else
3279  {
3280  /* Delete the older item */
3281  varinfos = foreach_delete_current(varinfos, lc);
3282  }
3283  }
3284  }
3285 
3286  varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3287 
3288  varinfo->var = var;
3289  varinfo->rel = vardata->rel;
3290  varinfo->ndistinct = ndistinct;
3291  varinfo->isdefault = isdefault;
3292  varinfos = lappend(varinfos, varinfo);
3293  return varinfos;
3294 }
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:3564
bool exprs_known_equal(PlannerInfo *root, Node *item1, Node *item2)
Definition: equivclass.c:2368
List * lappend(List *list, void *datum)
Definition: list.c:336
void * palloc(Size size)
Definition: mcxt.c:1068
#define foreach_delete_current(lst, cell)
Definition: pg_list.h:369
double get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
Definition: selfuncs.c:5656
RelOptInfo * rel
Definition: selfuncs.c:3242
double ndistinct
Definition: selfuncs.c:3243
bool isdefault
Definition: selfuncs.c:3244
Node * var
Definition: selfuncs.c:3241
RelOptInfo * rel
Definition: selfuncs.h:88

References equal(), exprs_known_equal(), foreach_delete_current, get_variable_numdistinct(), GroupVarInfo::isdefault, lappend(), lfirst, GroupVarInfo::ndistinct, palloc(), GroupVarInfo::rel, VariableStatData::rel, and GroupVarInfo::var.

Referenced by estimate_num_groups_incremental().
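
The rule applied to known-equal vars from different relations keeps whichever variable has the smaller ndistinct estimate, since grouping on the more selective side cannot produce more groups than that. A trivial standalone sketch with invented numbers (not PostgreSQL code):

#include <stdio.h>

/* Hypothetical illustration of the keep-the-smaller-ndistinct rule used by
 * add_unique_group_var() for vars known equal across different relations. */
int main(void)
{
    double nd_existing = 10000.0;   /* ndistinct of the var already in the list */
    double nd_new = 150.0;          /* ndistinct of the newly examined, equal var */

    /* mirrors "if (varinfo->ndistinct <= ndistinct) keep older item" */
    double kept = (nd_existing <= nd_new) ? nd_existing : nd_new;

    printf("kept ndistinct = %.0f\n", kept);    /* prints 150 */
    return 0;
}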

◆ booltestsel()

Selectivity booltestsel ( PlannerInfo *  root,
BoolTestType  booltesttype,
Node *  arg,
int  varRelid,
JoinType  jointype,
SpecialJoinInfo *  sjinfo 
)

Definition at line 1537 of file selfuncs.c.

1539 {
1540  VariableStatData vardata;
1541  double selec;
1542 
1543  examine_variable(root, arg, varRelid, &vardata);
1544 
1545  if (HeapTupleIsValid(vardata.statsTuple))
1546  {
1547  Form_pg_statistic stats;
1548  double freq_null;
1549  AttStatsSlot sslot;
1550 
1551  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1552  freq_null = stats->stanullfrac;
1553 
1554  if (get_attstatsslot(&sslot, vardata.statsTuple,
1555  STATISTIC_KIND_MCV, InvalidOid,
1556  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)
1557  && sslot.nnumbers > 0)
1558  {
1559  double freq_true;
1560  double freq_false;
1561 
1562  /*
1563  * Get first MCV frequency and derive frequency for true.
1564  */
1565  if (DatumGetBool(sslot.values[0]))
1566  freq_true = sslot.numbers[0];
1567  else
1568  freq_true = 1.0 - sslot.numbers[0] - freq_null;
1569 
1570  /*
1571  * Next derive frequency for false. Then use these as appropriate
1572  * to derive frequency for each case.
1573  */
1574  freq_false = 1.0 - freq_true - freq_null;
1575 
1576  switch (booltesttype)
1577  {
1578  case IS_UNKNOWN:
1579  /* select only NULL values */
1580  selec = freq_null;
1581  break;
1582  case IS_NOT_UNKNOWN:
1583  /* select non-NULL values */
1584  selec = 1.0 - freq_null;
1585  break;
1586  case IS_TRUE:
1587  /* select only TRUE values */
1588  selec = freq_true;
1589  break;
1590  case IS_NOT_TRUE:
1591  /* select non-TRUE values */
1592  selec = 1.0 - freq_true;
1593  break;
1594  case IS_FALSE:
1595  /* select only FALSE values */
1596  selec = freq_false;
1597  break;
1598  case IS_NOT_FALSE:
1599  /* select non-FALSE values */
1600  selec = 1.0 - freq_false;
1601  break;
1602  default:
1603  elog(ERROR, "unrecognized booltesttype: %d",
1604  (int) booltesttype);
1605  selec = 0.0; /* Keep compiler quiet */
1606  break;
1607  }
1608 
1609  free_attstatsslot(&sslot);
1610  }
1611  else
1612  {
1613  /*
1614  * No most-common-value info available. Still have null fraction
1615  * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1616  * for null fraction and assume a 50-50 split of TRUE and FALSE.
1617  */
1618  switch (booltesttype)
1619  {
1620  case IS_UNKNOWN:
1621  /* select only NULL values */
1622  selec = freq_null;
1623  break;
1624  case IS_NOT_UNKNOWN:
1625  /* select non-NULL values */
1626  selec = 1.0 - freq_null;
1627  break;
1628  case IS_TRUE:
1629  case IS_FALSE:
1630  /* Assume we select half of the non-NULL values */
1631  selec = (1.0 - freq_null) / 2.0;
1632  break;
1633  case IS_NOT_TRUE:
1634  case IS_NOT_FALSE:
1635  /* Assume we select NULLs plus half of the non-NULLs */
1636  /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1637  selec = (freq_null + 1.0) / 2.0;
1638  break;
1639  default:
1640  elog(ERROR, "unrecognized booltesttype: %d",
1641  (int) booltesttype);
1642  selec = 0.0; /* Keep compiler quiet */
1643  break;
1644  }
1645  }
1646  }
1647  else
1648  {
1649  /*
1650  * If we can't get variable statistics for the argument, perhaps
1651  * clause_selectivity can do something with it. We ignore the
1652  * possibility of a NULL value when using clause_selectivity, and just
1653  * assume the value is either TRUE or FALSE.
1654  */
1655  switch (booltesttype)
1656  {
1657  case IS_UNKNOWN:
1658  selec = DEFAULT_UNK_SEL;
1659  break;
1660  case IS_NOT_UNKNOWN:
1661  selec = DEFAULT_NOT_UNK_SEL;
1662  break;
1663  case IS_TRUE:
1664  case IS_NOT_FALSE:
1665  selec = (double) clause_selectivity(root, arg,
1666  varRelid,
1667  jointype, sjinfo);
1668  break;
1669  case IS_FALSE:
1670  case IS_NOT_TRUE:
1671  selec = 1.0 - (double) clause_selectivity(root, arg,
1672  varRelid,
1673  jointype, sjinfo);
1674  break;
1675  default:
1676  elog(ERROR, "unrecognized booltesttype: %d",
1677  (int) booltesttype);
1678  selec = 0.0; /* Keep compiler quiet */
1679  break;
1680  }
1681  }
1682 
1683  ReleaseVariableStats(vardata);
1684 
1685  /* result should be in range, but make sure... */
1686  CLAMP_PROBABILITY(selec);
1687 
1688  return (Selectivity) selec;
1689 }
Selectivity clause_selectivity(PlannerInfo *root, Node *clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:690
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define GETSTRUCT(TUP)
Definition: htup_details.h:649
void free_attstatsslot(AttStatsSlot *sslot)
Definition: lsyscache.c:3304
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition: lsyscache.c:3187
#define ATTSTATSSLOT_NUMBERS
Definition: lsyscache.h:43
#define ATTSTATSSLOT_VALUES
Definition: lsyscache.h:42
double Selectivity
Definition: nodes.h:706
void * arg
FormData_pg_statistic * Form_pg_statistic
Definition: pg_statistic.h:135
#define DatumGetBool(X)
Definition: postgres.h:437
#define InvalidOid
Definition: postgres_ext.h:36
@ IS_NOT_TRUE
Definition: primnodes.h:1543
@ IS_NOT_FALSE
Definition: primnodes.h:1543
@ IS_NOT_UNKNOWN
Definition: primnodes.h:1543
@ IS_TRUE
Definition: primnodes.h:1543
@ IS_UNKNOWN
Definition: primnodes.h:1543
@ IS_FALSE
Definition: primnodes.h:1543
void examine_variable(PlannerInfo *root, Node *node, int varRelid, VariableStatData *vardata)
Definition: selfuncs.c:4987
#define DEFAULT_NOT_UNK_SEL
Definition: selfuncs.h:56
#define ReleaseVariableStats(vardata)
Definition: selfuncs.h:99
#define CLAMP_PROBABILITY(p)
Definition: selfuncs.h:63
#define DEFAULT_UNK_SEL
Definition: selfuncs.h:55
Datum * values
Definition: lsyscache.h:53
float4 * numbers
Definition: lsyscache.h:56
int nnumbers
Definition: lsyscache.h:57
HeapTuple statsTuple
Definition: selfuncs.h:89

References arg, ATTSTATSSLOT_NUMBERS, ATTSTATSSLOT_VALUES, CLAMP_PROBABILITY, clause_selectivity(), DatumGetBool, DEFAULT_NOT_UNK_SEL, DEFAULT_UNK_SEL, elog, ERROR, examine_variable(), free_attstatsslot(), get_attstatsslot(), GETSTRUCT, HeapTupleIsValid, InvalidOid, IS_FALSE, IS_NOT_FALSE, IS_NOT_TRUE, IS_NOT_UNKNOWN, IS_TRUE, IS_UNKNOWN, AttStatsSlot::nnumbers, AttStatsSlot::numbers, ReleaseVariableStats, VariableStatData::statsTuple, and AttStatsSlot::values.

Referenced by clause_selectivity_ext().
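
The MCV branch above reduces to simple arithmetic on the null fraction and the first MCV frequency. A minimal standalone sketch (the statistics values are invented; this is not part of selfuncs.c):

#include <stdio.h>

/* Hypothetical worked example of booltestsel()'s MCV-based arithmetic.
 * Assumed inputs: null fraction 0.10, first MCV entry is 'true' with
 * frequency 0.60. */
int main(void)
{
    double freq_null = 0.10;            /* stanullfrac */
    double first_mcv_freq = 0.60;       /* numbers[0] for the first MCV value */
    int    first_mcv_is_true = 1;       /* DatumGetBool(values[0]) */

    double freq_true = first_mcv_is_true
        ? first_mcv_freq
        : 1.0 - first_mcv_freq - freq_null;
    double freq_false = 1.0 - freq_true - freq_null;

    printf("IS TRUE       %.2f\n", freq_true);          /* 0.60 */
    printf("IS NOT TRUE   %.2f\n", 1.0 - freq_true);    /* 0.40 */
    printf("IS FALSE      %.2f\n", freq_false);         /* 0.30 */
    printf("IS NOT FALSE  %.2f\n", 1.0 - freq_false);   /* 0.70 */
    printf("IS UNKNOWN    %.2f\n", freq_null);          /* 0.10 */
    return 0;
}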

◆ boolvarsel()

Selectivity boolvarsel ( PlannerInfo *  root,
Node *  arg,
int  varRelid 
)

Definition at line 1509 of file selfuncs.c.

1510 {
1511  VariableStatData vardata;
1512  double selec;
1513 
1514  examine_variable(root, arg, varRelid, &vardata);
1515  if (HeapTupleIsValid(vardata.statsTuple))
1516  {
1517  /*
1518  * A boolean variable V is equivalent to the clause V = 't', so we
1519  * compute the selectivity as if that is what we have.
1520  */
1521  selec = var_eq_const(&vardata, BooleanEqualOperator, InvalidOid,
1522  BoolGetDatum(true), false, true, false);
1523  }
1524  else
1525  {
1526  /* Otherwise, the default estimate is 0.5 */
1527  selec = 0.5;
1528  }
1529  ReleaseVariableStats(vardata);
1530  return selec;
1531 }
#define BoolGetDatum(X)
Definition: postgres.h:446
double var_eq_const(VariableStatData *vardata, Oid operator, Oid collation, Datum constval, bool constisnull, bool varonleft, bool negate)
Definition: selfuncs.c:292

References arg, BoolGetDatum, examine_variable(), HeapTupleIsValid, InvalidOid, ReleaseVariableStats, VariableStatData::statsTuple, and var_eq_const().

Referenced by clause_selectivity_ext().

◆ brincostestimate()

void brincostestimate ( PlannerInfo *  root,
IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7734 of file selfuncs.c.

7738 {
7739  IndexOptInfo *index = path->indexinfo;
7740  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7741  double numPages = index->pages;
7742  RelOptInfo *baserel = index->rel;
7743  RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
7744  Cost spc_seq_page_cost;
7745  Cost spc_random_page_cost;
7746  double qual_arg_cost;
7747  double qualSelectivity;
7748  BrinStatsData statsData;
7749  double indexRanges;
7750  double minimalRanges;
7751  double estimatedRanges;
7752  double selec;
7753  Relation indexRel;
7754  ListCell *l;
7755  VariableStatData vardata;
7756 
7757  Assert(rte->rtekind == RTE_RELATION);
7758 
7759  /* fetch estimated page cost for the tablespace containing the index */
7760  get_tablespace_page_costs(index->reltablespace,
7761  &spc_random_page_cost,
7762  &spc_seq_page_cost);
7763 
7764  /*
7765  * Obtain some data from the index itself, if possible. Otherwise invent
7766  * some plausible internal statistics based on the relation page count.
7767  */
7768  if (!index->hypothetical)
7769  {
7770  /*
7771  * A lock should have already been obtained on the index in plancat.c.
7772  */
7773  indexRel = index_open(index->indexoid, NoLock);
7774  brinGetStats(indexRel, &statsData);
7775  index_close(indexRel, NoLock);
7776 
7777  /* work out the actual number of ranges in the index */
7778  indexRanges = Max(ceil((double) baserel->pages /
7779  statsData.pagesPerRange), 1.0);
7780  }
7781  else
7782  {
7783  /*
7784  * Assume default number of pages per range, and estimate the number
7785  * of ranges based on that.
7786  */
7787  indexRanges = Max(ceil((double) baserel->pages /
7788  BRIN_DEFAULT_PAGES_PER_RANGE), 1.0);
7789 
7790  statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE;
7791  statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
7792  }
7793 
7794  /*
7795  * Compute index correlation
7796  *
7797  * Because we can use all index quals equally when scanning, we can use
7798  * the largest correlation (in absolute value) among columns used by the
7799  * query. Start at zero, the worst possible case. If we cannot find any
7800  * correlation statistics, we will keep it as 0.
7801  */
7802  *indexCorrelation = 0;
7803 
7804  foreach(l, path->indexclauses)
7805  {
7806  IndexClause *iclause = lfirst_node(IndexClause, l);
7807  AttrNumber attnum = index->indexkeys[iclause->indexcol];
7808 
7809  /* attempt to lookup stats in relation for this index column */
7810  if (attnum != 0)
7811  {
7812  /* Simple variable -- look to stats for the underlying table */
7813  if (get_relation_stats_hook &&
7814  (*get_relation_stats_hook) (root, rte, attnum, &vardata))
7815  {
7816  /*
7817  * The hook took control of acquiring a stats tuple. If it
7818  * did supply a tuple, it'd better have supplied a freefunc.
7819  */
7820  if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
7821  elog(ERROR,
7822  "no function provided to release variable stats with");
7823  }
7824  else
7825  {
7826  vardata.statsTuple =
7827  SearchSysCache3(STATRELATTINH,
7828  ObjectIdGetDatum(rte->relid),
7829  Int16GetDatum(attnum),
7830  BoolGetDatum(false));
7831  vardata.freefunc = ReleaseSysCache;
7832  }
7833  }
7834  else
7835  {
7836  /*
7837  * Looks like we've found an expression column in the index. Let's
7838  * see if there's any stats for it.
7839  */
7840 
7841  /* get the attnum from the 0-based index. */
7842  attnum = iclause->indexcol + 1;
7843 
7844  if (get_index_stats_hook &&
7845  (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
7846  {
7847  /*
7848  * The hook took control of acquiring a stats tuple. If it
7849  * did supply a tuple, it'd better have supplied a freefunc.
7850  */
7851  if (HeapTupleIsValid(vardata.statsTuple) &&
7852  !vardata.freefunc)
7853  elog(ERROR, "no function provided to release variable stats with");
7854  }
7855  else
7856  {
7857  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
7858  ObjectIdGetDatum(index->indexoid),
7859  Int16GetDatum(attnum),
7860  BoolGetDatum(false));
7861  vardata.freefunc = ReleaseSysCache;
7862  }
7863  }
7864 
7865  if (HeapTupleIsValid(vardata.statsTuple))
7866  {
7867  AttStatsSlot sslot;
7868 
7869  if (get_attstatsslot(&sslot, vardata.statsTuple,
7870  STATISTIC_KIND_CORRELATION, InvalidOid,
7871  ATTSTATSSLOT_NUMBERS))
7872  {
7873  double varCorrelation = 0.0;
7874 
7875  if (sslot.nnumbers > 0)
7876  varCorrelation = Abs(sslot.numbers[0]);
7877 
7878  if (varCorrelation > *indexCorrelation)
7879  *indexCorrelation = varCorrelation;
7880 
7881  free_attstatsslot(&sslot);
7882  }
7883  }
7884 
7885  ReleaseVariableStats(vardata);
7886  }
7887 
7888  qualSelectivity = clauselist_selectivity(root, indexQuals,
7889  baserel->relid,
7890  JOIN_INNER, NULL);
7891 
7892  /*
7893  * Now calculate the minimum possible ranges we could match with if all of
7894  * the rows were in the perfect order in the table's heap.
7895  */
7896  minimalRanges = ceil(indexRanges * qualSelectivity);
7897 
7898  /*
7899  * Now estimate the number of ranges that we'll touch by using the
7900  * indexCorrelation from the stats. Careful not to divide by zero (note
7901  * we're using the absolute value of the correlation).
7902  */
7903  if (*indexCorrelation < 1.0e-10)
7904  estimatedRanges = indexRanges;
7905  else
7906  estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
7907 
7908  /* we expect to visit this portion of the table */
7909  selec = estimatedRanges / indexRanges;
7910 
7911  CLAMP_PROBABILITY(selec);
7912 
7913  *indexSelectivity = selec;
7914 
7915  /*
7916  * Compute the index qual costs, much as in genericcostestimate, to add to
7917  * the index costs. We can disregard indexorderbys, since BRIN doesn't
7918  * support those.
7919  */
7920  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
7921 
7922  /*
7923  * Compute the startup cost as the cost to read the whole revmap
7924  * sequentially, including the cost to execute the index quals.
7925  */
7926  *indexStartupCost =
7927  spc_seq_page_cost * statsData.revmapNumPages * loop_count;
7928  *indexStartupCost += qual_arg_cost;
7929 
7930  /*
7931  * To read a BRIN index there might be a bit of back and forth over
7932  * regular pages, as revmap might point to them out of sequential order;
7933  * calculate the total cost as reading the whole index in random order.
7934  */
7935  *indexTotalCost = *indexStartupCost +
7936  spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
7937 
7938  /*
7939  * Charge a small amount per range tuple which we expect to match to. This
7940  * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
7941  * will set a bit for each page in the range when we find a matching
7942  * range, so we must multiply the charge by the number of pages in the
7943  * range.
7944  */
7945  *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
7946  statsData.pagesPerRange;
7947 
7948  *indexPages = index->pages;
7949 }
int16 AttrNumber
Definition: attnum.h:21
void brinGetStats(Relation index, BrinStatsData *stats)
Definition: brin.c:1249
#define BRIN_DEFAULT_PAGES_PER_RANGE
Definition: brin.h:38
#define REVMAP_PAGE_MAXITEMS
Definition: brin_page.h:93
#define Min(x, y)
Definition: c.h:986
#define Max(x, y)
Definition: c.h:980
#define Abs(x)
Definition: c.h:992
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:102
double cpu_operator_cost
Definition: costsize.c:124
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:158
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:132
Assert(fmt[strlen(fmt) - 1] !='\n')
#define NoLock
Definition: lockdefs.h:34
double Cost
Definition: nodes.h:707
@ JOIN_INNER
Definition: nodes.h:749
@ RTE_RELATION
Definition: parsenodes.h:998
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:389
int16 attnum
Definition: pg_attribute.h:83
#define lfirst_node(type, lc)
Definition: pg_list.h:172
#define ObjectIdGetDatum(X)
Definition: postgres.h:551
#define Int16GetDatum(X)
Definition: postgres.h:495
List * get_quals_from_indexclauses(List *indexclauses)
Definition: selfuncs.c:6316
get_index_stats_hook_type get_index_stats_hook
Definition: selfuncs.c:145
Cost index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
Definition: selfuncs.c:6346
get_relation_stats_hook_type get_relation_stats_hook
Definition: selfuncs.c:144
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:181
BlockNumber revmapNumPages
Definition: brin.h:34
BlockNumber pagesPerRange
Definition: brin.h:33
AttrNumber indexcol
Definition: pathnodes.h:1306
List * indexclauses
Definition: pathnodes.h:1258
IndexOptInfo * indexinfo
Definition: pathnodes.h:1257
RTEKind rtekind
Definition: parsenodes.h:1015
Index relid
Definition: pathnodes.h:710
BlockNumber pages
Definition: pathnodes.h:721
void(* freefunc)(HeapTuple tuple)
Definition: selfuncs.h:91
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1221
HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3)
Definition: syscache.c:1195
@ STATRELATTINH
Definition: syscache.h:97

References Abs, Assert(), attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum, BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, free_attstatsslot(), VariableStatData::freefunc, get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, Int16GetDatum, InvalidOid, JOIN_INNER, lfirst_node, Max, Min, AttStatsSlot::nnumbers, NoLock, AttStatsSlot::numbers, ObjectIdGetDatum, RelOptInfo::pages, BrinStatsData::pagesPerRange, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RelOptInfo::relid, REVMAP_PAGE_MAXITEMS, BrinStatsData::revmapNumPages, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by brinhandler().
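
The cost formulas above can be followed end to end with a hedged standalone sketch; every input below (page counts, costs, selectivity, correlation) is an invented example plugged into the same expressions:

#include <math.h>
#include <stdio.h>

/* Hypothetical walk-through of the BRIN cost arithmetic in
 * brincostestimate(); all inputs are made up for illustration. */
int main(void)
{
    double indexRanges = 1000.0;        /* heap pages / pagesPerRange */
    double qualSelectivity = 0.01;
    double indexCorrelation = 0.5;      /* best |correlation| among columns */
    double pagesPerRange = 128.0;
    double revmapNumPages = 4.0;
    double numPages = 132.0;            /* total index pages */
    double loop_count = 1.0;
    double spc_seq_page_cost = 1.0;
    double spc_random_page_cost = 4.0;
    double qual_arg_cost = 0.0025;
    double cpu_operator_cost = 0.0025;

    double minimalRanges = ceil(indexRanges * qualSelectivity);        /* 10 */
    double estimatedRanges = (indexCorrelation < 1.0e-10)
        ? indexRanges
        : fmin(minimalRanges / indexCorrelation, indexRanges);         /* 20 */
    double selec = estimatedRanges / indexRanges;                      /* 0.02 */

    double startup = spc_seq_page_cost * revmapNumPages * loop_count
        + qual_arg_cost;                                               /* 4.0025 */
    double total = startup
        + spc_random_page_cost * (numPages - revmapNumPages) * loop_count
        + 0.1 * cpu_operator_cost * estimatedRanges * pagesPerRange;   /* ~516.64 */

    printf("selectivity %.3f, startup %.4f, total %.4f\n",
           selec, startup, total);
    return 0;
}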

◆ btcostestimate()

void btcostestimate ( PlannerInfo *  root,
IndexPath *  path,
double  loop_count,
Cost *  indexStartupCost,
Cost *  indexTotalCost,
Selectivity *  indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6639 of file selfuncs.c.

6643 {
6644  IndexOptInfo *index = path->indexinfo;
6645  GenericCosts costs;
6646  Oid relid;
6647  AttrNumber colnum;
6648  VariableStatData vardata;
6649  double numIndexTuples;
6650  Cost descentCost;
6651  List *indexBoundQuals;
6652  int indexcol;
6653  bool eqQualHere;
6654  bool found_saop;
6655  bool found_is_null_op;
6656  double num_sa_scans;
6657  ListCell *lc;
6658 
6659  /*
6660  * For a btree scan, only leading '=' quals plus inequality quals for the
6661  * immediately next attribute contribute to index selectivity (these are
6662  * the "boundary quals" that determine the starting and stopping points of
6663  * the index scan). Additional quals can suppress visits to the heap, so
6664  * it's OK to count them in indexSelectivity, but they should not count
6665  * for estimating numIndexTuples. So we must examine the given indexquals
6666  * to find out which ones count as boundary quals. We rely on the
6667  * knowledge that they are given in index column order.
6668  *
6669  * For a RowCompareExpr, we consider only the first column, just as
6670  * rowcomparesel() does.
6671  *
6672  * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6673  * index scans not one, but the ScalarArrayOpExpr's operator can be
6674  * considered to act the same as it normally does.
6675  */
6676  indexBoundQuals = NIL;
6677  indexcol = 0;
6678  eqQualHere = false;
6679  found_saop = false;
6680  found_is_null_op = false;
6681  num_sa_scans = 1;
6682  foreach(lc, path->indexclauses)
6683  {
6684  IndexClause *iclause = lfirst_node(IndexClause, lc);
6685  ListCell *lc2;
6686 
6687  if (indexcol != iclause->indexcol)
6688  {
6689  /* Beginning of a new column's quals */
6690  if (!eqQualHere)
6691  break; /* done if no '=' qual for indexcol */
6692  eqQualHere = false;
6693  indexcol++;
6694  if (indexcol != iclause->indexcol)
6695  break; /* no quals at all for indexcol */
6696  }
6697 
6698  /* Examine each indexqual associated with this index clause */
6699  foreach(lc2, iclause->indexquals)
6700  {
6701  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6702  Expr *clause = rinfo->clause;
6703  Oid clause_op = InvalidOid;
6704  int op_strategy;
6705 
6706  if (IsA(clause, OpExpr))
6707  {
6708  OpExpr *op = (OpExpr *) clause;
6709 
6710  clause_op = op->opno;
6711  }
6712  else if (IsA(clause, RowCompareExpr))
6713  {
6714  RowCompareExpr *rc = (RowCompareExpr *) clause;
6715 
6716  clause_op = linitial_oid(rc->opnos);
6717  }
6718  else if (IsA(clause, ScalarArrayOpExpr))
6719  {
6720  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6721  Node *other_operand = (Node *) lsecond(saop->args);
6722  int alength = estimate_array_length(other_operand);
6723 
6724  clause_op = saop->opno;
6725  found_saop = true;
6726  /* count number of SA scans induced by indexBoundQuals only */
6727  if (alength > 1)
6728  num_sa_scans *= alength;
6729  }
6730  else if (IsA(clause, NullTest))
6731  {
6732  NullTest *nt = (NullTest *) clause;
6733 
6734  if (nt->nulltesttype == IS_NULL)
6735  {
6736  found_is_null_op = true;
6737  /* IS NULL is like = for selectivity purposes */
6738  eqQualHere = true;
6739  }
6740  }
6741  else
6742  elog(ERROR, "unsupported indexqual type: %d",
6743  (int) nodeTag(clause));
6744 
6745  /* check for equality operator */
6746  if (OidIsValid(clause_op))
6747  {
6748  op_strategy = get_op_opfamily_strategy(clause_op,
6749  index->opfamily[indexcol]);
6750  Assert(op_strategy != 0); /* not a member of opfamily?? */
6751  if (op_strategy == BTEqualStrategyNumber)
6752  eqQualHere = true;
6753  }
6754 
6755  indexBoundQuals = lappend(indexBoundQuals, rinfo);
6756  }
6757  }
6758 
6759  /*
6760  * If index is unique and we found an '=' clause for each column, we can
6761  * just assume numIndexTuples = 1 and skip the expensive
6762  * clauselist_selectivity calculations. However, a ScalarArrayOp or
6763  * NullTest invalidates that theory, even though it sets eqQualHere.
6764  */
6765  if (index->unique &&
6766  indexcol == index->nkeycolumns - 1 &&
6767  eqQualHere &&
6768  !found_saop &&
6769  !found_is_null_op)
6770  numIndexTuples = 1.0;
6771  else
6772  {
6773  List *selectivityQuals;
6774  Selectivity btreeSelectivity;
6775 
6776  /*
6777  * If the index is partial, AND the index predicate with the
6778  * index-bound quals to produce a more accurate idea of the number of
6779  * rows covered by the bound conditions.
6780  */
6781  selectivityQuals = add_predicate_to_index_quals(index, indexBoundQuals);
6782 
6783  btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6784  index->rel->relid,
6785  JOIN_INNER,
6786  NULL);
6787  numIndexTuples = btreeSelectivity * index->rel->tuples;
6788 
6789  /*
6790  * As in genericcostestimate(), we have to adjust for any
6791  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6792  * to integer.
6793  */
6794  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6795  }
6796 
6797  /*
6798  * Now do generic index cost estimation.
6799  */
6800  MemSet(&costs, 0, sizeof(costs));
6801  costs.numIndexTuples = numIndexTuples;
6802 
6803  genericcostestimate(root, path, loop_count, &costs);
6804 
6805  /*
6806  * Add a CPU-cost component to represent the costs of initial btree
6807  * descent. We don't charge any I/O cost for touching upper btree levels,
6808  * since they tend to stay in cache, but we still have to do about log2(N)
6809  * comparisons to descend a btree of N leaf tuples. We charge one
6810  * cpu_operator_cost per comparison.
6811  *
6812  * If there are ScalarArrayOpExprs, charge this once per SA scan. The
6813  * ones after the first one are not startup cost so far as the overall
6814  * plan is concerned, so add them only to "total" cost.
6815  */
6816  if (index->tuples > 1) /* avoid computing log(0) */
6817  {
6818  descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6819  costs.indexStartupCost += descentCost;
6820  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6821  }
6822 
6823  /*
6824  * Even though we're not charging I/O cost for touching upper btree pages,
6825  * it's still reasonable to charge some CPU cost per page descended
6826  * through. Moreover, if we had no such charge at all, bloated indexes
6827  * would appear to have the same search cost as unbloated ones, at least
6828  * in cases where only a single leaf page is expected to be visited. This
6829  * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6830  * touched. The number of such pages is btree tree height plus one (ie,
6831  * we charge for the leaf page too). As above, charge once per SA scan.
6832  */
6833  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6834  costs.indexStartupCost += descentCost;
6835  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6836 
6837  /*
6838  * If we can get an estimate of the first column's ordering correlation C
6839  * from pg_statistic, estimate the index correlation as C for a
6840  * single-column index, or C * 0.75 for multiple columns. (The idea here
6841  * is that multiple columns dilute the importance of the first column's
6842  * ordering, but don't negate it entirely. Before 8.0 we divided the
6843  * correlation by the number of columns, but that seems too strong.)
6844  */
6845  MemSet(&vardata, 0, sizeof(vardata));
6846 
6847  if (index->indexkeys[0] != 0)
6848  {
6849  /* Simple variable --- look to stats for the underlying table */
6850  RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6851 
6852  Assert(rte->rtekind == RTE_RELATION);
6853  relid = rte->relid;
6854  Assert(relid != InvalidOid);
6855  colnum = index->indexkeys[0];
6856 
6857  if (get_relation_stats_hook &&
6858  (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6859  {
6860  /*
6861  * The hook took control of acquiring a stats tuple. If it did
6862  * supply a tuple, it'd better have supplied a freefunc.
6863  */
6864  if (HeapTupleIsValid(vardata.statsTuple) &&
6865  !vardata.freefunc)
6866  elog(ERROR, "no function provided to release variable stats with");
6867  }
6868  else
6869  {
6870  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6871  ObjectIdGetDatum(relid),
6872  Int16GetDatum(colnum),
6873  BoolGetDatum(rte->inh));
6874  vardata.freefunc = ReleaseSysCache;
6875  }
6876  }
6877  else
6878  {
6879  /* Expression --- maybe there are stats for the index itself */
6880  relid = index->indexoid;
6881  colnum = 1;
6882 
6883  if (get_index_stats_hook &&
6884  (*get_index_stats_hook) (root, relid, colnum, &vardata))
6885  {
6886  /*
6887  * The hook took control of acquiring a stats tuple. If it did
6888  * supply a tuple, it'd better have supplied a freefunc.
6889  */
6890  if (HeapTupleIsValid(vardata.statsTuple) &&
6891  !vardata.freefunc)
6892  elog(ERROR, "no function provided to release variable stats with");
6893  }
6894  else
6895  {
6896  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6897  ObjectIdGetDatum(relid),
6898  Int16GetDatum(colnum),
6899  BoolGetDatum(false));
6900  vardata.freefunc = ReleaseSysCache;
6901  }
6902  }
6903 
6904  if (HeapTupleIsValid(vardata.statsTuple))
6905  {
6906  Oid sortop;
6907  AttStatsSlot sslot;
6908 
6909  sortop = get_opfamily_member(index->opfamily[0],
6910  index->opcintype[0],
6911  index->opcintype[0],
6912  BTLessStrategyNumber);
6913  if (OidIsValid(sortop) &&
6914  get_attstatsslot(&sslot, vardata.statsTuple,
6915  STATISTIC_KIND_CORRELATION, sortop,
6916  ATTSTATSSLOT_NUMBERS))
6917  {
6918  double varCorrelation;
6919 
6920  Assert(sslot.nnumbers == 1);
6921  varCorrelation = sslot.numbers[0];
6922 
6923  if (index->reverse_sort[0])
6924  varCorrelation = -varCorrelation;
6925 
6926  if (index->nkeycolumns > 1)
6927  costs.indexCorrelation = varCorrelation * 0.75;
6928  else
6929  costs.indexCorrelation = varCorrelation;
6930 
6931  free_attstatsslot(&sslot);
6932  }
6933  }
6934 
6935  ReleaseVariableStats(vardata);
6936 
6937  *indexStartupCost = costs.indexStartupCost;
6938  *indexTotalCost = costs.indexTotalCost;
6939  *indexSelectivity = costs.indexSelectivity;
6940  *indexCorrelation = costs.indexCorrelation;
6941  *indexPages = costs.numIndexPages;
6942 }
#define MemSet(start, val, len)
Definition: c.h:1008
#define OidIsValid(objectId)
Definition: c.h:710
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition: lsyscache.c:81
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:164
#define IsA(nodeptr, _type_)
Definition: nodes.h:624
#define nodeTag(nodeptr)
Definition: nodes.h:578
#define lsecond(l)
Definition: pg_list.h:179
#define linitial_oid(l)
Definition: pg_list.h:176
unsigned int Oid
Definition: postgres_ext.h:31
@ IS_NULL
Definition: primnodes.h:1520
int estimate_array_length(Node *arrayexpr)
Definition: selfuncs.c:2132
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
Definition: selfuncs.c:6400
List * add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
Definition: selfuncs.c:6618
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
Selectivity indexSelectivity
Definition: selfuncs.h:124
Cost indexStartupCost
Definition: selfuncs.h:122
double indexCorrelation
Definition: selfuncs.h:125
double num_sa_scans
Definition: selfuncs.h:131
Cost indexTotalCost
Definition: selfuncs.h:123
double numIndexPages
Definition: selfuncs.h:128
double numIndexTuples
Definition: selfuncs.h:129
List * indexquals
Definition: pathnodes.h:1304
NullTestType nulltesttype
Definition: primnodes.h:1527
Oid opno
Definition: primnodes.h:551
Expr * clause
Definition: pathnodes.h:2075

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert(), ATTSTATSSLOT_NUMBERS, BoolGetDatum, BTEqualStrategyNumber, BTLessStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, elog, ERROR, estimate_array_length(), free_attstatsslot(), VariableStatData::freefunc, genericcostestimate(), get_attstatsslot(), get_index_stats_hook, get_op_opfamily_strategy(), get_opfamily_member(), get_relation_stats_hook, HeapTupleIsValid, IndexPath::indexclauses, IndexClause::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexClause::indexquals, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, RangeTblEntry::inh, Int16GetDatum, InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, MemSet, NIL, AttStatsSlot::nnumbers, nodeTag, NullTest::nulltesttype, GenericCosts::num_sa_scans, AttStatsSlot::numbers, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, ObjectIdGetDatum, OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, RowCompareExpr::opnos, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by bthandler().
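
A small standalone sketch of the two descent charges described above, with invented index size, tree height and cost parameters:

#include <math.h>
#include <stdio.h>

/* Hypothetical worked example of the btree descent-cost charges added in
 * btcostestimate(); inputs are made up for illustration. */
int main(void)
{
    double index_tuples = 1000000.0;    /* leaf tuples in the btree */
    double tree_height = 2.0;           /* levels above the leaf pages */
    double num_sa_scans = 1.0;          /* no ScalarArrayOpExpr quals */
    double cpu_operator_cost = 0.0025;

    /* ~log2(N) comparisons to descend to the correct leaf */
    double comparisonCost =
        ceil(log(index_tuples) / log(2.0)) * cpu_operator_cost;   /* 20 * 0.0025 */

    /* per-page CPU charge: 50x cpu_operator_cost per level, leaf included */
    double pageCost = (tree_height + 1.0) * 50.0 * cpu_operator_cost;

    double startupDescent = comparisonCost + pageCost;
    double totalDescent = num_sa_scans * (comparisonCost + pageCost);

    printf("descent: startup %.4f, total %.4f\n", startupDescent, totalDescent);
    return 0;
}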

◆ convert_bytea_to_scalar()

static void convert_bytea_to_scalar ( Datum  value,
double *  scaledvalue,
Datum  lobound,
double *  scaledlobound,
Datum  hibound,
double *  scaledhibound 
)
static

Definition at line 4705 of file selfuncs.c.

4711 {
4712  bytea *valuep = DatumGetByteaPP(value);
4713  bytea *loboundp = DatumGetByteaPP(lobound);
4714  bytea *hiboundp = DatumGetByteaPP(hibound);
4715  int rangelo,
4716  rangehi,
4717  valuelen = VARSIZE_ANY_EXHDR(valuep),
4718  loboundlen = VARSIZE_ANY_EXHDR(loboundp),
4719  hiboundlen = VARSIZE_ANY_EXHDR(hiboundp),
4720  i,
4721  minlen;
4722  unsigned char *valstr = (unsigned char *) VARDATA_ANY(valuep);
4723  unsigned char *lostr = (unsigned char *) VARDATA_ANY(loboundp);
4724  unsigned char *histr = (unsigned char *) VARDATA_ANY(hiboundp);
4725 
4726  /*
4727  * Assume bytea data is uniformly distributed across all byte values.
4728  */
4729  rangelo = 0;
4730  rangehi = 255;
4731 
4732  /*
4733  * Now strip any common prefix of the three strings.
4734  */
4735  minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4736  for (i = 0; i < minlen; i++)
4737  {
4738  if (*lostr != *histr || *lostr != *valstr)
4739  break;
4740  lostr++, histr++, valstr++;
4741  loboundlen--, hiboundlen--, valuelen--;
4742  }
4743 
4744  /*
4745  * Now we can do the conversions.
4746  */
4747  *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4748  *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4749  *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4750 }
#define DatumGetByteaPP(X)
Definition: fmgr.h:291
static struct @151 value
int i
Definition: isn.c:73
#define VARDATA_ANY(PTR)
Definition: postgres.h:361
#define VARSIZE_ANY_EXHDR(PTR)
Definition: postgres.h:354
static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen, int rangelo, int rangehi)
Definition: selfuncs.c:4753
Definition: c.h:622

References convert_one_bytea_to_scalar(), DatumGetByteaPP, i, Min, value, VARDATA_ANY, and VARSIZE_ANY_EXHDR.

Referenced by convert_to_scalar().

◆ convert_numeric_to_scalar()

static double convert_numeric_to_scalar ( Datum  value,
Oid  typid,
bool *  failure 
)
static

Definition at line 4432 of file selfuncs.c.

4433 {
4434  switch (typid)
4435  {
4436  case BOOLOID:
4437  return (double) DatumGetBool(value);
4438  case INT2OID:
4439  return (double) DatumGetInt16(value);
4440  case INT4OID:
4441  return (double) DatumGetInt32(value);
4442  case INT8OID:
4443  return (double) DatumGetInt64(value);
4444  case FLOAT4OID:
4445  return (double) DatumGetFloat4(value);
4446  case FLOAT8OID:
4447  return (double) DatumGetFloat8(value);
4448  case NUMERICOID:
4449  /* Note: out-of-range values will be clamped to +-HUGE_VAL */
4450  return (double)
4451  DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
4452  value));
4453  case OIDOID:
4454  case REGPROCOID:
4455  case REGPROCEDUREOID:
4456  case REGOPEROID:
4457  case REGOPERATOROID:
4458  case REGCLASSOID:
4459  case REGTYPEOID:
4460  case REGCONFIGOID:
4461  case REGDICTIONARYOID:
4462  case REGROLEOID:
4463  case REGNAMESPACEOID:
4464  /* we can treat OIDs as integers... */
4465  return (double) DatumGetObjectId(value);
4466  }
4467 
4468  *failure = true;
4469  return 0;
4470 }
Datum numeric_float8_no_overflow(PG_FUNCTION_ARGS)
Definition: numeric.c:4459
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:631
#define DatumGetObjectId(X)
Definition: postgres.h:544
static float4 DatumGetFloat4(Datum X)
Definition: postgres.h:708
#define DatumGetFloat8(X)
Definition: postgres.h:758
#define DatumGetInt32(X)
Definition: postgres.h:516
#define DatumGetInt16(X)
Definition: postgres.h:488
#define DatumGetInt64(X)
Definition: postgres.h:651

References DatumGetBool, DatumGetFloat4(), DatumGetFloat8, DatumGetInt16, DatumGetInt32, DatumGetInt64, DatumGetObjectId, DirectFunctionCall1, numeric_float8_no_overflow(), and value.

Referenced by convert_to_scalar().

◆ convert_one_bytea_to_scalar()

static double convert_one_bytea_to_scalar ( unsigned char *  value,
int  valuelen,
int  rangelo,
int  rangehi 
)
static

Definition at line 4753 of file selfuncs.c.

4755 {
4756  double num,
4757  denom,
4758  base;
4759 
4760  if (valuelen <= 0)
4761  return 0.0; /* empty string has scalar value 0 */
4762 
4763  /*
4764  * Since base is 256, need not consider more than about 10 chars (even
4765  * this many seems like overkill)
4766  */
4767  if (valuelen > 10)
4768  valuelen = 10;
4769 
4770  /* Convert initial characters to fraction */
4771  base = rangehi - rangelo + 1;
4772  num = 0.0;
4773  denom = base;
4774  while (valuelen-- > 0)
4775  {
4776  int ch = *value++;
4777 
4778  if (ch < rangelo)
4779  ch = rangelo - 1;
4780  else if (ch > rangehi)
4781  ch = rangehi + 1;
4782  num += ((double) (ch - rangelo)) / denom;
4783  denom *= base;
4784  }
4785 
4786  return num;
4787 }

References value.

Referenced by convert_bytea_to_scalar().

◆ convert_one_string_to_scalar()

static double convert_one_string_to_scalar ( char *  value,
int  rangelo,
int  rangehi 
)
static

Definition at line 4573 of file selfuncs.c.

4574 {
4575  int slen = strlen(value);
4576  double num,
4577  denom,
4578  base;
4579 
4580  if (slen <= 0)
4581  return 0.0; /* empty string has scalar value 0 */
4582 
4583  /*
4584  * There seems little point in considering more than a dozen bytes from
4585  * the string. Since base is at least 10, that will give us nominal
4586  * resolution of at least 12 decimal digits, which is surely far more
4587  * precision than this estimation technique has got anyway (especially in
4588  * non-C locales). Also, even with the maximum possible base of 256, this
4589  * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4590  * overflow on any known machine.
4591  */
4592  if (slen > 12)
4593  slen = 12;
4594 
4595  /* Convert initial characters to fraction */
4596  base = rangehi - rangelo + 1;
4597  num = 0.0;
4598  denom = base;
4599  while (slen-- > 0)
4600  {
4601  int ch = (unsigned char) *value++;
4602 
4603  if (ch < rangelo)
4604  ch = rangelo - 1;
4605  else if (ch > rangehi)
4606  ch = rangehi + 1;
4607  num += ((double) (ch - rangelo)) / denom;
4608  denom *= base;
4609  }
4610 
4611  return num;
4612 }

References value.

Referenced by convert_string_to_scalar().
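
The base-N mapping is easiest to see in isolation. Below is a hedged standalone re-implementation (string_to_fraction is a made-up name, not a PostgreSQL function) that reproduces the same arithmetic for a short string:

#include <stdio.h>
#include <string.h>

/* Hypothetical standalone version of the base-(rangehi-rangelo+1) mapping
 * performed by convert_one_string_to_scalar(); not PostgreSQL code. */
static double string_to_fraction(const char *s, int rangelo, int rangehi)
{
    double num = 0.0;
    double base = (double) (rangehi - rangelo + 1);
    double denom = base;
    int    len = (int) strlen(s);

    if (len > 12)               /* same 12-byte cap as the real function */
        len = 12;
    for (int i = 0; i < len; i++)
    {
        int ch = (unsigned char) s[i];

        if (ch < rangelo)
            ch = rangelo - 1;
        else if (ch > rangehi)
            ch = rangehi + 1;
        num += ((double) (ch - rangelo)) / denom;
        denom *= base;
    }
    return num;
}

int main(void)
{
    /* With rangelo = 'a' and rangehi = 'z' (base 26), "abc" maps to
     * 0/26 + 1/26^2 + 2/26^3 ~= 0.00159. */
    printf("%.5f\n", string_to_fraction("abc", 'a', 'z'));
    return 0;
}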

◆ convert_string_datum()

static char * convert_string_datum ( Datum  value,
Oid  typid,
Oid  collid,
bool *  failure 
)
static

Definition at line 4624 of file selfuncs.c.

4625 {
4626  char *val;
4627 
4628  switch (typid)
4629  {
4630  case CHAROID:
4631  val = (char *) palloc(2);
4632  val[0] = DatumGetChar(value);
4633  val[1] = '\0';
4634  break;
4635  case BPCHAROID:
4636  case VARCHAROID:
4637  case TEXTOID:
4638  val = TextDatumGetCString(value);
4639  break;
4640  case NAMEOID:
4641  {
4642  NameData *nm = (NameData *) DatumGetPointer(value);
4643 
4644  val = pstrdup(NameStr(*nm));
4645  break;
4646  }
4647  default:
4648  *failure = true;
4649  return NULL;
4650  }
4651 
4652  if (!lc_collate_is_c(collid))
4653  {
4654  char *xfrmstr;
4655  size_t xfrmlen;
4656  size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4657 
4658  /*
4659  * XXX: We could guess at a suitable output buffer size and only call
4660  * strxfrm twice if our guess is too small.
4661  *
4662  * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
4663  * bogus data or set an error. This is not really a problem unless it
4664  * crashes since it will only give an estimation error and nothing
4665  * fatal.
4666  */
4667  xfrmlen = strxfrm(NULL, val, 0);
4668 #ifdef WIN32
4669 
4670  /*
4671  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4672  * of trying to allocate this much memory (and fail), just return the
4673  * original string unmodified as if we were in the C locale.
4674  */
4675  if (xfrmlen == INT_MAX)
4676  return val;
4677 #endif
4678  xfrmstr = (char *) palloc(xfrmlen + 1);
4679  xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4680 
4681  /*
4682  * Some systems (e.g., glibc) can return a smaller value from the
4683  * second call than the first; thus the Assert must be <= not ==.
4684  */
4685  Assert(xfrmlen2 <= xfrmlen);
4686  pfree(val);
4687  val = xfrmstr;
4688  }
4689 
4690  return val;
4691 }
#define TextDatumGetCString(d)
Definition: builtins.h:86
#define NameStr(name)
Definition: c.h:681
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:155
long val
Definition: informix.c:664
char * pstrdup(const char *in)
Definition: mcxt.c:1305
void pfree(void *pointer)
Definition: mcxt.c:1175
bool lc_collate_is_c(Oid collation)
Definition: pg_locale.c:1336
#define DatumGetPointer(X)
Definition: postgres.h:593
#define DatumGetChar(X)
Definition: postgres.h:453
Definition: c.h:676

References Assert(), DatumGetChar, DatumGetPointer, lc_collate_is_c(), NameStr, palloc(), pfree(), PG_USED_FOR_ASSERTS_ONLY, pstrdup(), TextDatumGetCString, val, and value.

Referenced by convert_to_scalar().
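
The two-call strxfrm() pattern used above (first call with a NULL buffer to learn the transformed length, then a second call into a buffer of that size) can be tried outside the server with a minimal sketch; the input string is arbitrary:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hedged standalone sketch of the strxfrm() sizing-then-transform protocol.
 * In the plain C locale the transform is typically the identity; only the
 * call pattern is of interest here. */
int main(void)
{
    const char *val = "selfuncs";
    size_t xfrmlen = strxfrm(NULL, val, 0);          /* sizing call */
    char  *xfrmstr = malloc(xfrmlen + 1);

    if (xfrmstr == NULL)
        return 1;
    strxfrm(xfrmstr, val, xfrmlen + 1);              /* actual transform */
    printf("transformed length: %zu\n", xfrmlen);
    free(xfrmstr);
    return 0;
}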

◆ convert_string_to_scalar()

static void convert_string_to_scalar ( char *  value,
double *  scaledvalue,
char *  lobound,
double *  scaledlobound,
char *  hibound,
double *  scaledhibound 
)
static

Definition at line 4493 of file selfuncs.c.

4499 {
4500  int rangelo,
4501  rangehi;
4502  char *sptr;
4503 
4504  rangelo = rangehi = (unsigned char) hibound[0];
4505  for (sptr = lobound; *sptr; sptr++)
4506  {
4507  if (rangelo > (unsigned char) *sptr)
4508  rangelo = (unsigned char) *sptr;
4509  if (rangehi < (unsigned char) *sptr)
4510  rangehi = (unsigned char) *sptr;
4511  }
4512  for (sptr = hibound; *sptr; sptr++)
4513  {
4514  if (rangelo > (unsigned char) *sptr)
4515  rangelo = (unsigned char) *sptr;
4516  if (rangehi < (unsigned char) *sptr)
4517  rangehi = (unsigned char) *sptr;
4518  }
4519  /* If range includes any upper-case ASCII chars, make it include all */
4520  if (rangelo <= 'Z' && rangehi >= 'A')
4521  {
4522  if (rangelo > 'A')
4523  rangelo = 'A';
4524  if (rangehi < 'Z')
4525  rangehi = 'Z';
4526  }
4527  /* Ditto lower-case */
4528  if (rangelo <= 'z' && rangehi >= 'a')
4529  {
4530  if (rangelo > 'a')
4531  rangelo = 'a';
4532  if (rangehi < 'z')
4533  rangehi = 'z';
4534  }
4535  /* Ditto digits */
4536  if (rangelo <= '9' && rangehi >= '0')
4537  {
4538  if (rangelo > '0')
4539  rangelo = '0';
4540  if (rangehi < '9')
4541  rangehi = '9';
4542  }
4543 
4544  /*
4545  * If range includes less than 10 chars, assume we have not got enough
4546  * data, and make it include regular ASCII set.
4547  */
4548  if (rangehi - rangelo < 9)
4549  {
4550  rangelo = ' ';
4551  rangehi = 127;
4552  }
4553 
4554  /*
4555  * Now strip any common prefix of the three strings.
4556  */
4557  while (*lobound)
4558  {
4559  if (*lobound != *hibound || *lobound != *value)
4560  break;
4561  lobound++, hibound++, value++;
4562  }
4563 
4564  /*
4565  * Now we can do the conversions.
4566  */
4567  *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4568  *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4569  *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4570 }
static double convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
Definition: selfuncs.c:4573

References convert_one_string_to_scalar(), and value.

Referenced by convert_to_scalar().

◆ convert_timevalue_to_scalar()

static double convert_timevalue_to_scalar ( Datum  value,
Oid  typid,
bool *  failure 
)
static

Definition at line 4796 of file selfuncs.c.

4797 {
4798  switch (typid)
4799  {
4800  case TIMESTAMPOID:
4801  return DatumGetTimestamp(value);
4802  case TIMESTAMPTZOID:
4803  return DatumGetTimestampTz(value);
4804  case DATEOID:
4805  return date2timestamp_no_overflow(DatumGetDateADT(value));
4806  case INTERVALOID:
4807  {
4808  Interval *interval = DatumGetIntervalP(value);
4809 
4810  /*
4811  * Convert the month part of Interval to days using assumed
4812  * average month length of 365.25/12.0 days. Not too
4813  * accurate, but plenty good enough for our purposes.
4814  */
4815  return interval->time + interval->day * (double) USECS_PER_DAY +
4816  interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4817  }
4818  case TIMEOID:
4819  return DatumGetTimeADT(value);
4820  case TIMETZOID:
4821  {
4822  TimeTzADT *timetz = DatumGetTimeTzADTP(value);
4823 
4824  /* use GMT-equivalent time */
4825  return (double) (timetz->time + (timetz->zone * 1000000.0));
4826  }
4827  }
4828 
4829  *failure = true;
4830  return 0;
4831 }
#define MONTHS_PER_YEAR
Definition: timestamp.h:108
#define USECS_PER_DAY
Definition: timestamp.h:130
#define DAYS_PER_YEAR
Definition: timestamp.h:107
double date2timestamp_no_overflow(DateADT dateVal)
Definition: date.c:713
#define DatumGetTimeTzADTP(X)
Definition: date.h:55
#define DatumGetTimeADT(X)
Definition: date.h:54
#define DatumGetDateADT(X)
Definition: date.h:53
Definition: date.h:28
TimeADT time
Definition: date.h:29
int32 zone
Definition: date.h:30
#define DatumGetIntervalP(X)
Definition: timestamp.h:29
#define DatumGetTimestamp(X)
Definition: timestamp.h:27
#define DatumGetTimestampTz(X)
Definition: timestamp.h:28

References date2timestamp_no_overflow(), DatumGetDateADT, DatumGetIntervalP, DatumGetTimeADT, DatumGetTimestamp, DatumGetTimestampTz, DatumGetTimeTzADTP, DAYS_PER_YEAR, interval::month, MONTHS_PER_YEAR, TimeTzADT::time, interval::time, USECS_PER_DAY, value, and TimeTzADT::zone.

Referenced by convert_to_scalar().
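
The INTERVALOID branch folds months into days using the assumed 365.25/12 average and expresses the whole interval in microseconds. A small arithmetic sketch of the same formula (the constants are written out because the timestamp.h macros are not included here; the interval values are assumptions):

#include <stdio.h>

int
main(void)
{
    /* Hypothetical interval: 1 month, 2 days, 3 hours (time part in usecs). */
    double month = 1.0;
    double day = 2.0;
    double time_usec = 3.0 * 60 * 60 * 1000000.0;

    double usecs_per_day = 86400.0 * 1000000.0;  /* USECS_PER_DAY              */
    double days_per_month = 365.25 / 12.0;       /* DAYS_PER_YEAR / MONTHS_PER_YEAR */

    /* Same arithmetic as the INTERVALOID case above. */
    double scaled = time_usec + day * usecs_per_day +
        month * (days_per_month * usecs_per_day);

    printf("scaled interval = %.0f microseconds (~%.2f days)\n",
           scaled, scaled / usecs_per_day);
    return 0;
}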

◆ convert_to_scalar()

static bool convert_to_scalar ( Datum  value,
Oid  valuetypid,
Oid  collid,
double *  scaledvalue,
Datum  lobound,
Datum  hibound,
Oid  boundstypid,
double *  scaledlobound,
double *  scaledhibound 
)
static

Definition at line 4286 of file selfuncs.c.

4289 {
4290  bool failure = false;
4291 
4292  /*
4293  * Both the valuetypid and the boundstypid should exactly match the
4294  * declared input type(s) of the operator we are invoked for. However,
4295  * extensions might try to use scalarineqsel as estimator for operators
4296  * with input type(s) we don't handle here; in such cases, we want to
4297  * return false, not fail. In any case, we mustn't assume that valuetypid
4298  * and boundstypid are identical.
4299  *
4300  * XXX The histogram we are interpolating between points of could belong
4301  * to a column that's only binary-compatible with the declared type. In
4302  * essence we are assuming that the semantics of binary-compatible types
4303  * are enough alike that we can use a histogram generated with one type's
4304  * operators to estimate selectivity for the other's. This is outright
4305  * wrong in some cases --- in particular signed versus unsigned
4306  * interpretation could trip us up. But it's useful enough in the
4307  * majority of cases that we do it anyway. Should think about more
4308  * rigorous ways to do it.
4309  */
4310  switch (valuetypid)
4311  {
4312  /*
4313  * Built-in numeric types
4314  */
4315  case BOOLOID:
4316  case INT2OID:
4317  case INT4OID:
4318  case INT8OID:
4319  case FLOAT4OID:
4320  case FLOAT8OID:
4321  case NUMERICOID:
4322  case OIDOID:
4323  case REGPROCOID:
4324  case REGPROCEDUREOID:
4325  case REGOPEROID:
4326  case REGOPERATOROID:
4327  case REGCLASSOID:
4328  case REGTYPEOID:
4329  case REGCONFIGOID:
4330  case REGDICTIONARYOID:
4331  case REGROLEOID:
4332  case REGNAMESPACEOID:
4333  *scaledvalue = convert_numeric_to_scalar(value, valuetypid,
4334  &failure);
4335  *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
4336  &failure);
4337  *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid,
4338  &failure);
4339  return !failure;
4340 
4341  /*
4342  * Built-in string types
4343  */
4344  case CHAROID:
4345  case BPCHAROID:
4346  case VARCHAROID:
4347  case TEXTOID:
4348  case NAMEOID:
4349  {
4350  char *valstr = convert_string_datum(value, valuetypid,
4351  collid, &failure);
4352  char *lostr = convert_string_datum(lobound, boundstypid,
4353  collid, &failure);
4354  char *histr = convert_string_datum(hibound, boundstypid,
4355  collid, &failure);
4356 
4357  /*
4358  * Bail out if any of the values is not of string type. We
4359  * might leak converted strings for the other value(s), but
4360  * that's not worth troubling over.
4361  */
4362  if (failure)
4363  return false;
4364 
4365  convert_string_to_scalar(valstr, scaledvalue,
4366  lostr, scaledlobound,
4367  histr, scaledhibound);
4368  pfree(valstr);
4369  pfree(lostr);
4370  pfree(histr);
4371  return true;
4372  }
4373 
4374  /*
4375  * Built-in bytea type
4376  */
4377  case BYTEAOID:
4378  {
4379  /* We only support bytea vs bytea comparison */
4380  if (boundstypid != BYTEAOID)
4381  return false;
4382  convert_bytea_to_scalar(value, scaledvalue,
4383  lobound, scaledlobound,
4384  hibound, scaledhibound);
4385  return true;
4386  }
4387 
4388  /*
4389  * Built-in time types
4390  */
4391  case TIMESTAMPOID:
4392  case TIMESTAMPTZOID:
4393  case DATEOID:
4394  case INTERVALOID:
4395  case TIMEOID:
4396  case TIMETZOID:
4397  *scaledvalue = convert_timevalue_to_scalar(value, valuetypid,
4398  &failure);
4399  *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid,
4400  &failure);
4401  *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid,
4402  &failure);
4403  return !failure;
4404 
4405  /*
4406  * Built-in network types
4407  */
4408  case INETOID:
4409  case CIDROID:
4410  case MACADDROID:
4411  case MACADDR8OID:
4412  *scaledvalue = convert_network_to_scalar(value, valuetypid,
4413  &failure);
4414  *scaledlobound = convert_network_to_scalar(lobound, boundstypid,
4415  &failure);
4416  *scaledhibound = convert_network_to_scalar(hibound, boundstypid,
4417  &failure);
4418  return !failure;
4419  }
4420  /* Don't know how to convert */
4421  *scaledvalue = *scaledlobound = *scaledhibound = 0;
4422  return false;
4423 }
double convert_network_to_scalar(Datum value, Oid typid, bool *failure)
Definition: network.c:1502
static void convert_string_to_scalar(char *value, double *scaledvalue, char *lobound, double *scaledlobound, char *hibound, double *scaledhibound)
Definition: selfuncs.c:4493
static double convert_timevalue_to_scalar(Datum value, Oid typid, bool *failure)
Definition: selfuncs.c:4796
static double convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
Definition: selfuncs.c:4432
static void convert_bytea_to_scalar(Datum value, double *scaledvalue, Datum lobound, double *scaledlobound, Datum hibound, double *scaledhibound)
Definition: selfuncs.c:4705
static char * convert_string_datum(Datum value, Oid typid, Oid collid, bool *failure)
Definition: selfuncs.c:4624

References convert_bytea_to_scalar(), convert_network_to_scalar(), convert_numeric_to_scalar(), convert_string_datum(), convert_string_to_scalar(), convert_timevalue_to_scalar(), pfree(), and value.

Referenced by ineq_histogram_selectivity().
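
The scaled values are only used to interpolate a position between two histogram bin boundaries; the absolute numbers have no meaning on their own. A minimal sketch of that use (my own illustration of how a caller such as ineq_histogram_selectivity could consume the outputs, not the actual caller's code):

#include <stdio.h>

/* Given scaled bound and probe values, estimate the fraction of a histogram
 * bin that lies below the probe, clamped to [0, 1]. */
static double
bin_fraction(double scaledlo, double scaledval, double scaledhi)
{
    double frac;

    if (scaledhi <= scaledlo)
        return 0.5;             /* degenerate bin: assume the midpoint */
    frac = (scaledval - scaledlo) / (scaledhi - scaledlo);
    if (frac < 0.0)
        frac = 0.0;
    else if (frac > 1.0)
        frac = 1.0;
    return frac;
}

int
main(void)
{
    /* e.g. numeric bounds 100 and 200 with probe value 130 -> 30% of the bin */
    printf("binfrac = %.2f\n", bin_fraction(100.0, 130.0, 200.0));
    return 0;
}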

◆ eqjoinsel()

Datum eqjoinsel ( PG_FUNCTION_ARGS  )

Definition at line 2237 of file selfuncs.c.

2238 {
2239  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2240  Oid operator = PG_GETARG_OID(1);
2241  List *args = (List *) PG_GETARG_POINTER(2);
2242 
2243 #ifdef NOT_USED
2244  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2245 #endif
2246  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2247  Oid collation = PG_GET_COLLATION();
2248  double selec;
2249  double selec_inner;
2250  VariableStatData vardata1;
2251  VariableStatData vardata2;
2252  double nd1;
2253  double nd2;
2254  bool isdefault1;
2255  bool isdefault2;
2256  Oid opfuncoid;
2257  AttStatsSlot sslot1;
2258  AttStatsSlot sslot2;
2259  Form_pg_statistic stats1 = NULL;
2260  Form_pg_statistic stats2 = NULL;
2261  bool have_mcvs1 = false;
2262  bool have_mcvs2 = false;
2263  bool join_is_reversed;
2264  RelOptInfo *inner_rel;
2265 
2266  get_join_variables(root, args, sjinfo,
2267  &vardata1, &vardata2, &join_is_reversed);
2268 
2269  nd1 = get_variable_numdistinct(&vardata1, &isdefault1);
2270  nd2 = get_variable_numdistinct(&vardata2, &isdefault2);
2271 
2272  opfuncoid = get_opcode(operator);
2273 
2274  memset(&sslot1, 0, sizeof(sslot1));
2275  memset(&sslot2, 0, sizeof(sslot2));
2276 
2277  if (HeapTupleIsValid(vardata1.statsTuple))
2278  {
2279  /* note we allow use of nullfrac regardless of security check */
2280  stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
2281  if (statistic_proc_security_check(&vardata1, opfuncoid))
2282  have_mcvs1 = get_attstatsslot(&sslot1, vardata1.statsTuple,
2283  STATISTIC_KIND_MCV, InvalidOid,
2284  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2285  }
2286 
2287  if (HeapTupleIsValid(vardata2.statsTuple))
2288  {
2289  /* note we allow use of nullfrac regardless of security check */
2290  stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
2291  if (statistic_proc_security_check(&vardata2, opfuncoid))
2292  have_mcvs2 = get_attstatsslot(&sslot2, vardata2.statsTuple,
2293  STATISTIC_KIND_MCV, InvalidOid,
2294  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2295  }
2296 
2297  /* We need to compute the inner-join selectivity in all cases */
2298  selec_inner = eqjoinsel_inner(opfuncoid, collation,
2299  &vardata1, &vardata2,
2300  nd1, nd2,
2301  isdefault1, isdefault2,
2302  &sslot1, &sslot2,
2303  stats1, stats2,
2304  have_mcvs1, have_mcvs2);
2305 
2306  switch (sjinfo->jointype)
2307  {
2308  case JOIN_INNER:
2309  case JOIN_LEFT:
2310  case JOIN_FULL:
2311  selec = selec_inner;
2312  break;
2313  case JOIN_SEMI:
2314  case JOIN_ANTI:
2315 
2316  /*
2317  * Look up the join's inner relation. min_righthand is sufficient
2318  * information because neither SEMI nor ANTI joins permit any
2319  * reassociation into or out of their RHS, so the righthand will
2320  * always be exactly that set of rels.
2321  */
2322  inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2323 
2324  if (!join_is_reversed)
2325  selec = eqjoinsel_semi(opfuncoid, collation,
2326  &vardata1, &vardata2,
2327  nd1, nd2,
2328  isdefault1, isdefault2,
2329  &sslot1, &sslot2,
2330  stats1, stats2,
2331  have_mcvs1, have_mcvs2,
2332  inner_rel);
2333  else
2334  {
2335  Oid commop = get_commutator(operator);
2336  Oid commopfuncoid = OidIsValid(commop) ? get_opcode(commop) : InvalidOid;
2337 
2338  selec = eqjoinsel_semi(commopfuncoid, collation,
2339  &vardata2, &vardata1,
2340  nd2, nd1,
2341  isdefault2, isdefault1,
2342  &sslot2, &sslot1,
2343  stats2, stats1,
2344  have_mcvs2, have_mcvs1,
2345  inner_rel);
2346  }
2347 
2348  /*
2349  * We should never estimate the output of a semijoin to be more
2350  * rows than we estimate for an inner join with the same input
2351  * rels and join condition; it's obviously impossible for that to
2352  * happen. The former estimate is N1 * Ssemi while the latter is
2353  * N1 * N2 * Sinner, so we may clamp Ssemi <= N2 * Sinner. Doing
2354  * this is worthwhile because of the shakier estimation rules we
2355  * use in eqjoinsel_semi, particularly in cases where it has to
2356  * punt entirely.
2357  */
2358  selec = Min(selec, inner_rel->rows * selec_inner);
2359  break;
2360  default:
2361  /* other values not expected here */
2362  elog(ERROR, "unrecognized join type: %d",
2363  (int) sjinfo->jointype);
2364  selec = 0; /* keep compiler quiet */
2365  break;
2366  }
2367 
2368  free_attstatsslot(&sslot1);
2369  free_attstatsslot(&sslot2);
2370 
2371  ReleaseVariableStats(vardata1);
2372  ReleaseVariableStats(vardata2);
2373 
2374  CLAMP_PROBABILITY(selec);
2375 
2376  PG_RETURN_FLOAT8((float8) selec);
2377 }
double float8
Definition: c.h:565
#define PG_GETARG_OID(n)
Definition: fmgr.h:275
#define PG_RETURN_FLOAT8(x)
Definition: fmgr.h:367
#define PG_GETARG_POINTER(n)
Definition: fmgr.h:276
#define PG_GET_COLLATION()
Definition: fmgr.h:198
#define PG_GETARG_INT16(n)
Definition: fmgr.h:271
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1266
Oid get_commutator(Oid opno)
Definition: lsyscache.c:1490
JoinType
Definition: nodes.h:744
@ JOIN_SEMI
Definition: nodes.h:763
@ JOIN_FULL
Definition: nodes.h:751
@ JOIN_LEFT
Definition: nodes.h:750
@ JOIN_ANTI
Definition: nodes.h:764
static RelOptInfo * find_join_input_rel(PlannerInfo *root, Relids relids)
Definition: selfuncs.c:6281
static double eqjoinsel_inner(Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2)
Definition: selfuncs.c:2386
static double eqjoinsel_semi(Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2, RelOptInfo *inner_rel)
Definition: selfuncs.c:2583
bool statistic_proc_security_check(VariableStatData *vardata, Oid func_oid)
Definition: selfuncs.c:5627
void get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo, VariableStatData *vardata1, VariableStatData *vardata2, bool *join_is_reversed)
Definition: selfuncs.c:4918
Cardinality rows
Definition: pathnodes.h:685
Relids min_righthand
Definition: pathnodes.h:2273
JoinType jointype
Definition: pathnodes.h:2276

References generate_unaccent_rules::args, ATTSTATSSLOT_NUMBERS, ATTSTATSSLOT_VALUES, CLAMP_PROBABILITY, elog, eqjoinsel_inner(), eqjoinsel_semi(), ERROR, find_join_input_rel(), free_attstatsslot(), get_attstatsslot(), get_commutator(), get_join_variables(), get_opcode(), get_variable_numdistinct(), GETSTRUCT, HeapTupleIsValid, InvalidOid, JOIN_ANTI, JOIN_FULL, JOIN_INNER, JOIN_LEFT, JOIN_SEMI, SpecialJoinInfo::jointype, Min, SpecialJoinInfo::min_righthand, OidIsValid, PG_GET_COLLATION, PG_GETARG_INT16, PG_GETARG_OID, PG_GETARG_POINTER, PG_RETURN_FLOAT8, ReleaseVariableStats, RelOptInfo::rows, statistic_proc_security_check(), and VariableStatData::statsTuple.

Referenced by neqjoinsel().
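
The clamp Ssemi <= N2 * Sinner simply enforces that a semijoin cannot pass more outer rows than the corresponding inner join would produce. A worked example with assumed numbers:

#include <stdio.h>

int
main(void)
{
    double n2 = 1000.0;          /* assumed inner relation row estimate      */
    double selec_inner = 0.0002; /* assumed inner-join selectivity           */
    double selec_semi = 0.5;     /* assumed (punted) semijoin selectivity    */

    /* Same clamp as applied above: Ssemi may not exceed N2 * Sinner. */
    double clamped = (selec_semi < n2 * selec_inner) ? selec_semi
                                                     : n2 * selec_inner;

    printf("semijoin selectivity clamped from %.3f to %.3f\n",
           selec_semi, clamped);
    return 0;
}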

◆ eqjoinsel_inner()

static double eqjoinsel_inner ( Oid  opfuncoid,
Oid  collation,
VariableStatData vardata1,
VariableStatData vardata2,
double  nd1,
double  nd2,
bool  isdefault1,
bool  isdefault2,
AttStatsSlot sslot1,
AttStatsSlot sslot2,
Form_pg_statistic  stats1,
Form_pg_statistic  stats2,
bool  have_mcvs1,
bool  have_mcvs2 
)
static

Definition at line 2386 of file selfuncs.c.

2393 {
2394  double selec;
2395 
2396  if (have_mcvs1 && have_mcvs2)
2397  {
2398  /*
2399  * We have most-common-value lists for both relations. Run through
2400  * the lists to see which MCVs actually join to each other with the
2401  * given operator. This allows us to determine the exact join
2402  * selectivity for the portion of the relations represented by the MCV
2403  * lists. We still have to estimate for the remaining population, but
2404  * in a skewed distribution this gives us a big leg up in accuracy.
2405  * For motivation see the analysis in Y. Ioannidis and S.
2406  * Christodoulakis, "On the propagation of errors in the size of join
2407  * results", Technical Report 1018, Computer Science Dept., University
2408  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2409  */
2410  LOCAL_FCINFO(fcinfo, 2);
2411  FmgrInfo eqproc;
2412  bool *hasmatch1;
2413  bool *hasmatch2;
2414  double nullfrac1 = stats1->stanullfrac;
2415  double nullfrac2 = stats2->stanullfrac;
2416  double matchprodfreq,
2417  matchfreq1,
2418  matchfreq2,
2419  unmatchfreq1,
2420  unmatchfreq2,
2421  otherfreq1,
2422  otherfreq2,
2423  totalsel1,
2424  totalsel2;
2425  int i,
2426  nmatches;
2427 
2428  fmgr_info(opfuncoid, &eqproc);
2429 
2430  /*
2431  * Save a few cycles by setting up the fcinfo struct just once. Using
2432  * FunctionCallInvoke directly also avoids failure if the eqproc
2433  * returns NULL, though really equality functions should never do
2434  * that.
2435  */
2436  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2437  NULL, NULL);
2438  fcinfo->args[0].isnull = false;
2439  fcinfo->args[1].isnull = false;
2440 
2441  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2442  hasmatch2 = (bool *) palloc0(sslot2->nvalues * sizeof(bool));
2443 
2444  /*
2445  * Note we assume that each MCV will match at most one member of the
2446  * other MCV list. If the operator isn't really equality, there could
2447  * be multiple matches --- but we don't look for them, both for speed
2448  * and because the math wouldn't add up...
2449  */
2450  matchprodfreq = 0.0;
2451  nmatches = 0;
2452  for (i = 0; i < sslot1->nvalues; i++)
2453  {
2454  int j;
2455 
2456  fcinfo->args[0].value = sslot1->values[i];
2457 
2458  for (j = 0; j < sslot2->nvalues; j++)
2459  {
2460  Datum fresult;
2461 
2462  if (hasmatch2[j])
2463  continue;
2464  fcinfo->args[1].value = sslot2->values[j];
2465  fcinfo->isnull = false;
2466  fresult = FunctionCallInvoke(fcinfo);
2467  if (!fcinfo->isnull && DatumGetBool(fresult))
2468  {
2469  hasmatch1[i] = hasmatch2[j] = true;
2470  matchprodfreq += sslot1->numbers[i] * sslot2->numbers[j];
2471  nmatches++;
2472  break;
2473  }
2474  }
2475  }
2476  CLAMP_PROBABILITY(matchprodfreq);
2477  /* Sum up frequencies of matched and unmatched MCVs */
2478  matchfreq1 = unmatchfreq1 = 0.0;
2479  for (i = 0; i < sslot1->nvalues; i++)
2480  {
2481  if (hasmatch1[i])
2482  matchfreq1 += sslot1->numbers[i];
2483  else
2484  unmatchfreq1 += sslot1->numbers[i];
2485  }
2486  CLAMP_PROBABILITY(matchfreq1);
2487  CLAMP_PROBABILITY(unmatchfreq1);
2488  matchfreq2 = unmatchfreq2 = 0.0;
2489  for (i = 0; i < sslot2->nvalues; i++)
2490  {
2491  if (hasmatch2[i])
2492  matchfreq2 += sslot2->numbers[i];
2493  else
2494  unmatchfreq2 += sslot2->numbers[i];
2495  }
2496  CLAMP_PROBABILITY(matchfreq2);
2497  CLAMP_PROBABILITY(unmatchfreq2);
2498  pfree(hasmatch1);
2499  pfree(hasmatch2);
2500 
2501  /*
2502  * Compute total frequency of non-null values that are not in the MCV
2503  * lists.
2504  */
2505  otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2506  otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2507  CLAMP_PROBABILITY(otherfreq1);
2508  CLAMP_PROBABILITY(otherfreq2);
2509 
2510  /*
2511  * We can estimate the total selectivity from the point of view of
2512  * relation 1 as: the known selectivity for matched MCVs, plus
2513  * unmatched MCVs that are assumed to match against random members of
2514  * relation 2's non-MCV population, plus non-MCV values that are
2515  * assumed to match against random members of relation 2's unmatched
2516  * MCVs plus non-MCV values.
2517  */
2518  totalsel1 = matchprodfreq;
2519  if (nd2 > sslot2->nvalues)
2520  totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - sslot2->nvalues);
2521  if (nd2 > nmatches)
2522  totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2523  (nd2 - nmatches);
2524  /* Same estimate from the point of view of relation 2. */
2525  totalsel2 = matchprodfreq;
2526  if (nd1 > sslot1->nvalues)
2527  totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - sslot1->nvalues);
2528  if (nd1 > nmatches)
2529  totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2530  (nd1 - nmatches);
2531 
2532  /*
2533  * Use the smaller of the two estimates. This can be justified in
2534  * essentially the same terms as given below for the no-stats case: to
2535  * a first approximation, we are estimating from the point of view of
2536  * the relation with smaller nd.
2537  */
2538  selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2539  }
2540  else
2541  {
2542  /*
2543  * We do not have MCV lists for both sides. Estimate the join
2544  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2545  * is plausible if we assume that the join operator is strict and the
2546  * non-null values are about equally distributed: a given non-null
2547  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2548  * of rel2, so total join rows are at most
2549  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2550  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2551  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2552  * with MIN() is an upper bound. Using the MIN() means we estimate
2553  * from the point of view of the relation with smaller nd (since the
2554  * larger nd is determining the MIN). It is reasonable to assume that
2555  * most tuples in this rel will have join partners, so the bound is
2556  * probably reasonably tight and should be taken as-is.
2557  *
2558  * XXX Can we be smarter if we have an MCV list for just one side? It
2559  * seems that if we assume equal distribution for the other side, we
2560  * end up with the same answer anyway.
2561  */
2562  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2563  double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2564 
2565  selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2566  if (nd1 > nd2)
2567  selec /= nd1;
2568  else
2569  selec /= nd2;
2570  }
2571 
2572  return selec;
2573 }
void fmgr_info(Oid functionId, FmgrInfo *finfo)
Definition: fmgr.c:126
#define InitFunctionCallInfoData(Fcinfo, Flinfo, Nargs, Collation, Context, Resultinfo)
Definition: fmgr.h:150
#define LOCAL_FCINFO(name, nargs)
Definition: fmgr.h:110
#define FunctionCallInvoke(fcinfo)
Definition: fmgr.h:172
int j
Definition: isn.c:74
void * palloc0(Size size)
Definition: mcxt.c:1099
uintptr_t Datum
Definition: postgres.h:411
Definition: fmgr.h:57

References CLAMP_PROBABILITY, DatumGetBool, fmgr_info(), FunctionCallInvoke, i, InitFunctionCallInfoData, j, LOCAL_FCINFO, AttStatsSlot::numbers, AttStatsSlot::nvalues, palloc0(), pfree(), and AttStatsSlot::values.

Referenced by eqjoinsel().
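
In the no-MCV branch the estimate reduces to MIN(1/nd1, 1/nd2) scaled by the non-null fractions. A worked sketch of that arithmetic with assumed statistics:

#include <stdio.h>

int
main(void)
{
    /* Assumed inputs: ndistinct and null fractions for the two join keys,
     * plus the input relations' row counts. */
    double nd1 = 50.0,  nullfrac1 = 0.00;
    double nd2 = 200.0, nullfrac2 = 0.10;
    double n1 = 10000.0, n2 = 40000.0;

    /* Same arithmetic as the no-MCV branch of eqjoinsel_inner. */
    double selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);

    if (nd1 > nd2)
        selec /= nd1;
    else
        selec /= nd2;

    printf("join selectivity = %.6f, estimated join rows = %.0f\n",
           selec, selec * n1 * n2);
    return 0;
}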

◆ eqjoinsel_semi()

static double eqjoinsel_semi ( Oid  opfuncoid,
Oid  collation,
VariableStatData vardata1,
VariableStatData vardata2,
double  nd1,
double  nd2,
bool  isdefault1,
bool  isdefault2,
AttStatsSlot sslot1,
AttStatsSlot sslot2,
Form_pg_statistic  stats1,
Form_pg_statistic  stats2,
bool  have_mcvs1,
bool  have_mcvs2,
RelOptInfo inner_rel 
)
static

Definition at line 2583 of file selfuncs.c.

2591 {
2592  double selec;
2593 
2594  /*
2595  * We clamp nd2 to be not more than what we estimate the inner relation's
2596  * size to be. This is intuitively somewhat reasonable since obviously
2597  * there can't be more than that many distinct values coming from the
2598  * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
2599  * likewise) is that this is the only pathway by which restriction clauses
2600  * applied to the inner rel will affect the join result size estimate,
2601  * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2602  * only the outer rel's size. If we clamped nd1 we'd be double-counting
2603  * the selectivity of outer-rel restrictions.
2604  *
2605  * We can apply this clamping both with respect to the base relation from
2606  * which the join variable comes (if there is just one), and to the
2607  * immediate inner input relation of the current join.
2608  *
2609  * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2610  * great, maybe, but it didn't come out of nowhere either. This is most
2611  * helpful when the inner relation is empty and consequently has no stats.
2612  */
2613  if (vardata2->rel)
2614  {
2615  if (nd2 >= vardata2->rel->rows)
2616  {
2617  nd2 = vardata2->rel->rows;
2618  isdefault2 = false;
2619  }
2620  }
2621  if (nd2 >= inner_rel->rows)
2622  {
2623  nd2 = inner_rel->rows;
2624  isdefault2 = false;
2625  }
2626 
2627  if (have_mcvs1 && have_mcvs2 && OidIsValid(opfuncoid))
2628  {
2629  /*
2630  * We have most-common-value lists for both relations. Run through
2631  * the lists to see which MCVs actually join to each other with the
2632  * given operator. This allows us to determine the exact join
2633  * selectivity for the portion of the relations represented by the MCV
2634  * lists. We still have to estimate for the remaining population, but
2635  * in a skewed distribution this gives us a big leg up in accuracy.
2636  */
2637  LOCAL_FCINFO(fcinfo, 2);
2638  FmgrInfo eqproc;
2639  bool *hasmatch1;
2640  bool *hasmatch2;
2641  double nullfrac1 = stats1->stanullfrac;
2642  double matchfreq1,
2643  uncertainfrac,
2644  uncertain;
2645  int i,
2646  nmatches,
2647  clamped_nvalues2;
2648 
2649  /*
2650  * The clamping above could have resulted in nd2 being less than
2651  * sslot2->nvalues; in which case, we assume that precisely the nd2
2652  * most common values in the relation will appear in the join input,
2653  * and so compare to only the first nd2 members of the MCV list. Of
2654  * course this is frequently wrong, but it's the best bet we can make.
2655  */
2656  clamped_nvalues2 = Min(sslot2->nvalues, nd2);
2657 
2658  fmgr_info(opfuncoid, &eqproc);
2659 
2660  /*
2661  * Save a few cycles by setting up the fcinfo struct just once. Using
2662  * FunctionCallInvoke directly also avoids failure if the eqproc
2663  * returns NULL, though really equality functions should never do
2664  * that.
2665  */
2666  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2667  NULL, NULL);
2668  fcinfo->args[0].isnull = false;
2669  fcinfo->args[1].isnull = false;
2670 
2671  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2672  hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2673 
2674  /*
2675  * Note we assume that each MCV will match at most one member of the
2676  * other MCV list. If the operator isn't really equality, there could
2677  * be multiple matches --- but we don't look for them, both for speed
2678  * and because the math wouldn't add up...
2679  */
2680  nmatches = 0;
2681  for (i = 0; i < sslot1->nvalues; i++)
2682  {
2683  int j;
2684 
2685  fcinfo->args[0].value = sslot1->values[i];
2686 
2687  for (j = 0; j < clamped_nvalues2; j++)
2688  {
2689  Datum fresult;
2690 
2691  if (hasmatch2[j])
2692  continue;
2693  fcinfo->args[1].value = sslot2->values[j];
2694  fcinfo->isnull = false;
2695  fresult = FunctionCallInvoke(fcinfo);
2696  if (!fcinfo->isnull && DatumGetBool(fresult))
2697  {
2698  hasmatch1[i] = hasmatch2[j] = true;
2699  nmatches++;
2700  break;
2701  }
2702  }
2703  }
2704  /* Sum up frequencies of matched MCVs */
2705  matchfreq1 = 0.0;
2706  for (i = 0; i < sslot1->nvalues; i++)
2707  {
2708  if (hasmatch1[i])
2709  matchfreq1 += sslot1->numbers[i];
2710  }
2711  CLAMP_PROBABILITY(matchfreq1);
2712  pfree(hasmatch1);
2713  pfree(hasmatch2);
2714 
2715  /*
2716  * Now we need to estimate the fraction of relation 1 that has at
2717  * least one join partner. We know for certain that the matched MCVs
2718  * do, so that gives us a lower bound, but we're really in the dark
2719  * about everything else. Our crude approach is: if nd1 <= nd2 then
2720  * assume all non-null rel1 rows have join partners, else assume for
2721  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2722  * can discount the known-matched MCVs from the distinct-values counts
2723  * before doing the division.
2724  *
2725  * Crude as the above is, it's completely useless if we don't have
2726  * reliable ndistinct values for both sides. Hence, if either nd1 or
2727  * nd2 is default, punt and assume half of the uncertain rows have
2728  * join partners.
2729  */
2730  if (!isdefault1 && !isdefault2)
2731  {
2732  nd1 -= nmatches;
2733  nd2 -= nmatches;
2734  if (nd1 <= nd2 || nd2 < 0)
2735  uncertainfrac = 1.0;
2736  else
2737  uncertainfrac = nd2 / nd1;
2738  }
2739  else
2740  uncertainfrac = 0.5;
2741  uncertain = 1.0 - matchfreq1 - nullfrac1;
2742  CLAMP_PROBABILITY(uncertain);
2743  selec = matchfreq1 + uncertainfrac * uncertain;
2744  }
2745  else
2746  {
2747  /*
2748  * Without MCV lists for both sides, we can only use the heuristic
2749  * about nd1 vs nd2.
2750  */
2751  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2752 
2753  if (!isdefault1 && !isdefault2)
2754  {
2755  if (nd1 <= nd2 || nd2 < 0)
2756  selec = 1.0 - nullfrac1;
2757  else
2758  selec = (nd2 / nd1) * (1.0 - nullfrac1);
2759  }
2760  else
2761  selec = 0.5 * (1.0 - nullfrac1);
2762  }
2763 
2764  return selec;
2765 }

References CLAMP_PROBABILITY, DatumGetBool, fmgr_info(), FunctionCallInvoke, i, InitFunctionCallInfoData, j, LOCAL_FCINFO, Min, AttStatsSlot::numbers, AttStatsSlot::nvalues, OidIsValid, palloc0(), pfree(), VariableStatData::rel, RelOptInfo::rows, and AttStatsSlot::values.

Referenced by eqjoinsel().
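
Outside the MCV branch, the semijoin estimate comes down to the nd2/nd1 heuristic, or a flat 0.5 when either ndistinct is a default. A sketch of that heuristic with assumed inputs:

#include <stdio.h>

/* Mirrors the no-MCV branch of eqjoinsel_semi. */
static double
semi_selectivity(double nd1, double nd2, double nullfrac1,
                 int isdefault1, int isdefault2)
{
    if (!isdefault1 && !isdefault2)
    {
        if (nd1 <= nd2 || nd2 < 0)
            return 1.0 - nullfrac1;          /* every outer row finds a match */
        return (nd2 / nd1) * (1.0 - nullfrac1);
    }
    return 0.5 * (1.0 - nullfrac1);          /* unreliable stats: punt */
}

int
main(void)
{
    /* Assumed: outer key has 1000 distinct values, inner only 100. */
    printf("selec = %.3f\n", semi_selectivity(1000, 100, 0.0, 0, 0));
    /* A default ndistinct on either side forces the 0.5 fallback. */
    printf("selec = %.3f\n", semi_selectivity(1000, 100, 0.0, 0, 1));
    return 0;
}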

◆ eqsel()

Datum eqsel ( PG_FUNCTION_ARGS  )

Definition at line 224 of file selfuncs.c.

225 {
226  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
227 }
static double eqsel_internal(PG_FUNCTION_ARGS, bool negate)
Definition: selfuncs.c:233

References eqsel_internal(), and PG_RETURN_FLOAT8.

◆ eqsel_internal()

static double eqsel_internal ( PG_FUNCTION_ARGS  ,
bool  negate 
)
static

Definition at line 233 of file selfuncs.c.

234 {
235  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
236  Oid operator = PG_GETARG_OID(1);
237  List *args = (List *) PG_GETARG_POINTER(2);
238  int varRelid = PG_GETARG_INT32(3);
239  Oid collation = PG_GET_COLLATION();
240  VariableStatData vardata;
241  Node *other;
242  bool varonleft;
243  double selec;
244 
245  /*
246  * When asked about <>, we do the estimation using the corresponding =
247  * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
248  */
249  if (negate)
250  {
251  operator = get_negator(operator);
252  if (!OidIsValid(operator))
253  {
254  /* Use default selectivity (should we raise an error instead?) */
255  return 1.0 - DEFAULT_EQ_SEL;
256  }
257  }
258 
259  /*
260  * If expression is not variable = something or something = variable, then
261  * punt and return a default estimate.
262  */
263  if (!get_restriction_variable(root, args, varRelid,
264  &vardata, &other, &varonleft))
265  return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;
266 
267  /*
268  * We can do a lot better if the something is a constant. (Note: the
269  * Const might result from estimation rather than being a simple constant
270  * in the query.)
271  */
272  if (IsA(other, Const))
273  selec = var_eq_const(&vardata, operator, collation,
274  ((Const *) other)->constvalue,
275  ((Const *) other)->constisnull,
276  varonleft, negate);
277  else
278  selec = var_eq_non_const(&vardata, operator, collation, other,
279  varonleft, negate);
280 
281  ReleaseVariableStats(vardata);
282 
283  return selec;
284 }
#define PG_GETARG_INT32(n)
Definition: fmgr.h:269
Oid get_negator(Oid opno)
Definition: lsyscache.c:1514
bool get_restriction_variable(PlannerInfo *root, List *args, int varRelid, VariableStatData *vardata, Node **other, bool *varonleft)
Definition: selfuncs.c:4858
double var_eq_non_const(VariableStatData *vardata, Oid operator, Oid collation, Node *other, bool varonleft, bool negate)
Definition: selfuncs.c:463
#define DEFAULT_EQ_SEL
Definition: selfuncs.h:34

References generate_unaccent_rules::args, DEFAULT_EQ_SEL, get_negator(), get_restriction_variable(), IsA, OidIsValid, PG_GET_COLLATION, PG_GETARG_INT32, PG_GETARG_OID, PG_GETARG_POINTER, ReleaseVariableStats, var_eq_const(), and var_eq_non_const().

Referenced by eqsel(), and neqsel().
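
For <>, the result is derived from the matching = estimate: whatever is neither equal nor NULL must be unequal. A small arithmetic sketch of that conversion with assumed fractions:

#include <stdio.h>

int
main(void)
{
    double eq_selectivity = 0.20;   /* assumed selectivity of col = const */
    double nullfrac = 0.05;         /* assumed fraction of NULLs in col   */

    /* "1.0 - eq_selectivity - nullfrac", as described above. */
    double ne_selectivity = 1.0 - eq_selectivity - nullfrac;

    printf("estimated selectivity of col <> const: %.2f\n", ne_selectivity);
    return 0;
}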

◆ estimate_array_length()

int estimate_array_length ( Node arrayexpr)

Definition at line 2132 of file selfuncs.c.

2133 {
2134  /* look through any binary-compatible relabeling of arrayexpr */
2135  arrayexpr = strip_array_coercion(arrayexpr);
2136 
2137  if (arrayexpr && IsA(arrayexpr, Const))
2138  {
2139  Datum arraydatum = ((Const *) arrayexpr)->constvalue;
2140  bool arrayisnull = ((Const *) arrayexpr)->constisnull;
2141  ArrayType *arrayval;
2142 
2143  if (arrayisnull)
2144  return 0;
2145  arrayval = DatumGetArrayTypeP(arraydatum);
2146  return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2147  }
2148  else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2149  !((ArrayExpr *) arrayexpr)->multidims)
2150  {
2151  return list_length(((ArrayExpr *) arrayexpr)->elements);
2152  }
2153  else
2154  {
2155  /* default guess --- see also scalararraysel */
2156  return 10;
2157  }
2158 }
#define ARR_NDIM(a)
Definition: array.h:283
#define DatumGetArrayTypeP(X)
Definition: array.h:254
#define ARR_DIMS(a)
Definition: array.h:287
int ArrayGetNItems(int ndim, const int *dims)
Definition: arrayutils.c:76
static int list_length(const List *l)
Definition: pg_list.h:149
static Node * strip_array_coercion(Node *node)
Definition: selfuncs.c:1780

References ARR_DIMS, ARR_NDIM, ArrayGetNItems(), DatumGetArrayTypeP, IsA, list_length(), and strip_array_coercion().

Referenced by array_unnest_support(), btcostestimate(), cost_qual_eval_walker(), cost_tidscan(), genericcostestimate(), and gincost_scalararrayopexpr().

◆ estimate_hash_bucket_stats()

void estimate_hash_bucket_stats ( PlannerInfo root,
Node hashkey,
double  nbuckets,
Selectivity mcv_freq,
Selectivity bucketsize_frac 
)

Definition at line 3782 of file selfuncs.c.

3785 {
3786  VariableStatData vardata;
3787  double estfract,
3788  ndistinct,
3789  stanullfrac,
3790  avgfreq;
3791  bool isdefault;
3792  AttStatsSlot sslot;
3793 
3794  examine_variable(root, hashkey, 0, &vardata);
3795 
3796  /* Look up the frequency of the most common value, if available */
3797  *mcv_freq = 0.0;
3798 
3799  if (HeapTupleIsValid(vardata.statsTuple))
3800  {
3801  if (get_attstatsslot(&sslot, vardata.statsTuple,
3802  STATISTIC_KIND_MCV, InvalidOid,
3803  ATTSTATSSLOT_NUMBERS))
3804  {
3805  /*
3806  * The first MCV stat is for the most common value.
3807  */
3808  if (sslot.nnumbers > 0)
3809  *mcv_freq = sslot.numbers[0];
3810  free_attstatsslot(&sslot);
3811  }
3812  }
3813 
3814  /* Get number of distinct values */
3815  ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3816 
3817  /*
3818  * If ndistinct isn't real, punt. We normally return 0.1, but if the
3819  * mcv_freq is known to be even higher than that, use it instead.
3820  */
3821  if (isdefault)
3822  {
3823  *bucketsize_frac = (Selectivity) Max(0.1, *mcv_freq);
3824  ReleaseVariableStats(vardata);
3825  return;
3826  }
3827 
3828  /* Get fraction that are null */
3829  if (HeapTupleIsValid(vardata.statsTuple))
3830  {
3831  Form_pg_statistic stats;
3832 
3833  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3834  stanullfrac = stats->stanullfrac;
3835  }
3836  else
3837  stanullfrac = 0.0;
3838 
3839  /* Compute avg freq of all distinct data values in raw relation */
3840  avgfreq = (1.0 - stanullfrac) / ndistinct;
3841 
3842  /*
3843  * Adjust ndistinct to account for restriction clauses. Observe we are
3844  * assuming that the data distribution is affected uniformly by the
3845  * restriction clauses!
3846  *
3847  * XXX Possibly better way, but much more expensive: multiply by
3848  * selectivity of rel's restriction clauses that mention the target Var.
3849  */
3850  if (vardata.rel && vardata.rel->tuples > 0)
3851  {
3852  ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3853  ndistinct = clamp_row_est(ndistinct);
3854  }
3855 
3856  /*
3857  * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3858  * number of buckets is less than the expected number of distinct values;
3859  * otherwise it is 1/ndistinct.
3860  */
3861  if (ndistinct > nbuckets)
3862  estfract = 1.0 / nbuckets;
3863  else
3864  estfract = 1.0 / ndistinct;
3865 
3866  /*
3867  * Adjust estimated bucketsize upward to account for skewed distribution.
3868  */
3869  if (avgfreq > 0.0 && *mcv_freq > avgfreq)
3870  estfract *= *mcv_freq / avgfreq;
3871 
3872  /*
3873  * Clamp bucketsize to sane range (the above adjustment could easily
3874  * produce an out-of-range result). We set the lower bound a little above
3875  * zero, since zero isn't a very sane result.
3876  */
3877  if (estfract < 1.0e-6)
3878  estfract = 1.0e-6;
3879  else if (estfract > 1.0)
3880  estfract = 1.0;
3881 
3882  *bucketsize_frac = (Selectivity) estfract;
3883 
3884  ReleaseVariableStats(vardata);
3885 }
double clamp_row_est(double nrows)
Definition: costsize.c:201
Cardinality tuples
Definition: pathnodes.h:722

References ATTSTATSSLOT_NUMBERS, clamp_row_est(), examine_variable(), free_attstatsslot(), get_attstatsslot(), get_variable_numdistinct(), GETSTRUCT, HeapTupleIsValid, InvalidOid, Max, AttStatsSlot::nnumbers, AttStatsSlot::numbers, VariableStatData::rel, ReleaseVariableStats, RelOptInfo::rows, VariableStatData::statsTuple, and RelOptInfo::tuples.

Referenced by final_cost_hashjoin().
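
Putting the pieces together: the bucket fraction starts at 1/min(nbuckets, ndistinct), is inflated by mcv_freq/avgfreq when the distribution is skewed, and is clamped to [1e-6, 1]. A worked sketch of the same steps with assumed statistics:

#include <stdio.h>

int
main(void)
{
    /* Assumed inputs for one hash key. */
    double nbuckets = 1024.0;
    double ndistinct = 500.0;
    double stanullfrac = 0.0;
    double mcv_freq = 0.10;      /* most common value covers 10% of rows */

    double avgfreq = (1.0 - stanullfrac) / ndistinct;
    double estfract = (ndistinct > nbuckets) ? 1.0 / nbuckets
                                             : 1.0 / ndistinct;

    /* Skew adjustment, then clamping, as in estimate_hash_bucket_stats. */
    if (avgfreq > 0.0 && mcv_freq > avgfreq)
        estfract *= mcv_freq / avgfreq;
    if (estfract < 1.0e-6)
        estfract = 1.0e-6;
    else if (estfract > 1.0)
        estfract = 1.0;

    printf("mcv_freq = %.3f, bucketsize_frac = %.4f\n", mcv_freq, estfract);
    return 0;
}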

◆ estimate_hashagg_tablesize()

double estimate_hashagg_tablesize ( PlannerInfo root,
Path path,
const AggClauseCosts agg_costs,
double  dNumGroups 
)

Definition at line 3901 of file selfuncs.c.

3903 {
3904  Size hashentrysize;
3905 
3906  hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
3907  path->pathtarget->width,
3908  agg_costs->transitionSpace);
3909 
3910  /*
3911  * Note that this disregards the effect of fill-factor and growth policy
3912  * of the hash table. That's probably ok, given that the default
3913  * fill-factor is relatively high. It'd be hard to meaningfully factor in
3914  * "double-in-size" growth policies here.
3915  */
3916  return hashentrysize * dNumGroups;
3917 }
size_t Size
Definition: c.h:540
Size hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
Definition: nodeAgg.c:1676
Size transitionSpace
Definition: pathnodes.h:60
PathTarget * pathtarget
Definition: pathnodes.h:1195
List * aggtransinfos
Definition: pathnodes.h:359

References PlannerInfo::aggtransinfos, hash_agg_entry_size(), list_length(), Path::pathtarget, AggClauseCosts::transitionSpace, and PathTarget::width.

Referenced by consider_groupingsets_paths().

◆ estimate_multivariate_ndistinct()

static bool estimate_multivariate_ndistinct ( PlannerInfo root,
RelOptInfo rel,
List **  varinfos,
double *  ndistinct 
)
static

Definition at line 3938 of file selfuncs.c.

3940 {
3941  ListCell *lc;
3942  int nmatches_vars;
3943  int nmatches_exprs;
3944  Oid statOid = InvalidOid;
3945  MVNDistinct *stats;
3946  StatisticExtInfo *matched_info = NULL;
3947  RangeTblEntry *rte;
3948 
3949  /* bail out immediately if the table has no extended statistics */
3950  if (!rel->statlist)
3951  return false;
3952 
3953  /* look for the ndistinct statistics object matching the most vars */
3954  nmatches_vars = 0; /* we require at least two matches */
3955  nmatches_exprs = 0;
3956  foreach(lc, rel->statlist)
3957  {
3958  ListCell *lc2;
3959  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3960  int nshared_vars = 0;
3961  int nshared_exprs = 0;
3962 
3963  /* skip statistics of other kinds */
3964  if (info->kind != STATS_EXT_NDISTINCT)
3965  continue;
3966 
3967  /*
3968  * Determine how many expressions (and variables in non-matched
3969  * expressions) match. We'll then use these numbers to pick the
3970  * statistics object that best matches the clauses.
3971  */
3972  foreach(lc2, *varinfos)
3973  {
3974  ListCell *lc3;
3975  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
3976  AttrNumber attnum;
3977 
3978  Assert(varinfo->rel == rel);
3979 
3980  /* simple Var, search in statistics keys directly */
3981  if (IsA(varinfo->var, Var))
3982  {
3983  attnum = ((Var *) varinfo->var)->varattno;
3984 
3985  /*
3986  * Ignore system attributes - we don't support statistics on
3987  * them, so can't match them (and it'd fail as the values are
3988  * negative).
3989  */
3990  if (!AttrNumberIsForUserDefinedAttr(attnum))
3991  continue;
3992 
3993  if (bms_is_member(attnum, info->keys))
3994  nshared_vars++;
3995 
3996  continue;
3997  }
3998 
3999  /* expression - see if it's in the statistics object */
4000  foreach(lc3, info->exprs)
4001  {
4002  Node *expr = (Node *) lfirst(lc3);
4003 
4004  if (equal(varinfo->var, expr))
4005  {
4006  nshared_exprs++;
4007  break;
4008  }
4009  }
4010  }
4011 
4012  if (nshared_vars + nshared_exprs < 2)
4013  continue;
4014 
4015  /*
4016  * Does this statistics object match more columns than the currently
4017  * best object? If so, use this one instead.
4018  *
4019  * XXX This should break ties using name of the object, or something
4020  * like that, to make the outcome stable.
4021  */
4022  if ((nshared_exprs > nmatches_exprs) ||
4023  (((nshared_exprs == nmatches_exprs)) && (nshared_vars > nmatches_vars)))
4024  {
4025  statOid = info->statOid;
4026  nmatches_vars = nshared_vars;
4027  nmatches_exprs = nshared_exprs;
4028  matched_info = info;
4029  }
4030  }
4031 
4032  /* No match? */
4033  if (statOid == InvalidOid)
4034  return false;
4035 
4036  Assert(nmatches_vars + nmatches_exprs > 1);
4037 
4038  rte = planner_rt_fetch(rel->relid, root);
4039  stats = statext_ndistinct_load(statOid, rte->inh);
4040 
4041  /*
4042  * If we have a match, search it for the specific item that matches (there
4043  * must be one), and construct the output values.
4044  */
4045  if (stats)
4046  {
4047  int i;
4048  List *newlist = NIL;
4049  MVNDistinctItem *item = NULL;
4050  ListCell *lc2;
4051  Bitmapset *matched = NULL;
4052  AttrNumber attnum_offset;
4053 
4054  /*
4055  * How much we need to offset the attnums? If there are no
4056  * expressions, no offset is needed. Otherwise offset enough to move
4057  * the lowest one (which is equal to number of expressions) to 1.
4058  */
4059  if (matched_info->exprs)
4060  attnum_offset = (list_length(matched_info->exprs) + 1);
4061  else
4062  attnum_offset = 0;
4063 
4064  /* see what actually matched */
4065  foreach(lc2, *varinfos)
4066  {
4067  ListCell *lc3;
4068  int idx;
4069  bool found = false;
4070 
4071  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
4072 
4073  /*
4074  * Process a simple Var expression, by matching it to keys
4075  * directly. If there's a matching expression, we'll try matching
4076  * it later.
4077  */
4078  if (IsA(varinfo->var, Var))
4079  {
4080  AttrNumber attnum = ((Var *) varinfo->var)->varattno;
4081 
4082  /*
4083  * Ignore expressions on system attributes. Can't rely on the
4084  * bms check for negative values.
4085  */
4086  if (!AttrNumberIsForUserDefinedAttr(attnum))
4087  continue;
4088 
4089  /* Is the variable covered by the statistics object? */
4090  if (!bms_is_member(attnum, matched_info->keys))
4091  continue;
4092 
4093  attnum = attnum + attnum_offset;
4094 
4095  /* ensure sufficient offset */
4096  Assert(AttrNumberIsForUserDefinedAttr(attnum));
4097 
4098  matched = bms_add_member(matched, attnum);
4099 
4100  found = true;
4101  }
4102 
4103  /*
4104  * XXX Maybe we should allow searching the expressions even if we
4105  * found an attribute matching the expression? That would handle
4106  * trivial expressions like "(a)" but it seems fairly useless.
4107  */
4108  if (found)
4109  continue;
4110 
4111  /* expression - see if it's in the statistics object */
4112  idx = 0;
4113  foreach(lc3, matched_info->exprs)
4114  {
4115  Node *expr = (Node *) lfirst(lc3);
4116 
4117  if (equal(varinfo->var, expr))
4118  {
4119  AttrNumber attnum = -(idx + 1);
4120 
4121  attnum = attnum + attnum_offset;
4122 
4123  /* ensure sufficient offset */
4124  Assert(AttrNumberIsForUserDefinedAttr(attnum));
4125 
4126  matched = bms_add_member(matched, attnum);
4127 
4128  /* there should be just one matching expression */
4129  break;
4130  }
4131 
4132  idx++;
4133  }
4134  }
4135 
4136  /* Find the specific item that exactly matches the combination */
4137  for (i = 0; i < stats->nitems; i++)
4138  {
4139  int j;
4140  MVNDistinctItem *tmpitem = &stats->items[i];
4141 
4142  if (tmpitem->nattributes != bms_num_members(matched))
4143  continue;
4144 
4145  /* assume it's the right item */
4146  item = tmpitem;
4147 
4148  /* check that all item attributes/expressions fit the match */
4149  for (j = 0; j < tmpitem->nattributes; j++)
4150  {
4151  AttrNumber attnum = tmpitem->attributes[j];
4152 
4153  /*
4154  * Thanks to how we constructed the matched bitmap above, we
4155  * can just offset all attnums the same way.
4156  */
4157  attnum = attnum + attnum_offset;
4158 
4159  if (!bms_is_member(attnum, matched))
4160  {
4161  /* nah, it's not this item */
4162  item = NULL;
4163  break;
4164  }
4165  }
4166 
4167  /*
4168  * If the item has all the matched attributes, we know it's the
4169  * right one - there can't be a better one matching more attributes.
4170  */
4171  if (item)
4172  break;
4173  }
4174 
4175  /*
4176  * Make sure we found an item. There has to be one, because ndistinct
4177  * statistics includes all combinations of attributes.
4178  */
4179  if (!item)
4180  elog(ERROR, "corrupt MVNDistinct entry");
4181 
4182  /* Form the output varinfo list, keeping only unmatched ones */
4183  foreach(lc, *varinfos)
4184  {
4185  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
4186  ListCell *lc3;
4187  bool found = false;
4188 
4189  /*
4190  * Let's look at plain variables first, because it's the most
4191  * common case and the check is quite cheap. We can simply get the
4192  * attnum and check (with an offset) matched bitmap.
4193  */
4194  if (IsA(varinfo->var, Var))
4195  {
4196  AttrNumber attnum = ((Var *) varinfo->var)->varattno;
4197 
4198  /*
4199  * If it's a system attribute, we're done. We don't support
4200  * extended statistics on system attributes, so it's clearly
4201  * not matched. Just keep the expression and continue.
4202  */
4203  if (!AttrNumberIsForUserDefinedAttr(attnum))
4204  {
4205  newlist = lappend(newlist, varinfo);
4206  continue;
4207  }
4208 
4209  /* apply the same offset as above */
4210  attnum += attnum_offset;
4211 
4212  /* if it's not matched, keep the varinfo */
4213  if (!bms_is_member(attnum, matched))
4214  newlist = lappend(newlist, varinfo);
4215 
4216  /* The rest of the loop deals with complex expressions. */
4217  continue;
4218  }
4219 
4220  /*
4221  * Process complex expressions, not just simple Vars.
4222  *
4223  * First, we search for an exact match of an expression. If we
4224  * find one, we can just discard the whole GroupExprInfo, with all
4225  * the variables we extracted from it.
4226  *
4227  * Otherwise we inspect the individual vars, and try matching it
4228  * to variables in the item.
4229  */
4230  foreach(lc3, matched_info->exprs)
4231  {
4232  Node *expr = (Node *) lfirst(lc3);
4233 
4234  if (equal(varinfo->var, expr))
4235  {
4236  found = true;
4237  break;
4238  }
4239  }
4240 
4241  /* found exact match, skip */
4242  if (found)
4243  continue;
4244 
4245  newlist = lappend(newlist, varinfo);
4246  }
4247 
4248  *varinfos = newlist;
4249  *ndistinct = item->ndistinct;
4250  return true;
4251  }
4252 
4253  return false;
4254 }
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define AttrNumberIsForUserDefinedAttr(attributeNumber)
Definition: attnum.h:41
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:648
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:738
MVNDistinct * statext_ndistinct_load(Oid mvoid, bool inh)
Definition: mvdistinct.c:149
double ndistinct
Definition: statistics.h:28
AttrNumber * attributes
Definition: statistics.h:30
uint32 nitems
Definition: statistics.h:38
MVNDistinctItem items[FLEXIBLE_ARRAY_MEMBER]
Definition: statistics.h:39
List * statlist
Definition: pathnodes.h:720
Bitmapset * keys
Definition: pathnodes.h:941
Definition: primnodes.h:196

References Assert(), attnum, MVNDistinctItem::attributes, AttrNumberIsForUserDefinedAttr, bms_add_member(), bms_is_member(), bms_num_members(), elog, equal(), ERROR, StatisticExtInfo::exprs, i, idx(), RangeTblEntry::inh, InvalidOid, IsA, MVNDistinct::items, j, StatisticExtInfo::keys, StatisticExtInfo::kind, lappend(), lfirst, list_length(), MVNDistinctItem::nattributes, MVNDistinctItem::ndistinct, NIL, MVNDistinct::nitems, planner_rt_fetch, GroupVarInfo::rel, RelOptInfo::relid, statext_ndistinct_load(), RelOptInfo::statlist, StatisticExtInfo::statOid, and GroupVarInfo::var.

Referenced by estimate_num_groups_incremental().

◆ estimate_num_groups()

double estimate_num_groups ( PlannerInfo root,
List groupExprs,
double  input_rows,
List **  pgset,
EstimationInfo estinfo 
)

Definition at line 3368 of file selfuncs.c.

3370 {
3371  return estimate_num_groups_incremental(root, groupExprs,
3372  input_rows, pgset, estinfo,
3373  NULL, 0);
3374 }
double estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo, List **cache_varinfos, int prevNExprs)
Definition: selfuncs.c:3382

References estimate_num_groups_incremental().

Referenced by adjust_rowcount_for_semijoins(), cost_incremental_sort(), cost_memoize_rescan(), create_final_distinct_paths(), create_partial_distinct_paths(), create_unique_path(), estimate_path_cost_size(), get_number_of_groups(), and recurse_set_operations().
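
The per-relation clamping inside estimate_num_groups_incremental (below) scales each relation's ndistinct by the Dell'Era approximation n * (1 - ((N-p)/N)^(N/n)) to account for rows filtered out by restriction clauses. A worked sketch of that formula with assumed table statistics:

#include <stdio.h>
#include <math.h>

/* Dell'Era approximation: expected number of distinct values seen when
 * selecting p rows at random from a table of N rows holding n distinct
 * values (good when n is reasonably large). */
static double
expected_distinct(double N, double n, double p)
{
    if (n <= 0 || N <= 0 || p >= N)
        return n;
    return n * (1.0 - pow((N - p) / N, N / n));
}

int
main(void)
{
    double N = 1000000.0;   /* assumed table size                */
    double n = 5000.0;      /* assumed distinct values in column */
    double p = 10000.0;     /* assumed rows surviving the filter */

    printf("expected distinct values in sample: %.0f of %.0f\n",
           expected_distinct(N, n, p), n);
    return 0;
}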

◆ estimate_num_groups_incremental()

double estimate_num_groups_incremental ( PlannerInfo root,
List groupExprs,
double  input_rows,
List **  pgset,
EstimationInfo estinfo,
List **  cache_varinfos,
int  prevNExprs 
)

Definition at line 3382 of file selfuncs.c.

3386 {
3387  List *varinfos = (cache_varinfos) ? *cache_varinfos : NIL;
3388  double srf_multiplier = 1.0;
3389  double numdistinct;
3390  ListCell *l;
3391  int i,
3392  j;
3393 
3394  /* Zero the estinfo output parameter, if non-NULL */
3395  if (estinfo != NULL)
3396  memset(estinfo, 0, sizeof(EstimationInfo));
3397 
3398  /*
3399  * We don't ever want to return an estimate of zero groups, as that tends
3400  * to lead to division-by-zero and other unpleasantness. The input_rows
3401  * estimate is usually already at least 1, but clamp it just in case it
3402  * isn't.
3403  */
3404  input_rows = clamp_row_est(input_rows);
3405 
3406  /*
3407  * If no grouping columns, there's exactly one group. (This can't happen
3408  * for normal cases with GROUP BY or DISTINCT, but it is possible for
3409  * corner cases with set operations.)
3410  */
3411  if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3412  return 1.0;
3413 
3414  /*
3415  * Count groups derived from boolean grouping expressions. For other
3416  * expressions, find the unique Vars used, treating an expression as a Var
3417  * if we can find stats for it. For each one, record the statistical
3418  * estimate of number of distinct values (total in its table, without
3419  * regard for filtering).
3420  */
3421  numdistinct = 1.0;
3422 
3423  i = j = 0;
3424  foreach(l, groupExprs)
3425  {
3426  Node *groupexpr = (Node *) lfirst(l);
3427  double this_srf_multiplier;
3428  VariableStatData vardata;
3429  List *varshere;
3430  ListCell *l2;
3431 
3432  /* was done on previous call */
3433  if (cache_varinfos && j++ < prevNExprs)
3434  {
3435  if (pgset)
3436  i++; /* to keep in sync with lines below */
3437  continue;
3438  }
3439 
3440  /* is expression in this grouping set? */
3441  if (pgset && !list_member_int(*pgset, i++))
3442  continue;
3443 
3444  /*
3445  * Set-returning functions in grouping columns are a bit problematic.
3446  * The code below will effectively ignore their SRF nature and come up
3447  * with a numdistinct estimate as though they were scalar functions.
3448  * We compensate by scaling up the end result by the largest SRF
3449  * rowcount estimate. (This will be an overestimate if the SRF
3450  * produces multiple copies of any output value, but it seems best to
3451  * assume the SRF's outputs are distinct. In any case, it's probably
3452  * pointless to worry too much about this without much better
3453  * estimates for SRF output rowcounts than we have today.)
3454  */
3455  this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
3456  if (srf_multiplier < this_srf_multiplier)
3457  srf_multiplier = this_srf_multiplier;
3458 
3459  /* Short-circuit for expressions returning boolean */
3460  if (exprType(groupexpr) == BOOLOID)
3461  {
3462  numdistinct *= 2.0;
3463  continue;
3464  }
3465 
3466  /*
3467  * If examine_variable is able to deduce anything about the GROUP BY
3468  * expression, treat it as a single variable even if it's really more
3469  * complicated.
3470  *
3471  * XXX This has the consequence that if there's a statistics object on
3472  * the expression, we don't split it into individual Vars. This
3473  * affects our selection of statistics in
3474  * estimate_multivariate_ndistinct, because it's probably better to
3475  * use more accurate estimate for each expression and treat them as
3476  * independent, than to combine estimates for the extracted variables
3477  * when we don't know how that relates to the expressions.
3478  */
3479  examine_variable(root, groupexpr, 0, &vardata);
3480  if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3481  {
3482  varinfos = add_unique_group_var(root, varinfos,
3483  groupexpr, &vardata);
3484  ReleaseVariableStats(vardata);
3485  continue;
3486  }
3487  ReleaseVariableStats(vardata);
3488 
3489  /*
3490  * Else pull out the component Vars. Handle PlaceHolderVars by
3491  * recursing into their arguments (effectively assuming that the
3492  * PlaceHolderVar doesn't change the number of groups, which boils
3493  * down to ignoring the possible addition of nulls to the result set).
3494  */
3495  varshere = pull_var_clause(groupexpr,
3496  PVC_RECURSE_AGGREGATES |
3497  PVC_RECURSE_WINDOWFUNCS |
3498  PVC_RECURSE_PLACEHOLDERS);
3499 
3500  /*
3501  * If we find any variable-free GROUP BY item, then either it is a
3502  * constant (and we can ignore it) or it contains a volatile function;
3503  * in the latter case we punt and assume that each input row will
3504  * yield a distinct group.
3505  */
3506  if (varshere == NIL)
3507  {
3508  if (contain_volatile_functions(groupexpr))
3509  {
3510  if (cache_varinfos)
3511  *cache_varinfos = varinfos;
3512  return input_rows;
3513  }
3514  continue;
3515  }
3516 
3517  /*
3518  * Else add variables to varinfos list
3519  */
3520  foreach(l2, varshere)
3521  {
3522  Node *var = (Node *) lfirst(l2);
3523 
3524  examine_variable(root, var, 0, &vardata);
3525  varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3526  ReleaseVariableStats(vardata);
3527  }
3528  }
3529 
3530  if (cache_varinfos)
3531  *cache_varinfos = varinfos;
3532 
3533  /*
3534  * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3535  * list.
3536  */
3537  if (varinfos == NIL)
3538  {
3539  /* Apply SRF multiplier as we would do in the long path */
3540  numdistinct *= srf_multiplier;
3541  /* Round off */
3542  numdistinct = ceil(numdistinct);
3543  /* Guard against out-of-range answers */
3544  if (numdistinct > input_rows)
3545  numdistinct = input_rows;
3546  if (numdistinct < 1.0)
3547  numdistinct = 1.0;
3548  return numdistinct;
3549  }
3550 
3551  /*
3552  * Group Vars by relation and estimate total numdistinct.
3553  *
3554  * For each iteration of the outer loop, we process the frontmost Var in
3555  * varinfos, plus all other Vars in the same relation. We remove these
3556  * Vars from the newvarinfos list for the next iteration. This is the
3557  * easiest way to group Vars of same rel together.
3558  */
3559  do
3560  {
3561  GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3562  RelOptInfo *rel = varinfo1->rel;
3563  double reldistinct = 1;
3564  double relmaxndistinct = reldistinct;
3565  int relvarcount = 0;
3566  List *newvarinfos = NIL;
3567  List *relvarinfos = NIL;
3568 
3569  /*
3570  * Split the list of varinfos in two - one for the current rel, one
3571  * for remaining Vars on other rels.
3572  */
3573  relvarinfos = lappend(relvarinfos, varinfo1);
3574  for_each_from(l, varinfos, 1)
3575  {
3576  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3577 
3578  if (varinfo2->rel == varinfo1->rel)
3579  {
3580  /* varinfos on current rel */
3581  relvarinfos = lappend(relvarinfos, varinfo2);
3582  }
3583  else
3584  {
3585  /* not time to process varinfo2 yet */
3586  newvarinfos = lappend(newvarinfos, varinfo2);
3587  }
3588  }
3589 
3590  /*
3591  * Get the numdistinct estimate for the Vars of this rel. We
3592  * iteratively search for multivariate n-distinct with maximum number
3593  * of vars; assuming that each var group is independent of the others,
3594  * we multiply them together. Any remaining relvarinfos after no more
3595  * multivariate matches are found are assumed independent too, so
3596  * their individual ndistinct estimates are multiplied also.
3597  *
3598  * While iterating, count how many separate numdistinct values we
3599  * apply. We apply a fudge factor below, but only if we multiplied
3600  * more than one such values.
3601  */
3602  while (relvarinfos)
3603  {
3604  double mvndistinct;
3605 
3606  if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3607  &mvndistinct))
3608  {
3609  reldistinct *= mvndistinct;
3610  if (relmaxndistinct < mvndistinct)
3611  relmaxndistinct = mvndistinct;
3612  relvarcount++;
3613  }
3614  else
3615  {
3616  foreach(l, relvarinfos)
3617  {
3618  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3619 
3620  reldistinct *= varinfo2->ndistinct;
3621  if (relmaxndistinct < varinfo2->ndistinct)
3622  relmaxndistinct = varinfo2->ndistinct;
3623  relvarcount++;
3624 
3625  /*
3626  * When varinfo2's isdefault is set then we'd better set
3627  * the SELFLAG_USED_DEFAULT bit in the EstimationInfo.
3628  */
3629  if (estinfo != NULL && varinfo2->isdefault)
3630  estinfo->flags |= SELFLAG_USED_DEFAULT;
3631  }
3632 
3633  /* we're done with this relation */
3634  relvarinfos = NIL;
3635  }
3636  }
3637 
3638  /*
3639  * Sanity check --- don't divide by zero if empty relation.
3640  */
3641  Assert(IS_SIMPLE_REL(rel));
3642  if (rel->tuples > 0)
3643  {
3644  /*
3645  * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3646  * fudge factor is because the Vars are probably correlated but we
3647  * don't know by how much. We should never clamp to less than the
3648  * largest ndistinct value for any of the Vars, though, since
3649  * there will surely be at least that many groups.
3650  */
3651  double clamp = rel->tuples;
3652 
3653  if (relvarcount > 1)
3654  {
3655  clamp *= 0.1;
3656  if (clamp < relmaxndistinct)
3657  {
3658  clamp = relmaxndistinct;
3659  /* for sanity in case some ndistinct is too large: */
3660  if (clamp > rel->tuples)
3661  clamp = rel->tuples;
3662  }
3663  }
3664  if (reldistinct > clamp)
3665  reldistinct = clamp;
3666 
3667  /*
3668  * Update the estimate based on the restriction selectivity,
3669  * guarding against division by zero when reldistinct is zero.
3670  * Also skip this if we know that we are returning all rows.
3671  */
3672  if (reldistinct > 0 && rel->rows < rel->tuples)
3673  {
3674  /*
3675  * Given a table containing N rows with n distinct values in a
3676  * uniform distribution, if we select p rows at random then
3677  * the expected number of distinct values selected is
3678  *
3679  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3680  *
3681  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3682  *
3683  * See "Approximating block accesses in database
3684  * organizations", S. B. Yao, Communications of the ACM,
3685  * Volume 20 Issue 4, April 1977 Pages 260-261.
3686  *
3687  * Alternatively, re-arranging the terms from the factorials,
3688  * this may be written as
3689  *
3690  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3691  *
3692  * This form of the formula is more efficient to compute in
3693  * the common case where p is larger than N/n. Additionally,
3694  * as pointed out by Dell'Era, if i << N for all terms in the
3695  * product, it can be approximated by
3696  *
3697  * n * (1 - ((N-p)/N)^(N/n))
3698  *
3699  * See "Expected distinct values when selecting from a bag
3700  * without replacement", Alberto Dell'Era,
3701  * http://www.adellera.it/investigations/distinct_balls/.
3702  *
3703  * The condition i << N is equivalent to n >> 1, so this is a
3704  * good approximation when the number of distinct values in
3705  * the table is large. It turns out that this formula also
3706  * works well even when n is small.
3707  */
3708  reldistinct *=
3709  (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3710  rel->tuples / reldistinct));
3711  }
3712  reldistinct = clamp_row_est(reldistinct);
3713 
3714  /*
3715  * Update estimate of total distinct groups.
3716  */
3717  numdistinct *= reldistinct;
3718  }
3719 
3720  varinfos = newvarinfos;
3721  } while (varinfos != NIL);
3722 
3723  /* Now we can account for the effects of any SRFs */
3724  numdistinct *= srf_multiplier;
3725 
3726  /* Round off */
3727  numdistinct = ceil(numdistinct);
3728 
3729  /* Guard against out-of-range answers */
3730  if (numdistinct > input_rows)
3731  numdistinct = input_rows;
3732  if (numdistinct < 1.0)
3733  numdistinct = 1.0;
3734 
3735  return numdistinct;
3736 }
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:496
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition: clauses.c:292
bool list_member_int(const List *list, int datum)
Definition: list.c:681
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:41
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:189
#define PVC_RECURSE_PLACEHOLDERS
Definition: optimizer.h:193
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:191
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:655
#define for_each_from(cell, lst, N)
Definition: pg_list.h:393
#define linitial(l)
Definition: pg_list.h:174
static bool estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, List **varinfos, double *ndistinct)
Definition: selfuncs.c:3938
static List * add_unique_group_var(PlannerInfo *root, List *varinfos, Node *var, VariableStatData *vardata)
Definition: selfuncs.c:3248
#define SELFLAG_USED_DEFAULT
Definition: selfuncs.h:76
uint32 flags
Definition: selfuncs.h:80
List * pull_var_clause(Node *node, int flags)
Definition: var.c:604

References add_unique_group_var(), Assert(), clamp_row_est(), contain_volatile_functions(), estimate_multivariate_ndistinct(), examine_variable(), expression_returns_set_rows(), exprType(), EstimationInfo::flags, for_each_from, HeapTupleIsValid, i, IS_SIMPLE_REL, GroupVarInfo::isdefault, VariableStatData::isunique, j, lappend(), lfirst, linitial, list_length(), list_member_int(), GroupVarInfo::ndistinct, NIL, pull_var_clause(), PVC_RECURSE_AGGREGATES, PVC_RECURSE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, GroupVarInfo::rel, ReleaseVariableStats, RelOptInfo::rows, SELFLAG_USED_DEFAULT, VariableStatData::statsTuple, and RelOptInfo::tuples.

Referenced by compute_cpu_sort_cost(), and estimate_num_groups().
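To make the Dell'Era approximation used in the comment above concrete, here is a minimal standalone sketch (not part of selfuncs.c; N, n, and p are invented example figures) that evaluates n * (1 - ((N - p) / N)^(N / n)):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double  N = 1000000.0;   /* rows in the table */
    double  n = 5000.0;      /* distinct values in the table */
    double  p = 10000.0;     /* rows surviving the restriction */

    /* Dell'Era approximation applied by the code above */
    double  expected = n * (1.0 - pow((N - p) / N, N / n));

    printf("expected distinct values: %.0f\n", expected);  /* about 4330 */
    return 0;
}

With these figures only about 4330 of the 5000 distinct values are expected to survive a 1% sample, which is the downward adjustment applied to reldistinct before clamping.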

◆ examine_simple_variable()

static void examine_simple_variable ( PlannerInfo *  root,
Var *  var,
VariableStatData *  vardata 
)
static

Definition at line 5379 of file selfuncs.c.

5381 {
5382  RangeTblEntry *rte = root->simple_rte_array[var->varno];
5383 
5384  Assert(IsA(rte, RangeTblEntry));
5385 
5386  if (get_relation_stats_hook &&
5387  (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
5388  {
5389  /*
5390  * The hook took control of acquiring a stats tuple. If it did supply
5391  * a tuple, it'd better have supplied a freefunc.
5392  */
5393  if (HeapTupleIsValid(vardata->statsTuple) &&
5394  !vardata->freefunc)
5395  elog(ERROR, "no function provided to release variable stats with");
5396  }
5397  else if (rte->rtekind == RTE_RELATION)
5398  {
5399  /*
5400  * Plain table or parent of an inheritance appendrel, so look up the
5401  * column in pg_statistic
5402  */
5403  vardata->statsTuple = SearchSysCache3(STATRELATTINH,
5404  ObjectIdGetDatum(rte->relid),
5405  Int16GetDatum(var->varattno),
5406  BoolGetDatum(rte->inh));
5407  vardata->freefunc = ReleaseSysCache;
5408 
5409  if (HeapTupleIsValid(vardata->statsTuple))
5410  {
5411  Oid userid;
5412 
5413  /*
5414  * Check if user has permission to read this column. We require
5415  * all rows to be accessible, so there must be no securityQuals
5416  * from security barrier views or RLS policies. Use checkAsUser
5417  * if it's set, in case we're accessing the table via a view.
5418  */
5419  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5420 
5421  vardata->acl_ok =
5422  rte->securityQuals == NIL &&
5423  ((pg_class_aclcheck(rte->relid, userid,
5424  ACL_SELECT) == ACLCHECK_OK) ||
5425  (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
5426  ACL_SELECT) == ACLCHECK_OK));
5427 
5428  /*
5429  * If the user doesn't have permissions to access an inheritance
5430  * child relation or specifically this attribute, check the
5431  * permissions of the table/column actually mentioned in the
5432  * query, since most likely the user does have that permission
5433  * (else the query will fail at runtime), and if the user can read
5434  * the column there then he can get the values of the child table
5435  * too. To do that, we must find out which of the root parent's
5436  * attributes the child relation's attribute corresponds to.
5437  */
5438  if (!vardata->acl_ok && var->varattno > 0 &&
5439  root->append_rel_array != NULL)
5440  {
5441  AppendRelInfo *appinfo;
5442  Index varno = var->varno;
5443  int varattno = var->varattno;
5444  bool found = false;
5445 
5446  appinfo = root->append_rel_array[varno];
5447 
5448  /*
5449  * Partitions are mapped to their immediate parent, not the
5450  * root parent, so must be ready to walk up multiple
5451  * AppendRelInfos. But stop if we hit a parent that is not
5452  * RTE_RELATION --- that's a flattened UNION ALL subquery, not
5453  * an inheritance parent.
5454  */
5455  while (appinfo &&
5456  planner_rt_fetch(appinfo->parent_relid,
5457  root)->rtekind == RTE_RELATION)
5458  {
5459  int parent_varattno;
5460 
5461  found = false;
5462  if (varattno <= 0 || varattno > appinfo->num_child_cols)
5463  break; /* safety check */
5464  parent_varattno = appinfo->parent_colnos[varattno - 1];
5465  if (parent_varattno == 0)
5466  break; /* Var is local to child */
5467 
5468  varno = appinfo->parent_relid;
5469  varattno = parent_varattno;
5470  found = true;
5471 
5472  /* If the parent is itself a child, continue up. */
5473  appinfo = root->append_rel_array[varno];
5474  }
5475 
5476  /*
5477  * In rare cases, the Var may be local to the child table, in
5478  * which case, we've got to live with having no access to this
5479  * column's stats.
5480  */
5481  if (!found)
5482  return;
5483 
5484  /* Repeat the access check on this parent rel & column */
5485  rte = planner_rt_fetch(varno, root);
5486  Assert(rte->rtekind == RTE_RELATION);
5487 
5488  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5489 
5490  vardata->acl_ok =
5491  rte->securityQuals == NIL &&
5492  ((pg_class_aclcheck(rte->relid, userid,
5493  ACL_SELECT) == ACLCHECK_OK) ||
5494  (pg_attribute_aclcheck(rte->relid, varattno, userid,
5495  ACL_SELECT) == ACLCHECK_OK));
5496  }
5497  }
5498  else
5499  {
5500  /* suppress any possible leakproofness checks later */
5501  vardata->acl_ok = true;
5502  }
5503  }
5504  else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
5505  {
5506  /*
5507  * Plain subquery (not one that was converted to an appendrel).
5508  */
5509  Query *subquery = rte->subquery;
5510  RelOptInfo *rel;
5511  TargetEntry *ste;
5512 
5513  /*
5514  * Punt if it's a whole-row var rather than a plain column reference.
5515  */
5516  if (var->varattno == InvalidAttrNumber)
5517  return;
5518 
5519  /*
5520  * Punt if subquery uses set operations or GROUP BY, as these will
5521  * mash underlying columns' stats beyond recognition. (Set ops are
5522  * particularly nasty; if we forged ahead, we would return stats
5523  * relevant to only the leftmost subselect...) DISTINCT is also
5524  * problematic, but we check that later because there is a possibility
5525  * of learning something even with it.
5526  */
5527  if (subquery->setOperations ||
5528  subquery->groupClause ||
5529  subquery->groupingSets)
5530  return;
5531 
5532  /*
5533  * OK, fetch RelOptInfo for subquery. Note that we don't change the
5534  * rel returned in vardata, since caller expects it to be a rel of the
5535  * caller's query level. Because we might already be recursing, we
5536  * can't use that rel pointer either, but have to look up the Var's
5537  * rel afresh.
5538  */
5539  rel = find_base_rel(root, var->varno);
5540 
5541  /* If the subquery hasn't been planned yet, we have to punt */
5542  if (rel->subroot == NULL)
5543  return;
5544  Assert(IsA(rel->subroot, PlannerInfo));
5545 
5546  /*
5547  * Switch our attention to the subquery as mangled by the planner. It
5548  * was okay to look at the pre-planning version for the tests above,
5549  * but now we need a Var that will refer to the subroot's live
5550  * RelOptInfos. For instance, if any subquery pullup happened during
5551  * planning, Vars in the targetlist might have gotten replaced, and we
5552  * need to see the replacement expressions.
5553  */
5554  subquery = rel->subroot->parse;
5555  Assert(IsA(subquery, Query));
5556 
5557  /* Get the subquery output expression referenced by the upper Var */
5558  ste = get_tle_by_resno(subquery->targetList, var->varattno);
5559  if (ste == NULL || ste->resjunk)
5560  elog(ERROR, "subquery %s does not have attribute %d",
5561  rte->eref->aliasname, var->varattno);
5562  var = (Var *) ste->expr;
5563 
5564  /*
5565  * If subquery uses DISTINCT, we can't make use of any stats for the
5566  * variable ... but, if it's the only DISTINCT column, we are entitled
5567  * to consider it unique. We do the test this way so that it works
5568  * for cases involving DISTINCT ON.
5569  */
5570  if (subquery->distinctClause)
5571  {
5572  if (list_length(subquery->distinctClause) == 1 &&
5573  targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
5574  vardata->isunique = true;
5575  /* cannot go further */
5576  return;
5577  }
5578 
5579  /*
5580  * If the sub-query originated from a view with the security_barrier
5581  * attribute, we must not look at the variable's statistics, though it
5582  * seems all right to notice the existence of a DISTINCT clause. So
5583  * stop here.
5584  *
5585  * This is probably a harsher restriction than necessary; it's
5586  * certainly OK for the selectivity estimator (which is a C function,
5587  * and therefore omnipotent anyway) to look at the statistics. But
5588  * many selectivity estimators will happily *invoke the operator
5589  * function* to try to work out a good estimate - and that's not OK.
5590  * So for now, don't dig down for stats.
5591  */
5592  if (rte->security_barrier)
5593  return;
5594 
5595  /* Can only handle a simple Var of subquery's query level */
5596  if (var && IsA(var, Var) &&
5597  var->varlevelsup == 0)
5598  {
5599  /*
5600  * OK, recurse into the subquery. Note that the original setting
5601  * of vardata->isunique (which will surely be false) is left
5602  * unchanged in this situation. That's what we want, since even
5603  * if the underlying column is unique, the subquery may have
5604  * joined to other tables in a way that creates duplicates.
5605  */
5606  examine_simple_variable(rel->subroot, var, vardata);
5607  }
5608  }
5609  else
5610  {
5611  /*
5612  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
5613  * won't see RTE_JOIN here because join alias Vars have already been
5614  * flattened.) There's not much we can do with function outputs, but
5615  * maybe someday try to be smarter about VALUES and/or CTEs.
5616  */
5617  }
5618 }
@ ACLCHECK_OK
Definition: acl.h:182
AclResult pg_attribute_aclcheck(Oid table_oid, AttrNumber attnum, Oid roleid, AclMode mode)
Definition: aclchk.c:4878
AclResult pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
Definition: aclchk.c:5007
#define InvalidAttrNumber
Definition: attnum.h:23
unsigned int Index
Definition: c.h:549
Oid GetUserId(void)
Definition: miscinit.c:492
bool targetIsInSortList(TargetEntry *tle, Oid sortop, List *sortList)
TargetEntry * get_tle_by_resno(List *tlist, AttrNumber resno)
@ RTE_SUBQUERY
Definition: parsenodes.h:999
#define ACL_SELECT
Definition: parsenodes.h:83
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:375
static void examine_simple_variable(PlannerInfo *root, Var *var, VariableStatData *vardata)
Definition: selfuncs.c:5379
char * aliasname
Definition: primnodes.h:42
Index parent_relid
Definition: pathnodes.h:2326
int num_child_cols
Definition: pathnodes.h:2362
AttrNumber * parent_colnos
Definition: pathnodes.h:2363
struct AppendRelInfo ** append_rel_array
Definition: pathnodes.h:202
RangeTblEntry ** simple_rte_array
Definition: pathnodes.h:194
Query * parse
Definition: pathnodes.h:162
Node * setOperations
Definition: parsenodes.h:182
List * groupClause
Definition: parsenodes.h:163
List * targetList
Definition: parsenodes.h:155
List * groupingSets
Definition: parsenodes.h:166
List * distinctClause
Definition: parsenodes.h:172
bool security_barrier
Definition: parsenodes.h:1051
List * securityQuals
Definition: parsenodes.h:1171
Query * subquery
Definition: parsenodes.h:1050
Alias * eref
Definition: parsenodes.h:1161
PlannerInfo * subroot
Definition: pathnodes.h:726
Expr * expr
Definition: primnodes.h:1716
bool resjunk
Definition: primnodes.h:1723
AttrNumber varattno
Definition: primnodes.h:200
int varno
Definition: primnodes.h:198
Index varlevelsup
Definition: primnodes.h:205

References VariableStatData::acl_ok, ACL_SELECT, ACLCHECK_OK, Alias::aliasname, PlannerInfo::append_rel_array, Assert(), BoolGetDatum, RangeTblEntry::checkAsUser, Query::distinctClause, elog, RangeTblEntry::eref, ERROR, TargetEntry::expr, find_base_rel(), VariableStatData::freefunc, get_relation_stats_hook, get_tle_by_resno(), GetUserId(), Query::groupClause, Query::groupingSets, HeapTupleIsValid, if(), RangeTblEntry::inh, Int16GetDatum, InvalidAttrNumber, InvalidOid, IsA, VariableStatData::isunique, list_length(), NIL, AppendRelInfo::num_child_cols, ObjectIdGetDatum, AppendRelInfo::parent_colnos, AppendRelInfo::parent_relid, PlannerInfo::parse, pg_attribute_aclcheck(), pg_class_aclcheck(), planner_rt_fetch, ReleaseSysCache(), RangeTblEntry::relid, TargetEntry::resjunk, RTE_RELATION, RTE_SUBQUERY, RangeTblEntry::rtekind, SearchSysCache3(), RangeTblEntry::security_barrier, RangeTblEntry::securityQuals, Query::setOperations, PlannerInfo::simple_rte_array, STATRELATTINH, VariableStatData::statsTuple, RangeTblEntry::subquery, RelOptInfo::subroot, targetIsInSortList(), Query::targetList, Var::varattno, Var::varlevelsup, and Var::varno.

Referenced by examine_variable().
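The get_relation_stats_hook branch above implies a contract for extensions: if the hook takes over and stores a statsTuple, it must also store a freefunc, or examine_simple_variable() reports an error. A minimal, hypothetical extension-side sketch (my_relation_stats_hook and the module boilerplate are invented; only the hook type and variable come from utils/selfuncs.h) might look like this:

#include "postgres.h"

#include "fmgr.h"
#include "utils/selfuncs.h"

PG_MODULE_MAGIC;

static get_relation_stats_hook_type prev_relation_stats_hook = NULL;

/*
 * Hypothetical hook.  A real implementation would fill vardata->statsTuple
 * (for example from a custom statistics store) and set vardata->freefunc
 * before returning true; returning false hands control back to the
 * built-in pg_statistic lookup shown above.
 */
static bool
my_relation_stats_hook(PlannerInfo *root, RangeTblEntry *rte,
                       AttrNumber attnum, VariableStatData *vardata)
{
    if (prev_relation_stats_hook)
        return (*prev_relation_stats_hook) (root, rte, attnum, vardata);
    return false;
}

void
_PG_init(void)
{
    prev_relation_stats_hook = get_relation_stats_hook;
    get_relation_stats_hook = my_relation_stats_hook;
}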

◆ examine_variable()

void examine_variable ( PlannerInfo *  root,
Node *  node,
int  varRelid,
VariableStatData *  vardata 
)

Definition at line 4987 of file selfuncs.c.

4989 {
4990  Node *basenode;
4991  Relids varnos;
4992  RelOptInfo *onerel;
4993 
4994  /* Make sure we don't return dangling pointers in vardata */
4995  MemSet(vardata, 0, sizeof(VariableStatData));
4996 
4997  /* Save the exposed type of the expression */
4998  vardata->vartype = exprType(node);
4999 
5000  /* Look inside any binary-compatible relabeling */
5001 
5002  if (IsA(node, RelabelType))
5003  basenode = (Node *) ((RelabelType *) node)->arg;
5004  else
5005  basenode = node;
5006 
5007  /* Fast path for a simple Var */
5008 
5009  if (IsA(basenode, Var) &&
5010  (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
5011  {
5012  Var *var = (Var *) basenode;
5013 
5014  /* Set up result fields other than the stats tuple */
5015  vardata->var = basenode; /* return Var without relabeling */
5016  vardata->rel = find_base_rel(root, var->varno);
5017  vardata->atttype = var->vartype;
5018  vardata->atttypmod = var->vartypmod;
5019  vardata->isunique = has_unique_index(vardata->rel, var->varattno);
5020 
5021  /* Try to locate some stats */
5022  examine_simple_variable(root, var, vardata);
5023 
5024  return;
5025  }
5026 
5027  /*
5028  * Okay, it's a more complicated expression. Determine variable
5029  * membership. Note that when varRelid isn't zero, only vars of that
5030  * relation are considered "real" vars.
5031  */
5032  varnos = pull_varnos(root, basenode);
5033 
5034  onerel = NULL;
5035 
5036  switch (bms_membership(varnos))
5037  {
5038  case BMS_EMPTY_SET:
5039  /* No Vars at all ... must be pseudo-constant clause */
5040  break;
5041  case BMS_SINGLETON:
5042  if (varRelid == 0 || bms_is_member(varRelid, varnos))
5043  {
5044  onerel = find_base_rel(root,
5045  (varRelid ? varRelid : bms_singleton_member(varnos)));
5046  vardata->rel = onerel;
5047  node = basenode; /* strip any relabeling */
5048  }
5049  /* else treat it as a constant */
5050  break;
5051  case BMS_MULTIPLE:
5052  if (varRelid == 0)
5053  {
5054  /* treat it as a variable of a join relation */
5055  vardata->rel = find_join_rel(root, varnos);
5056  node = basenode; /* strip any relabeling */
5057  }
5058  else if (bms_is_member(varRelid, varnos))
5059  {
5060  /* ignore the vars belonging to other relations */
5061  vardata->rel = find_base_rel(root, varRelid);
5062  node = basenode; /* strip any relabeling */
5063  /* note: no point in expressional-index search here */
5064  }
5065  /* else treat it as a constant */
5066  break;
5067  }
5068 
5069  bms_free(varnos);
5070 
5071  vardata->var = node;
5072  vardata->atttype = exprType(node);
5073  vardata->atttypmod = exprTypmod(node);
5074 
5075  if (onerel)
5076  {
5077  /*
5078  * We have an expression in vars of a single relation. Try to match
5079  * it to expressional index columns, in hopes of finding some
5080  * statistics.
5081  *
5082  * Note that we consider all index columns including INCLUDE columns,
5083  * since there could be stats for such columns. But the test for
5084  * uniqueness needs to be warier.
5085  *
5086  * XXX it's conceivable that there are multiple matches with different
5087  * index opfamilies; if so, we need to pick one that matches the
5088  * operator we are estimating for. FIXME later.
5089  */
5090  ListCell *ilist;
5091  ListCell *slist;
5092 
5093  foreach(ilist, onerel->indexlist)
5094  {
5095  IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
5096  ListCell *indexpr_item;
5097  int pos;
5098 
5099  indexpr_item = list_head(index->indexprs);
5100  if (indexpr_item == NULL)
5101  continue; /* no expressions here... */
5102 
5103  for (pos = 0; pos < index->ncolumns; pos++)
5104  {
5105  if (index->indexkeys[pos] == 0)
5106  {
5107  Node *indexkey;
5108 
5109  if (indexpr_item == NULL)
5110  elog(ERROR, "too few entries in indexprs list");
5111  indexkey = (Node *) lfirst(indexpr_item);
5112  if (indexkey && IsA(indexkey, RelabelType))
5113  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5114  if (equal(node, indexkey))
5115  {
5116  /*
5117  * Found a match ... is it a unique index? Tests here
5118  * should match has_unique_index().
5119  */
5120  if (index->unique &&
5121  index->nkeycolumns == 1 &&
5122  pos == 0 &&
5123  (index->indpred == NIL || index->predOK))
5124  vardata->isunique = true;
5125 
5126  /*
5127  * Has it got stats? We only consider stats for
5128  * non-partial indexes, since partial indexes probably
5129  * don't reflect whole-relation statistics; the above
5130  * check for uniqueness is the only info we take from
5131  * a partial index.
5132  *
5133  * An index stats hook, however, must make its own
5134  * decisions about what to do with partial indexes.
5135  */
5136  if (get_index_stats_hook &&
5137  (*get_index_stats_hook) (root, index->indexoid,
5138  pos + 1, vardata))
5139  {
5140  /*
5141  * The hook took control of acquiring a stats
5142  * tuple. If it did supply a tuple, it'd better
5143  * have supplied a freefunc.
5144  */
5145  if (HeapTupleIsValid(vardata->statsTuple) &&
5146  !vardata->freefunc)
5147  elog(ERROR, "no function provided to release variable stats with");
5148  }
5149  else if (index->indpred == NIL)
5150  {
5151  vardata->statsTuple =
5152  SearchSysCache3(STATRELATTINH,
5153  ObjectIdGetDatum(index->indexoid),
5154  Int16GetDatum(pos + 1),
5155  BoolGetDatum(false));
5156  vardata->freefunc = ReleaseSysCache;
5157 
5158  if (HeapTupleIsValid(vardata->statsTuple))
5159  {
5160  /* Get index's table for permission check */
5161  RangeTblEntry *rte;
5162  Oid userid;
5163 
5164  rte = planner_rt_fetch(index->rel->relid, root);
5165  Assert(rte->rtekind == RTE_RELATION);
5166 
5167  /*
5168  * Use checkAsUser if it's set, in case we're
5169  * accessing the table via a view.
5170  */
5171  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5172 
5173  /*
5174  * For simplicity, we insist on the whole
5175  * table being selectable, rather than trying
5176  * to identify which column(s) the index
5177  * depends on. Also require all rows to be
5178  * selectable --- there must be no
5179  * securityQuals from security barrier views
5180  * or RLS policies.
5181  */
5182  vardata->acl_ok =
5183  rte->securityQuals == NIL &&
5184  (pg_class_aclcheck(rte->relid, userid,
5185  ACL_SELECT) == ACLCHECK_OK);
5186 
5187  /*
5188  * If the user doesn't have permissions to
5189  * access an inheritance child relation, check
5190  * the permissions of the table actually
5191  * mentioned in the query, since most likely
5192  * the user does have that permission. Note
5193  * that whole-table select privilege on the
5194  * parent doesn't quite guarantee that the
5195  * user could read all columns of the child.
5196  * But in practice it's unlikely that any
5197  * interesting security violation could result
5198  * from allowing access to the expression
5199  * index's stats, so we allow it anyway. See
5200  * similar code in examine_simple_variable()
5201  * for additional comments.
5202  */
5203  if (!vardata->acl_ok &&
5204  root->append_rel_array != NULL)
5205  {
5206  AppendRelInfo *appinfo;
5207  Index varno = index->rel->relid;
5208 
5209  appinfo = root->append_rel_array[varno];
5210  while (appinfo &&
5211  planner_rt_fetch(appinfo->parent_relid,
5212  root)->rtekind == RTE_RELATION)
5213  {
5214  varno = appinfo->parent_relid;
5215  appinfo = root->append_rel_array[varno];
5216  }
5217  if (varno != index->rel->relid)
5218  {
5219  /* Repeat access check on this rel */
5220  rte = planner_rt_fetch(varno, root);
5221  Assert(rte->rtekind == RTE_RELATION);
5222 
5223  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5224 
5225  vardata->acl_ok =
5226  rte->securityQuals == NIL &&
5227  (pg_class_aclcheck(rte->relid,
5228  userid,
5229  ACL_SELECT) == ACLCHECK_OK);
5230  }
5231  }
5232  }
5233  else
5234  {
5235  /* suppress leakproofness checks later */
5236  vardata->acl_ok = true;
5237  }
5238  }
5239  if (vardata->statsTuple)
5240  break;
5241  }
5242  indexpr_item = lnext(index->indexprs, indexpr_item);
5243  }
5244  }
5245  if (vardata->statsTuple)
5246  break;
5247  }
5248 
5249  /*
5250  * Search extended statistics for one with a matching expression.
5251  * There might be multiple ones, so just grab the first one. In the
5252  * future, we might consider the statistics target (and pick the most
5253  * accurate statistics) and maybe some other parameters.
5254  */
5255  foreach(slist, onerel->statlist)
5256  {
5257  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist);
5258  RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
5259  ListCell *expr_item;
5260  int pos;
5261 
5262  /*
5263  * Stop once we've found statistics for the expression (either
5264  * from extended stats, or for an index in the preceding loop).
5265  */
5266  if (vardata->statsTuple)
5267  break;
5268 
5269  /* skip stats without per-expression stats */
5270  if (info->kind != STATS_EXT_EXPRESSIONS)
5271  continue;
5272 
5273  pos = 0;
5274  foreach(expr_item, info->exprs)
5275  {
5276  Node *expr = (Node *) lfirst(expr_item);
5277 
5278  Assert(expr);
5279 
5280  /* strip RelabelType before comparing it */
5281  if (expr && IsA(expr, RelabelType))
5282  expr = (Node *) ((RelabelType *) expr)->arg;
5283 
5284  /* found a match, see if we can extract pg_statistic row */
5285  if (equal(node, expr))
5286  {
5287  Oid userid;
5288 
5289  /*
5290  * XXX Not sure if we should cache the tuple somewhere.
5291  * Now we just create a new copy every time.
5292  */
5293  vardata->statsTuple =
5294  statext_expressions_load(info->statOid, rte->inh, pos);
5295 
5296  vardata->freefunc = ReleaseDummy;
5297 
5298  /*
5299  * Use checkAsUser if it's set, in case we're accessing
5300  * the table via a view.
5301  */
5302  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5303 
5304  /*
5305  * For simplicity, we insist on the whole table being
5306  * selectable, rather than trying to identify which
5307  * column(s) the statistics object depends on. Also
5308  * require all rows to be selectable --- there must be no
5309  * securityQuals from security barrier views or RLS
5310  * policies.
5311  */
5312  vardata->acl_ok =
5313  rte->securityQuals == NIL &&
5314  (pg_class_aclcheck(rte->relid, userid,
5315  ACL_SELECT) == ACLCHECK_OK);
5316 
5317  /*
5318  * If the user doesn't have permissions to access an
5319  * inheritance child relation, check the permissions of
5320  * the table actually mentioned in the query, since most
5321  * likely the user does have that permission. Note that
5322  * whole-table select privilege on the parent doesn't
5323  * quite guarantee that the user could read all columns of
5324  * the child. But in practice it's unlikely that any
5325  * interesting security violation could result from
5326  * allowing access to the expression stats, so we allow it
5327  * anyway. See similar code in examine_simple_variable()
5328  * for additional comments.
5329  */
5330  if (!vardata->acl_ok &&
5331  root->append_rel_array != NULL)
5332  {
5333  AppendRelInfo *appinfo;
5334  Index varno = onerel->relid;
5335 
5336  appinfo = root->append_rel_array[varno];
5337  while (appinfo &&
5338  planner_rt_fetch(appinfo->parent_relid,
5339  root)->rtekind == RTE_RELATION)
5340  {
5341  varno = appinfo->parent_relid;
5342  appinfo = root->append_rel_array[varno];
5343  }
5344  if (varno != onerel->relid)
5345  {
5346  /* Repeat access check on this rel */
5347  rte = planner_rt_fetch(varno, root);
5348  Assert(rte->rtekind == RTE_RELATION);
5349 
5350  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5351 
5352  vardata->acl_ok =
5353  rte->securityQuals == NIL &&
5354  (pg_class_aclcheck(rte->relid,
5355  userid,
5356  ACL_SELECT) == ACLCHECK_OK);
5357  }
5358  }
5359 
5360  break;
5361  }
5362 
5363  pos++;
5364  }
5365  }
5366  }
5367 }
int bms_singleton_member(const Bitmapset *a)
Definition: bitmapset.c:579
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:674
@ BMS_SINGLETON
Definition: bitmapset.h:69
@ BMS_EMPTY_SET
Definition: bitmapset.h:68
@ BMS_MULTIPLE
Definition: bitmapset.h:70
HeapTuple statext_expressions_load(Oid stxoid, bool inh, int idx)
int32 exprTypmod(const Node *expr)
Definition: nodeFuncs.c:286
static ListCell * list_head(const List *l)
Definition: pg_list.h:125
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:322
bool has_unique_index(RelOptInfo *rel, AttrNumber attno)
Definition: plancat.c:2107
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:439
static void ReleaseDummy(HeapTuple tuple)
Definition: selfuncs.c:4946
List * indexlist
Definition: pathnodes.h:719
Oid vartype
Definition: primnodes.h:202
int32 vartypmod
Definition: primnodes.h:203
int32 atttypmod
Definition: selfuncs.h:94
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition: var.c:100

References VariableStatData::acl_ok, ACL_SELECT, ACLCHECK_OK, PlannerInfo::append_rel_array, arg, Assert(), VariableStatData::atttype, VariableStatData::atttypmod, BMS_EMPTY_SET, bms_free(), bms_is_member(), bms_membership(), BMS_MULTIPLE, BMS_SINGLETON, bms_singleton_member(), BoolGetDatum, RangeTblEntry::checkAsUser, elog, equal(), ERROR, examine_simple_variable(), StatisticExtInfo::exprs, exprType(), exprTypmod(), find_base_rel(), find_join_rel(), VariableStatData::freefunc, get_index_stats_hook, GetUserId(), has_unique_index(), HeapTupleIsValid, if(), RelOptInfo::indexlist, RangeTblEntry::inh, Int16GetDatum, IsA, VariableStatData::isunique, StatisticExtInfo::kind, lfirst, list_head(), lnext(), MemSet, NIL, ObjectIdGetDatum, AppendRelInfo::parent_relid, pg_class_aclcheck(), planner_rt_fetch, pull_varnos(), VariableStatData::rel, ReleaseDummy(), ReleaseSysCache(), RangeTblEntry::relid, RelOptInfo::relid, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), RangeTblEntry::securityQuals, statext_expressions_load(), RelOptInfo::statlist, StatisticExtInfo::statOid, STATRELATTINH, VariableStatData::statsTuple, VariableStatData::var, Var::varattno, Var::varno, Var::vartype, VariableStatData::vartype, and Var::vartypmod.

Referenced by booltestsel(), boolvarsel(), estimate_hash_bucket_stats(), estimate_num_groups_incremental(), get_join_variables(), get_restriction_variable(), mergejoinscansel(), nulltestsel(), and scalararraysel_containment().
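For callers, the usual pattern around examine_variable() is visible throughout this file: fill a VariableStatData, check whether a stats tuple was obtained, and always finish with ReleaseVariableStats(). A condensed sketch (example_selectivity and the 0.5 default are invented for illustration):

#include "postgres.h"

#include "access/htup_details.h"
#include "catalog/pg_statistic.h"
#include "utils/selfuncs.h"

static double
example_selectivity(PlannerInfo *root, Node *arg, int varRelid)
{
    VariableStatData vardata;
    double      selec = 0.5;    /* arbitrary default for this sketch */

    examine_variable(root, arg, varRelid, &vardata);

    if (HeapTupleIsValid(vardata.statsTuple))
    {
        Form_pg_statistic stats =
            (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);

        /* for example, never claim more than the non-null fraction */
        selec = Min(selec, 1.0 - stats->stanullfrac);
    }

    ReleaseVariableStats(vardata);

    CLAMP_PROBABILITY(selec);
    return selec;
}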

◆ find_join_input_rel()

static RelOptInfo * find_join_input_rel ( PlannerInfo *  root,
Relids  relids 
)
static

Definition at line 6281 of file selfuncs.c.

6282 {
6283  RelOptInfo *rel = NULL;
6284 
6285  switch (bms_membership(relids))
6286  {
6287  case BMS_EMPTY_SET:
6288  /* should not happen */
6289  break;
6290  case BMS_SINGLETON:
6291  rel = find_base_rel(root, bms_singleton_member(relids));
6292  break;
6293  case BMS_MULTIPLE:
6294  rel = find_join_rel(root, relids);
6295  break;
6296  }
6297 
6298  if (rel == NULL)
6299  elog(ERROR, "could not find RelOptInfo for given relids");
6300 
6301  return rel;
6302 }

References BMS_EMPTY_SET, bms_membership(), BMS_MULTIPLE, BMS_SINGLETON, bms_singleton_member(), elog, ERROR, find_base_rel(), and find_join_rel().

Referenced by eqjoinsel().

◆ generic_restriction_selectivity()

double generic_restriction_selectivity ( PlannerInfo *  root,
Oid  oproid,
Oid  collation,
List *  args,
int  varRelid,
double  default_selectivity 
)

Definition at line 911 of file selfuncs.c.

914 {
915  double selec;
916  VariableStatData vardata;
917  Node *other;
918  bool varonleft;
919 
920  /*
921  * If expression is not variable OP something or something OP variable,
922  * then punt and return the default estimate.
923  */
924  if (!get_restriction_variable(root, args, varRelid,
925  &vardata, &other, &varonleft))
926  return default_selectivity;
927 
928  /*
929  * If the something is a NULL constant, assume operator is strict and
930  * return zero, ie, operator will never return TRUE.
931  */
932  if (IsA(other, Const) &&
933  ((Const *) other)->constisnull)
934  {
935  ReleaseVariableStats(vardata);
936  return 0.0;
937  }
938 
939  if (IsA(other, Const))
940  {
941  /* Variable is being compared to a known non-null constant */
942  Datum constval = ((Const *) other)->constvalue;
943  FmgrInfo opproc;
944  double mcvsum;
945  double mcvsel;
946  double nullfrac;
947  int hist_size;
948 
949  fmgr_info(get_opcode(oproid), &opproc);
950 
951  /*
952  * Calculate the selectivity for the column's most common values.
953  */
954  mcvsel = mcv_selectivity(&vardata, &opproc, collation,
955  constval, varonleft,
956  &mcvsum);
957 
958  /*
959  * If the histogram is large enough, see what fraction of it matches
960  * the query, and assume that's representative of the non-MCV
961  * population. Otherwise use the default selectivity for the non-MCV
962  * population.
963  */
964  selec = histogram_selectivity(&vardata, &opproc, collation,
965  constval, varonleft,
966  10, 1, &hist_size);
967  if (selec < 0)
968  {
969  /* Nope, fall back on default */
970  selec = default_selectivity;
971  }
972  else if (hist_size < 100)
973  {
974  /*
975  * For histogram sizes from 10 to 100, we combine the histogram
976  * and default selectivities, putting increasingly more trust in
977  * the histogram for larger sizes.
978  */
979  double hist_weight = hist_size / 100.0;
980 
981  selec = selec * hist_weight +
982  default_selectivity * (1.0 - hist_weight);
983  }
984 
985  /* In any case, don't believe extremely small or large estimates. */
986  if (selec < 0.0001)
987  selec = 0.0001;
988  else if (selec > 0.9999)
989  selec = 0.9999;
990 
991  /* Don't forget to account for nulls. */
992  if (HeapTupleIsValid(vardata.statsTuple))
993  nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
994  else
995  nullfrac = 0.0;
996 
997  /*
998  * Now merge the results from the MCV and histogram calculations,
999  * realizing that the histogram covers only the non-null values that
1000  * are not listed in MCV.
1001  */
1002  selec *= 1.0 - nullfrac - mcvsum;
1003  selec += mcvsel;
1004  }
1005  else
1006  {
1007  /* Comparison value is not constant, so we can't do anything */
1008  selec = default_selectivity;
1009  }
1010 
1011  ReleaseVariableStats(vardata);
1012 
1013  /* result should be in range, but make sure... */
1014  CLAMP_PROBABILITY(selec);
1015 
1016  return selec;
1017 }
double mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, double *sumcommonp)
Definition: selfuncs.c:729
double histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, int min_hist_size, int n_skip, int *hist_size)
Definition: selfuncs.c:820

References generate_unaccent_rules::args, CLAMP_PROBABILITY, fmgr_info(), get_opcode(), get_restriction_variable(), GETSTRUCT, HeapTupleIsValid, histogram_selectivity(), IsA, mcv_selectivity(), ReleaseVariableStats, and VariableStatData::statsTuple.

Referenced by ltreeparentsel(), and matchingsel().
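As a numeric illustration of the blending above (all figures invented): suppose 15% of a 40-entry histogram matches, the default selectivity is 0.005, stanullfrac is 0.10, the MCV list covers 30% of the rows, and matching MCV entries account for 2%:

#include <stdio.h>

int
main(void)
{
    double  default_selectivity = 0.005;
    double  hist_selec = 0.15;  /* fraction of histogram entries matching */
    int     hist_size = 40;     /* fewer than 100, so blend with default */
    double  nullfrac = 0.10;    /* pg_statistic.stanullfrac */
    double  mcvsum = 0.30;      /* total frequency of the MCV list */
    double  mcvsel = 0.02;      /* frequency of matching MCV entries */
    double  hist_weight = hist_size / 100.0;
    double  selec;

    /* trust the histogram more as it gets larger */
    selec = hist_selec * hist_weight +
        default_selectivity * (1.0 - hist_weight);  /* 0.063 */

    /* the histogram describes only the non-null, non-MCV population */
    selec *= 1.0 - nullfrac - mcvsum;               /* 0.0378 */
    selec += mcvsel;                                /* 0.0578 */

    printf("selectivity = %.4f\n", selec);
    return 0;
}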

◆ genericcostestimate()

void genericcostestimate ( PlannerInfo *  root,
IndexPath *  path,
double  loop_count,
GenericCosts *  costs 
)

Definition at line 6400 of file selfuncs.c.

6404 {
6405  IndexOptInfo *index = path->indexinfo;
6406  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
6407  List *indexOrderBys = path->indexorderbys;
6408  Cost indexStartupCost;
6409  Cost indexTotalCost;
6410  Selectivity indexSelectivity;
6411  double indexCorrelation;
6412  double numIndexPages;
6413  double numIndexTuples;
6414  double spc_random_page_cost;
6415  double num_sa_scans;
6416  double num_outer_scans;
6417  double num_scans;
6418  double qual_op_cost;
6419  double qual_arg_cost;
6420  List *selectivityQuals;
6421  ListCell *l;
6422 
6423  /*
6424  * If the index is partial, AND the index predicate with the explicitly
6425  * given indexquals to produce a more accurate idea of the index
6426  * selectivity.
6427  */
6428  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
6429 
6430  /*
6431  * Check for ScalarArrayOpExpr index quals, and estimate the number of
6432  * index scans that will be performed.
6433  */
6434  num_sa_scans = 1;
6435  foreach(l, indexQuals)
6436  {
6437  RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6438 
6439  if (IsA(rinfo->clause, ScalarArrayOpExpr))
6440  {
6441  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6442  int alength = estimate_array_length(lsecond(saop->args));
6443 
6444  if (alength > 1)
6445  num_sa_scans *= alength;
6446  }
6447  }
6448 
6449  /* Estimate the fraction of main-table tuples that will be visited */
6450  indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6451  index->rel->relid,
6452  JOIN_INNER,
6453  NULL);
6454 
6455  /*
6456  * If caller didn't give us an estimate, estimate the number of index
6457  * tuples that will be visited. We do it in this rather peculiar-looking
6458  * way in order to get the right answer for partial indexes.
6459  */
6460  numIndexTuples = costs->numIndexTuples;
6461  if (numIndexTuples <= 0.0)
6462  {
6463  numIndexTuples = indexSelectivity * index->rel->tuples;
6464 
6465  /*
6466  * The above calculation counts all the tuples visited across all
6467  * scans induced by ScalarArrayOpExpr nodes. We want to consider the
6468  * average per-indexscan number, so adjust. This is a handy place to
6469  * round to integer, too. (If caller supplied tuple estimate, it's
6470  * responsible for handling these considerations.)
6471  */
6472  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6473  }
6474 
6475  /*
6476  * We can bound the number of tuples by the index size in any case. Also,
6477  * always estimate at least one tuple is touched, even when
6478  * indexSelectivity estimate is tiny.
6479  */
6480  if (numIndexTuples > index->tuples)
6481  numIndexTuples = index->tuples;
6482  if (numIndexTuples < 1.0)
6483  numIndexTuples = 1.0;
6484 
6485  /*
6486  * Estimate the number of index pages that will be retrieved.
6487  *
6488  * We use the simplistic method of taking a pro-rata fraction of the total
6489  * number of index pages. In effect, this counts only leaf pages and not
6490  * any overhead such as index metapage or upper tree levels.
6491  *
6492  * In practice access to upper index levels is often nearly free because
6493  * those tend to stay in cache under load; moreover, the cost involved is
6494  * highly dependent on index type. We therefore ignore such costs here
6495  * and leave it to the caller to add a suitable charge if needed.
6496  */
6497  if (index->pages > 1 && index->tuples > 1)
6498  numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6499  else
6500  numIndexPages = 1.0;
6501 
6502  /* fetch estimated page cost for tablespace containing index */
6503  get_tablespace_page_costs(index->reltablespace,
6504  &spc_random_page_cost,
6505  NULL);
6506 
6507  /*
6508  * Now compute the disk access costs.
6509  *
6510  * The above calculations are all per-index-scan. However, if we are in a
6511  * nestloop inner scan, we can expect the scan to be repeated (with
6512  * different search keys) for each row of the outer relation. Likewise,
6513  * ScalarArrayOpExpr quals result in multiple index scans. This creates
6514  * the potential for cache effects to reduce the number of disk page
6515  * fetches needed. We want to estimate the average per-scan I/O cost in
6516  * the presence of caching.
6517  *
6518  * We use the Mackert-Lohman formula (see costsize.c for details) to
6519  * estimate the total number of page fetches that occur. While this
6520  * wasn't what it was designed for, it seems a reasonable model anyway.
6521  * Note that we are counting pages not tuples anymore, so we take N = T =
6522  * index size, as if there were one "tuple" per page.
6523  */
6524  num_outer_scans = loop_count;
6525  num_scans = num_sa_scans * num_outer_scans;
6526 
6527  if (num_scans > 1)
6528  {
6529  double pages_fetched;
6530 
6531  /* total page fetches ignoring cache effects */
6532  pages_fetched = numIndexPages * num_scans;
6533 
6534  /* use Mackert and Lohman formula to adjust for cache effects */
6535  pages_fetched = index_pages_fetched(pages_fetched,
6536  index->pages,
6537  (double) index->pages,
6538  root);
6539 
6540  /*
6541  * Now compute the total disk access cost, and then report a pro-rated
6542  * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
6543  * since that's internal to the indexscan.)
6544  */
6545  indexTotalCost = (pages_fetched * spc_random_page_cost)
6546  / num_outer_scans;
6547  }
6548  else
6549  {
6550  /*
6551  * For a single index scan, we just charge spc_random_page_cost per
6552  * page touched.
6553  */
6554  indexTotalCost = numIndexPages * spc_random_page_cost;
6555  }
6556 
6557  /*
6558  * CPU cost: any complex expressions in the indexquals will need to be
6559  * evaluated once at the start of the scan to reduce them to runtime keys
6560  * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
6561  * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6562  * indexqual operator. Because we have numIndexTuples as a per-scan
6563  * number, we have to multiply by num_sa_scans to get the correct result
6564  * for ScalarArrayOpExpr cases. Similarly add in costs for any index
6565  * ORDER BY expressions.
6566  *
6567  * Note: this neglects the possible costs of rechecking lossy operators.
6568  * Detecting that that might be needed seems more expensive than it's
6569  * worth, though, considering all the other inaccuracies here ...
6570  */
6571  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals) +
6572  index_other_operands_eval_cost(root, indexOrderBys);
6573  qual_op_cost = cpu_operator_cost *
6574  (list_length(indexQuals) + list_length(indexOrderBys));
6575 
6576  indexStartupCost = qual_arg_cost;
6577  indexTotalCost += qual_arg_cost;
6578  indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6579 
6580  /*
6581  * Generic assumption about index correlation: there isn't any.
6582  */
6583  indexCorrelation = 0.0;
6584 
6585  /*
6586  * Return everything to caller.
6587  */
6588  costs->indexStartupCost = indexStartupCost;
6589  costs->indexTotalCost = indexTotalCost;
6590  costs->indexSelectivity = indexSelectivity;
6591  costs->indexCorrelation = indexCorrelation;
6592  costs->numIndexPages = numIndexPages;
6593  costs->numIndexTuples = numIndexTuples;
6594  costs->spc_random_page_cost = spc_random_page_cost;
6595  costs->num_sa_scans = num_sa_scans;
6596 }
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:868
double cpu_index_tuple_cost
Definition: costsize.c:123
double spc_random_page_cost
Definition: selfuncs.h:130
List * indexorderbys
Definition: pathnodes.h:1259

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, estimate_array_length(), get_quals_from_indexclauses(), get_tablespace_page_costs(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexPath::indexorderbys, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, IsA, JOIN_INNER, lfirst, list_length(), lsecond, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, and GenericCosts::spc_random_page_cost.

Referenced by blcostestimate(), btcostestimate(), gistcostestimate(), hashcostestimate(), and spgcostestimate().
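To see how those pieces combine in the simplest case (one indexqual, no ScalarArrayOpExpr, no ORDER BY, qual_arg_cost taken as zero, default cost parameters, and invented index size figures), here is a standalone sketch of the single-scan arithmetic:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double  index_tuples = 1000000.0;    /* index->tuples */
    double  index_pages = 2745.0;        /* index->pages */
    double  selectivity = 0.001;         /* clauselist_selectivity() result */
    double  spc_random_page_cost = 4.0;  /* default random_page_cost */
    double  cpu_index_tuple_cost = 0.005;
    double  cpu_operator_cost = 0.0025;
    int     nquals = 1;                  /* one indexqual, no ORDER BY */

    double  num_index_tuples = rint(selectivity * index_tuples);   /* 1000 */
    double  num_index_pages = ceil(num_index_tuples * index_pages /
                                   index_tuples);                  /* 3 */
    double  index_total_cost =
        num_index_pages * spc_random_page_cost +                   /* 12.0 */
        num_index_tuples * (cpu_index_tuple_cost +
                            cpu_operator_cost * nquals);           /* + 7.5 */

    printf("indexTotalCost = %.2f\n", index_total_cost);           /* 19.50 */
    return 0;
}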

◆ get_actual_variable_endpoint()

static bool get_actual_variable_endpoint ( Relation  heapRel,
Relation  indexRel,
ScanDirection  indexscandir,
ScanKey  scankeys,
int16  typLen,
bool  typByVal,
TupleTableSlot *  tableslot,
MemoryContext  outercontext,
Datum *  endpointDatum 
)
static

Definition at line 6152 of file selfuncs.c.

6161 {
6162  bool have_data = false;
6163  SnapshotData SnapshotNonVacuumable;
6164  IndexScanDesc index_scan;
6165  Buffer vmbuffer = InvalidBuffer;
6166  ItemPointer tid;
6167  Datum values[INDEX_MAX_KEYS];
6168  bool isnull[INDEX_MAX_KEYS];
6169  MemoryContext oldcontext;
6170 
6171  /*
6172  * We use the index-only-scan machinery for this. With mostly-static
6173  * tables that's a win because it avoids a heap visit. It's also a win
6174  * for dynamic data, but the reason is less obvious; read on for details.
6175  *
6176  * In principle, we should scan the index with our current active
6177  * snapshot, which is the best approximation we've got to what the query
6178  * will see when executed. But that won't be exact if a new snap is taken
6179  * before running the query, and it can be very expensive if a lot of
6180  * recently-dead or uncommitted rows exist at the beginning or end of the
6181  * index (because we'll laboriously fetch each one and reject it).
6182  * Instead, we use SnapshotNonVacuumable. That will accept recently-dead
6183  * and uncommitted rows as well as normal visible rows. On the other
6184  * hand, it will reject known-dead rows, and thus not give a bogus answer
6185  * when the extreme value has been deleted (unless the deletion was quite
6186  * recent); that case motivates not using SnapshotAny here.
6187  *
6188  * A crucial point here is that SnapshotNonVacuumable, with
6189  * GlobalVisTestFor(heapRel) as horizon, yields the inverse of the
6190  * condition that the indexscan will use to decide that index entries are
6191  * killable (see heap_hot_search_buffer()). Therefore, if the snapshot
6192  * rejects a tuple (or more precisely, all tuples of a HOT chain) and we
6193  * have to continue scanning past it, we know that the indexscan will mark
6194  * that index entry killed. That means that the next
6195  * get_actual_variable_endpoint() call will not have to re-consider that
6196  * index entry. In this way we avoid repetitive work when this function
6197  * is used a lot during planning.
6198  *
6199  * But using SnapshotNonVacuumable creates a hazard of its own. In a
6200  * recently-created index, some index entries may point at "broken" HOT
6201  * chains in which not all the tuple versions contain data matching the
6202  * index entry. The live tuple version(s) certainly do match the index,
6203  * but SnapshotNonVacuumable can accept recently-dead tuple versions that
6204  * don't match. Hence, if we took data from the selected heap tuple, we
6205  * might get a bogus answer that's not close to the index extremal value,
6206  * or could even be NULL. We avoid this hazard because we take the data
6207  * from the index entry not the heap.
6208  */
6209  InitNonVacuumableSnapshot(SnapshotNonVacuumable,
6210  GlobalVisTestFor(heapRel));
6211 
6212  index_scan = index_beginscan(heapRel, indexRel,
6213  &SnapshotNonVacuumable,
6214  1, 0);
6215  /* Set it up for index-only scan */
6216  index_scan->xs_want_itup = true;
6217  index_rescan(index_scan, scankeys, 1, NULL, 0);
6218 
6219  /* Fetch first/next tuple in specified direction */
6220  while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
6221  {
6222  if (!VM_ALL_VISIBLE(heapRel,
6223  ItemPointerGetBlockNumber(tid),
6224  &vmbuffer))
6225  {
6226  /* Rats, we have to visit the heap to check visibility */
6227  if (!index_fetch_heap(index_scan, tableslot))
6228  continue; /* no visible tuple, try next index entry */
6229 
6230  /* We don't actually need the heap tuple for anything */
6231  ExecClearTuple(tableslot);
6232 
6233  /*
6234  * We don't care whether there's more than one visible tuple in
6235  * the HOT chain; if any are visible, that's good enough.
6236  */
6237  }
6238 
6239  /*
6240  * We expect that btree will return data in IndexTuple not HeapTuple
6241  * format. It's not lossy either.
6242  */
6243  if (!index_scan->xs_itup)
6244  elog(ERROR, "no data returned for index-only scan");
6245  if (index_scan->xs_recheck)
6246  elog(ERROR, "unexpected recheck indication from btree");
6247 
6248  /* OK to deconstruct the index tuple */
6249  index_deform_tuple(index_scan->xs_itup,
6250  index_scan->xs_itupdesc,
6251  values, isnull);
6252 
6253  /* Shouldn't have got a null, but be careful */
6254  if (isnull[0])
6255  elog(ERROR, "found unexpected null value in index \"%s\"",
6256  RelationGetRelationName(indexRel));
6257 
6258  /* Copy the index column value out to caller's context */
6259  oldcontext = MemoryContextSwitchTo(outercontext);
6260  *endpointDatum = datumCopy(values[0], typByVal, typLen);
6261  MemoryContextSwitchTo(oldcontext);
6262  have_data = true;
6263  break;
6264  }
6265 
6266  if (vmbuffer != InvalidBuffer)
6267  ReleaseBuffer(vmbuffer);
6268  index_endscan(index_scan);
6269 
6270  return have_data;
6271 }
static Datum values[MAXATTR]
Definition: bootstrap.c:156
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3915
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
ItemPointer index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
Definition: indexam.c:517
IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, Snapshot snapshot, int nkeys, int norderbys)
Definition: indexam.c:205
bool index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot)
Definition: indexam.c:575
void index_endscan(IndexScanDesc scan)
Definition: indexam.c:323
void index_rescan(IndexScanDesc scan, ScanKey keys, int nkeys, ScanKey orderbys, int norderbys)
Definition: indexam.c:297
void index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:437
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define INDEX_MAX_KEYS
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4066
#define RelationGetRelationName(relation)
Definition: rel.h:523
#define InitNonVacuumableSnapshot(snapshotdata, vistestp)
Definition: snapmgr.h:82
IndexTuple xs_itup
Definition: relscan.h:142
struct TupleDescData * xs_itupdesc
Definition: relscan.h:143
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
#define VM_ALL_VISIBLE(r, b, v)
Definition: visibilitymap.h:24

References datumCopy(), elog, ERROR, ExecClearTuple(), GlobalVisTestFor(), index_beginscan(), index_deform_tuple(), index_endscan(), index_fetch_heap(), index_getnext_tid(), INDEX_MAX_KEYS, index_rescan(), InitNonVacuumableSnapshot, InvalidBuffer, ItemPointerGetBlockNumber, MemoryContextSwitchTo(), RelationGetRelationName, ReleaseBuffer(), values, VM_ALL_VISIBLE, IndexScanDescData::xs_itup, IndexScanDescData::xs_itupdesc, IndexScanDescData::xs_recheck, and IndexScanDescData::xs_want_itup.

Referenced by get_actual_variable_range().

◆ get_actual_variable_range()

static bool get_actual_variable_range ( PlannerInfo *  root,
VariableStatData *  vardata,
Oid  sortop,
Oid  collation,
Datum *  min,
Datum *  max 
)
static

Definition at line 5979 of file selfuncs.c.

5982 {
5983  bool have_data = false;
5984  RelOptInfo *rel = vardata->rel;
5985  RangeTblEntry *rte;
5986  ListCell *lc;
5987 
5988  /* No hope if no relation or it doesn't have indexes */
5989  if (rel == NULL || rel->indexlist == NIL)
5990  return false;
5991  /* If it has indexes it must be a plain relation */
5992  rte = root->simple_rte_array[rel->relid];
5993  Assert(rte->rtekind == RTE_RELATION);
5994 
5995  /* Search through the indexes to see if any match our problem */
5996  foreach(lc, rel->indexlist)
5997  {
5998  IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5999  ScanDirection indexscandir;
6000 
6001  /* Ignore non-btree indexes */
6002  if (index->relam != BTREE_AM_OID)
6003  continue;
6004 
6005  /*
6006  * Ignore partial indexes --- we only want stats that cover the entire
6007  * relation.
6008  */
6009  if (index->indpred != NIL)
6010  continue;
6011 
6012  /*
6013  * The index list might include hypothetical indexes inserted by a
6014  * get_relation_info hook --- don't try to access them.
6015  */
6016  if (index->hypothetical)
6017  continue;
6018 
6019  /*
6020  * The first index column must match the desired variable, sortop, and
6021  * collation --- but we can use a descending-order index.
6022  */
6023  if (collation != index->indexcollations[0])
6024  continue; /* test first 'cause it's cheapest */
6025  if (!match_index_to_operand(vardata->var, 0, index))
6026  continue;
6027  switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
6028  {
6029  case BTLessStrategyNumber:
6030  if (index->reverse_sort[0])
6031  indexscandir = BackwardScanDirection;
6032  else
6033  indexscandir = ForwardScanDirection;
6034  break;
6035  case BTGreaterStrategyNumber:
6036  if (index->reverse_sort[0])
6037  indexscandir = ForwardScanDirection;
6038  else
6039  indexscandir = BackwardScanDirection;
6040  break;
6041  default:
6042  /* index doesn't match the sortop */
6043  continue;
6044  }
6045 
6046  /*
6047  * Found a suitable index to extract data from. Set up some data that
6048  * can be used by both invocations of get_actual_variable_endpoint.
6049  */
6050  {
6051  MemoryContext tmpcontext;
6052  MemoryContext oldcontext;
6053  Relation heapRel;
6054  Relation indexRel;
6055  TupleTableSlot *slot;
6056  int16 typLen;
6057  bool typByVal;
6058  ScanKeyData scankeys[1];
6059 
6060  /* Make sure any cruft gets recycled when we're done */
6062  "get_actual_variable_range workspace",
6064  oldcontext = MemoryContextSwitchTo(tmpcontext);
6065 
6066  /*
6067  * Open the table and index so we can read from them. We should
6068  * already have some type of lock on each.
6069  */
6070  heapRel = table_open(rte->relid, NoLock);
6071  indexRel = index_open(index->indexoid, NoLock);
6072 
6073  /* build some stuff needed for indexscan execution */
6074  slot = table_slot_create(heapRel, NULL);
6075  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
6076 
6077  /* set up an IS NOT NULL scan key so that we ignore nulls */
6078  ScanKeyEntryInitialize(&scankeys[0],
6079  SK_ISNULL | SK_SEARCHNOTNULL,
6080  1, /* index col to scan */
6081  InvalidStrategy, /* no strategy */
6082  InvalidOid, /* no strategy subtype */
6083  InvalidOid, /* no collation */
6084  InvalidOid, /* no reg proc for this */
6085  (Datum) 0); /* constant */
6086 
6087  /* If min is requested ... */
6088  if (min)
6089  {
6090  have_data = get_actual_variable_endpoint(heapRel,
6091  indexRel,
6092  indexscandir,
6093  scankeys,
6094  typLen,
6095  typByVal,
6096  slot,
6097  oldcontext,
6098  min);
6099  }
6100  else
6101  {
6102  /* If min not requested, assume index is nonempty */
6103  have_data = true;
6104  }
6105 
6106  /* If max is requested, and we didn't find the index is empty */
6107  if (max && have_data)
6108  {
6109  /* scan in the opposite direction; all else is the same */
6110  have_data = get_actual_variable_endpoint(heapRel,
6111  indexRel,
6112  -indexscandir,
6113  scankeys,
6114  typLen,
6115  typByVal,
6116  slot,
6117  oldcontext,
6118  max);
6119  }
6120 
6121  /* Clean everything up */
6122  ExecDropSingleTupleTableSlot(slot);
6123 
6124  index_close(indexRel, NoLock);
6125  table_close(heapRel, NoLock);
6126 
6127  MemoryContextSwitchTo(oldcontext);
6128  MemoryContextDelete(tmpcontext);
6129 
6130  /* And we're done */
6131  break;
6132  }
6133  }
6134 
6135  return have_data;
6136 }

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), VariableStatData::atttype, BackwardScanDirection, BTGreaterStrategyNumber, BTLessStrategyNumber, CurrentMemoryContext, ExecDropSingleTupleTableSlot(), ForwardScanDirection, get_actual_variable_endpoint(), get_op_opfamily_strategy(), get_typlenbyval(), index_close(), index_open(), RelOptInfo::indexlist, InvalidOid, InvalidStrategy, lfirst, match_index_to_operand(), MemoryContextDelete(), MemoryContextSwitchTo(), NIL, NoLock, VariableStatData::rel, RangeTblEntry::relid, RelOptInfo::relid, RTE_RELATION, RangeTblEntry::rtekind, ScanKeyEntryInitialize(), PlannerInfo::simple_rte_array, SK_ISNULL, SK_SEARCHNOTNULL, table_close(), table_open(), table_slot_create(), and VariableStatData::var.

Referenced by get_variable_range(), and ineq_histogram_selectivity().
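
A minimal caller sketch (an illustration, not an excerpt from selfuncs.c): it mirrors the disabled #ifdef NOT_USED probe in get_variable_range() below, which is the shape a caller would use to fetch live endpoint values when a suitably ordered btree index exists.

    Datum   min_val;
    Datum   max_val;

    /* probe the index for the current extreme values of the variable */
    if (get_actual_variable_range(root, vardata, sortop, collation,
                                  &min_val, &max_val))
    {
        /* min_val/max_val now hold copies of the live endpoint datums */
    }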

◆ get_join_variables()

void get_join_variables ( PlannerInfo *  root,
List *  args,
SpecialJoinInfo *  sjinfo,
VariableStatData *  vardata1,
VariableStatData *  vardata2,
bool *  join_is_reversed 
)

Definition at line 4918 of file selfuncs.c.

4921 {
4922  Node *left,
4923  *right;
4924 
4925  if (list_length(args) != 2)
4926  elog(ERROR, "join operator should take two arguments");
4927 
4928  left = (Node *) linitial(args);
4929  right = (Node *) lsecond(args);
4930 
4931  examine_variable(root, left, 0, vardata1);
4932  examine_variable(root, right, 0, vardata2);
4933 
4934  if (vardata1->rel &&
4935  bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4936  *join_is_reversed = true; /* var1 is on RHS */
4937  else if (vardata2->rel &&
4938  bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4939  *join_is_reversed = true; /* var2 is on LHS */
4940  else
4941  *join_is_reversed = false;
4942 }

References generate_unaccent_rules::args, bms_is_subset(), elog, ERROR, examine_variable(), linitial, list_length(), lsecond, VariableStatData::rel, RelOptInfo::relids, SpecialJoinInfo::syn_lefthand, and SpecialJoinInfo::syn_righthand.

Referenced by eqjoinsel(), neqjoinsel(), and networkjoinsel().
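
A sketch of the expected calling pattern, loosely modeled on eqjoinsel() (variable names here are placeholders): examine both join inputs, then swap the roles of the two VariableStatData results when join_is_reversed comes back true.

    VariableStatData vardata1;
    VariableStatData vardata2;
    bool        join_is_reversed;

    get_join_variables(root, args, sjinfo,
                       &vardata1, &vardata2, &join_is_reversed);
    /* ... compute the join selectivity, treating vardata2 as the
     * left-hand input when join_is_reversed is true ... */
    ReleaseVariableStats(vardata1);
    ReleaseVariableStats(vardata2);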

◆ get_quals_from_indexclauses()

List* get_quals_from_indexclauses ( List *  indexclauses)

Definition at line 6316 of file selfuncs.c.

6317 {
6318  List *result = NIL;
6319  ListCell *lc;
6320 
6321  foreach(lc, indexclauses)
6322  {
6323  IndexClause *iclause = lfirst_node(IndexClause, lc);
6324  ListCell *lc2;
6325 
6326  foreach(lc2, iclause->indexquals)
6327  {
6328  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6329 
6330  result = lappend(result, rinfo);
6331  }
6332  }
6333  return result;
6334 }

References IndexClause::indexquals, lappend(), lfirst_node, and NIL.

Referenced by brincostestimate(), genericcostestimate(), and gincostestimate().
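
A hedged sketch of typical use, simplified from the pattern in the cost estimators listed above (not a verbatim excerpt; "path" is assumed to be an IndexPath): flatten the index clauses into plain RestrictInfos and feed them to clauselist_selectivity().

    List       *indexQuals = get_quals_from_indexclauses(path->indexclauses);
    Selectivity indexSelectivity;

    /* estimate the combined selectivity of the implied index quals */
    indexSelectivity = clauselist_selectivity(root, indexQuals,
                                              path->indexinfo->rel->relid,
                                              JOIN_INNER, NULL);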

◆ get_restriction_variable()

bool get_restriction_variable ( PlannerInfo *  root,
List *  args,
int  varRelid,
VariableStatData *  vardata,
Node **  other,
bool *  varonleft 
)

Definition at line 4858 of file selfuncs.c.

4861 {
4862  Node *left,
4863  *right;
4864  VariableStatData rdata;
4865 
4866  /* Fail if not a binary opclause (probably shouldn't happen) */
4867  if (list_length(args) != 2)
4868  return false;
4869 
4870  left = (Node *) linitial(args);
4871  right = (Node *) lsecond(args);
4872 
4873  /*
4874  * Examine both sides. Note that when varRelid is nonzero, Vars of other
4875  * relations will be treated as pseudoconstants.
4876  */
4877  examine_variable(root, left, varRelid, vardata);
4878  examine_variable(root, right, varRelid, &rdata);
4879 
4880  /*
4881  * If one side is a variable and the other not, we win.
4882  */
4883  if (vardata->rel && rdata.rel == NULL)
4884  {
4885  *varonleft = true;
4886  *other = estimate_expression_value(root, rdata.var);
4887  /* Assume we need no ReleaseVariableStats(rdata) here */
4888  return true;
4889  }
4890 
4891  if (vardata->rel == NULL && rdata.rel)
4892  {
4893  *varonleft = false;
4894  *other = estimate_expression_value(root, vardata->var);
4895  /* Assume we need no ReleaseVariableStats(*vardata) here */
4896  *vardata = rdata;
4897  return true;
4898  }
4899 
4900  /* Oops, clause has wrong structure (probably var op var) */
4901  ReleaseVariableStats(*vardata);
4902  ReleaseVariableStats(rdata);
4903 
4904  return false;
4905 }

References generate_unaccent_rules::args, estimate_expression_value(), examine_variable(), linitial, list_length(), lsecond, VariableStatData::rel, ReleaseVariableStats, and VariableStatData::var.

Referenced by _int_matchsel(), arraycontsel(), eqsel_internal(), generic_restriction_selectivity(), multirangesel(), networksel(), patternsel_common(), rangesel(), scalarineqsel_wrapper(), and tsmatchsel().
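
A minimal fragment showing how restriction estimators use this, patterned on eqsel_internal(): fall back to a default selectivity constant when the clause is not of the form "var op pseudoconstant".

    VariableStatData vardata;
    Node       *other;
    bool        varonleft;

    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        return DEFAULT_EQ_SEL;      /* cannot identify which side is the Var */

    /* ... estimate using vardata's statistics against the constant 'other' ... */

    ReleaseVariableStats(vardata);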

◆ get_stats_slot_range()

static void get_stats_slot_range ( AttStatsSlot *  sslot,
Oid  opfuncoid,
FmgrInfo *  opproc,
Oid  collation,
int16  typLen,
bool  typByVal,
Datum *  min,
Datum *  max,
bool *  p_have_data 
)
static

Definition at line 5916 of file selfuncs.c.

5919 {
5920  Datum tmin = *min;
5921  Datum tmax = *max;
5922  bool have_data = *p_have_data;
5923  bool found_tmin = false;
5924  bool found_tmax = false;
5925 
5926  /* Look up the comparison function, if we didn't already do so */
5927  if (opproc->fn_oid != opfuncoid)
5928  fmgr_info(opfuncoid, opproc);
5929 
5930  /* Scan all the slot's values */
5931  for (int i = 0; i < sslot->nvalues; i++)
5932  {
5933  if (!have_data)
5934  {
5935  tmin = tmax = sslot->values[i];
5936  found_tmin = found_tmax = true;
5937  *p_have_data = have_data = true;
5938  continue;
5939  }
5940  if (DatumGetBool(FunctionCall2Coll(opproc,
5941  collation,
5942  sslot->values[i], tmin)))
5943  {
5944  tmin = sslot->values[i];
5945  found_tmin = true;
5946  }
5947  if (DatumGetBool(FunctionCall2Coll(opproc,
5948  collation,
5949  tmax, sslot->values[i])))
5950  {
5951  tmax = sslot->values[i];
5952  found_tmax = true;
5953  }
5954  }
5955 
5956  /*
5957  * Copy the slot's values, if we found new extreme values.
5958  */
5959  if (found_tmin)
5960  *min = datumCopy(tmin, typByVal, typLen);
5961  if (found_tmax)
5962  *max = datumCopy(tmax, typByVal, typLen);
5963 }

References datumCopy(), DatumGetBool, fmgr_info(), FmgrInfo::fn_oid, FunctionCall2Coll(), i, AttStatsSlot::nvalues, and AttStatsSlot::values.

Referenced by get_variable_range().
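
For illustration, get_variable_range() (below) invokes this once per statistics slot it consults, so the running min/max simply widen as more slots are scanned. A sketch of that call shape:

    /* widen tmin/tmax (and set have_data) using the values in this slot */
    get_stats_slot_range(&sslot, opfuncoid, &opproc,
                         collation, typLen, typByVal,
                         &tmin, &tmax, &have_data);
    free_attstatsslot(&sslot);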

◆ get_variable_numdistinct()

double get_variable_numdistinct ( VariableStatData *  vardata,
bool *  isdefault 
)

Definition at line 5656 of file selfuncs.c.

5657 {
5658  double stadistinct;
5659  double stanullfrac = 0.0;
5660  double ntuples;
5661 
5662  *isdefault = false;
5663 
5664  /*
5665  * Determine the stadistinct value to use. There are cases where we can
5666  * get an estimate even without a pg_statistic entry, or can get a better
5667  * value than is in pg_statistic. Grab stanullfrac too if we can find it
5668  * (otherwise, assume no nulls, for lack of any better idea).
5669  */
5670  if (HeapTupleIsValid(vardata->statsTuple))
5671  {
5672  /* Use the pg_statistic entry */
5673  Form_pg_statistic stats;
5674 
5675  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
5676  stadistinct = stats->stadistinct;
5677  stanullfrac = stats->stanullfrac;
5678  }
5679  else if (vardata->vartype == BOOLOID)
5680  {
5681  /*
5682  * Special-case boolean columns: presumably, two distinct values.
5683  *
5684  * Are there any other datatypes we should wire in special estimates
5685  * for?
5686  */
5687  stadistinct = 2.0;
5688  }
5689  else if (vardata->rel && vardata->rel->rtekind == RTE_VALUES)
5690  {
5691  /*
5692  * If the Var represents a column of a VALUES RTE, assume it's unique.
5693  * This could of course be very wrong, but it should tend to be true
5694  * in well-written queries. We could consider examining the VALUES'
5695  * contents to get some real statistics; but that only works if the
5696  * entries are all constants, and it would be pretty expensive anyway.
5697  */
5698  stadistinct = -1.0; /* unique (and all non null) */
5699  }
5700  else
5701  {
5702  /*
5703  * We don't keep statistics for system columns, but in some cases we
5704  * can infer distinctness anyway.
5705  */
5706  if (vardata->var && IsA(vardata->var, Var))
5707  {
5708  switch (((Var *) vardata->var)->varattno)
5709  {
5710  case SelfItemPointerAttributeNumber:
5711  stadistinct = -1.0; /* unique (and all non null) */
5712  break;
5713  case TableOidAttributeNumber:
5714  stadistinct = 1.0; /* only 1 value */
5715  break;
5716  default:
5717  stadistinct = 0.0; /* means "unknown" */
5718  break;
5719  }
5720  }
5721  else
5722  stadistinct = 0.0; /* means "unknown" */
5723 
5724  /*
5725  * XXX consider using estimate_num_groups on expressions?
5726  */
5727  }
5728 
5729  /*
5730  * If there is a unique index or DISTINCT clause for the variable, assume
5731  * it is unique no matter what pg_statistic says; the statistics could be
5732  * out of date, or we might have found a partial unique index that proves
5733  * the var is unique for this query. However, we'd better still believe
5734  * the null-fraction statistic.
5735  */
5736  if (vardata->isunique)
5737  stadistinct = -1.0 * (1.0 - stanullfrac);
5738 
5739  /*
5740  * If we had an absolute estimate, use that.
5741  */
5742  if (stadistinct > 0.0)
5743  return clamp_row_est(stadistinct);
5744 
5745  /*
5746  * Otherwise we need to get the relation size; punt if not available.
5747  */
5748  if (vardata->rel == NULL)
5749  {
5750  *isdefault = true;
5751  return DEFAULT_NUM_DISTINCT;
5752  }
5753  ntuples = vardata->rel->tuples;
5754  if (ntuples <= 0.0)
5755  {
5756  *isdefault = true;
5757  return DEFAULT_NUM_DISTINCT;
5758  }
5759 
5760  /*
5761  * If we had a relative estimate, use that.
5762  */
5763  if (stadistinct < 0.0)
5764  return clamp_row_est(-stadistinct * ntuples);
5765 
5766  /*
5767  * With no data, estimate ndistinct = ntuples if the table is small, else
5768  * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5769  * that the behavior isn't discontinuous.
5770  */
5771  if (ntuples < DEFAULT_NUM_DISTINCT)
5772  return clamp_row_est(ntuples);
5773 
5774  *isdefault = true;
5775  return DEFAULT_NUM_DISTINCT;
5776 }

References clamp_row_est(), DEFAULT_NUM_DISTINCT, GETSTRUCT, HeapTupleIsValid, IsA, VariableStatData::isunique, VariableStatData::rel, RTE_VALUES, RelOptInfo::rtekind, SelfItemPointerAttributeNumber, VariableStatData::statsTuple, TableOidAttributeNumber, RelOptInfo::tuples, VariableStatData::var, and VariableStatData::vartype.

Referenced by add_unique_group_var(), eqjoinsel(), estimate_hash_bucket_stats(), ineq_histogram_selectivity(), var_eq_const(), and var_eq_non_const().
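
A worked illustration of the arithmetic above (the numbers are invented for the example): a negative stadistinct is a fraction of the row count, so stadistinct = -0.2 with 50000 tuples yields clamp_row_est(0.2 * 50000) = 10000; with no statistics at all, a 150-tuple table returns 150, while a large table falls back to DEFAULT_NUM_DISTINCT (200). In caller code:

    bool    isdefault;
    double  ndistinct;

    ndistinct = get_variable_numdistinct(&vardata, &isdefault);
    if (isdefault)
    {
        /* the estimate is just DEFAULT_NUM_DISTINCT; treat it with caution */
    }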

◆ get_variable_range()

static bool get_variable_range ( PlannerInfo *  root,
VariableStatData *  vardata,
Oid  sortop,
Oid  collation,
Datum *  min,
Datum *  max 
)
static

Definition at line 5789 of file selfuncs.c.

5792 {
5793  Datum tmin = 0;
5794  Datum tmax = 0;
5795  bool have_data = false;
5796  int16 typLen;
5797  bool typByVal;
5798  Oid opfuncoid;
5799  FmgrInfo opproc;
5800  AttStatsSlot sslot;
5801 
5802  /*
5803  * XXX It's very tempting to try to use the actual column min and max, if
5804  * we can get them relatively-cheaply with an index probe. However, since
5805  * this function is called many times during join planning, that could
5806  * have unpleasant effects on planning speed. Need more investigation
5807  * before enabling this.
5808  */
5809 #ifdef NOT_USED
5810  if (get_actual_variable_range(root, vardata, sortop, collation, min, max))
5811  return true;
5812 #endif
5813 
5814  if (!HeapTupleIsValid(vardata->statsTuple))
5815  {
5816  /* no stats available, so default result */
5817  return false;
5818  }
5819 
5820  /*
5821  * If we can't apply the sortop to the stats data, just fail. In
5822  * principle, if there's a histogram and no MCVs, we could return the
5823  * histogram endpoints without ever applying the sortop ... but it's
5824  * probably not worth trying, because whatever the caller wants to do with
5825  * the endpoints would likely fail the security check too.
5826  */
5827  if (!statistic_proc_security_check(vardata,
5828  (opfuncoid = get_opcode(sortop))))
5829  return false;
5830 
5831  opproc.fn_oid = InvalidOid; /* mark this as not looked up yet */
5832 
5833  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5834 
5835  /*
5836  * If there is a histogram with the ordering we want, grab the first and
5837  * last values.
5838  */
5839  if (get_attstatsslot(&sslot, vardata->statsTuple,
5840  STATISTIC_KIND_HISTOGRAM, sortop,
5841  ATTSTATSSLOT_VALUES))
5842  {
5843  if (sslot.stacoll == collation && sslot.nvalues > 0)
5844  {
5845  tmin = datumCopy(sslot.values[0], typByVal, typLen);
5846