PostgreSQL Source Code  git master
selfuncs.c File Reference
#include "postgres.h"
#include <ctype.h>
#include <math.h>
#include "access/brin.h"
#include "access/brin_page.h"
#include "access/gin.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/visibilitymap.h"
#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_statistic_ext.h"
#include "executor/nodeAgg.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "statistics/statistics.h"
#include "storage/bufmgr.h"
#include "utils/acl.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/date.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/index_selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
#include "utils/syscache.h"
#include "utils/timestamp.h"
#include "utils/typcache.h"

Data Structures

struct  GroupVarInfo
 
struct  GinQualCounts
 

Macros

#define VISITED_PAGES_LIMIT   100
 

Functions

static double eqsel_internal (PG_FUNCTION_ARGS, bool negate)
 
static double eqjoinsel_inner (Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2)
 
static double eqjoinsel_semi (Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2, RelOptInfo *inner_rel)
 
static bool estimate_multivariate_ndistinct (PlannerInfo *root, RelOptInfo *rel, List **varinfos, double *ndistinct)
 
static bool convert_to_scalar (Datum value, Oid valuetypid, Oid collid, double *scaledvalue, Datum lobound, Datum hibound, Oid boundstypid, double *scaledlobound, double *scaledhibound)
 
static double convert_numeric_to_scalar (Datum value, Oid typid, bool *failure)
 
static void convert_string_to_scalar (char *value, double *scaledvalue, char *lobound, double *scaledlobound, char *hibound, double *scaledhibound)
 
static void convert_bytea_to_scalar (Datum value, double *scaledvalue, Datum lobound, double *scaledlobound, Datum hibound, double *scaledhibound)
 
static double convert_one_string_to_scalar (char *value, int rangelo, int rangehi)
 
static double convert_one_bytea_to_scalar (unsigned char *value, int valuelen, int rangelo, int rangehi)
 
static char * convert_string_datum (Datum value, Oid typid, Oid collid, bool *failure)
 
static double convert_timevalue_to_scalar (Datum value, Oid typid, bool *failure)
 
static void examine_simple_variable (PlannerInfo *root, Var *var, VariableStatData *vardata)
 
static bool get_variable_range (PlannerInfo *root, VariableStatData *vardata, Oid sortop, Oid collation, Datum *min, Datum *max)
 
static void get_stats_slot_range (AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc, Oid collation, int16 typLen, bool typByVal, Datum *min, Datum *max, bool *p_have_data)
 
static bool get_actual_variable_range (PlannerInfo *root, VariableStatData *vardata, Oid sortop, Oid collation, Datum *min, Datum *max)
 
static bool get_actual_variable_endpoint (Relation heapRel, Relation indexRel, ScanDirection indexscandir, ScanKey scankeys, int16 typLen, bool typByVal, TupleTableSlot *tableslot, MemoryContext outercontext, Datum *endpointDatum)
 
static RelOptInfo * find_join_input_rel (PlannerInfo *root, Relids relids)
 
Datum eqsel (PG_FUNCTION_ARGS)
 
double var_eq_const (VariableStatData *vardata, Oid oproid, Oid collation, Datum constval, bool constisnull, bool varonleft, bool negate)
 
double var_eq_non_const (VariableStatData *vardata, Oid oproid, Oid collation, Node *other, bool varonleft, bool negate)
 
Datum neqsel (PG_FUNCTION_ARGS)
 
static double scalarineqsel (PlannerInfo *root, Oid operator, bool isgt, bool iseq, Oid collation, VariableStatData *vardata, Datum constval, Oid consttype)
 
double mcv_selectivity (VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, double *sumcommonp)
 
double histogram_selectivity (VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, int min_hist_size, int n_skip, int *hist_size)
 
double generic_restriction_selectivity (PlannerInfo *root, Oid oproid, Oid collation, List *args, int varRelid, double default_selectivity)
 
double ineq_histogram_selectivity (PlannerInfo *root, VariableStatData *vardata, Oid opoid, FmgrInfo *opproc, bool isgt, bool iseq, Oid collation, Datum constval, Oid consttype)
 
static Datum scalarineqsel_wrapper (PG_FUNCTION_ARGS, bool isgt, bool iseq)
 
Datum scalarltsel (PG_FUNCTION_ARGS)
 
Datum scalarlesel (PG_FUNCTION_ARGS)
 
Datum scalargtsel (PG_FUNCTION_ARGS)
 
Datum scalargesel (PG_FUNCTION_ARGS)
 
Selectivity boolvarsel (PlannerInfo *root, Node *arg, int varRelid)
 
Selectivity booltestsel (PlannerInfo *root, BoolTestType booltesttype, Node *arg, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
Selectivity nulltestsel (PlannerInfo *root, NullTestType nulltesttype, Node *arg, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
static Node * strip_array_coercion (Node *node)
 
Selectivity scalararraysel (PlannerInfo *root, ScalarArrayOpExpr *clause, bool is_join_clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
int estimate_array_length (Node *arrayexpr)
 
Selectivity rowcomparesel (PlannerInfo *root, RowCompareExpr *clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
 
Datum eqjoinsel (PG_FUNCTION_ARGS)
 
Datum neqjoinsel (PG_FUNCTION_ARGS)
 
Datum scalarltjoinsel (PG_FUNCTION_ARGS)
 
Datum scalarlejoinsel (PG_FUNCTION_ARGS)
 
Datum scalargtjoinsel (PG_FUNCTION_ARGS)
 
Datum scalargejoinsel (PG_FUNCTION_ARGS)
 
void mergejoinscansel (PlannerInfo *root, Node *clause, Oid opfamily, int strategy, bool nulls_first, Selectivity *leftstart, Selectivity *leftend, Selectivity *rightstart, Selectivity *rightend)
 
Datum matchingsel (PG_FUNCTION_ARGS)
 
Datum matchingjoinsel (PG_FUNCTION_ARGS)
 
static List * add_unique_group_var (PlannerInfo *root, List *varinfos, Node *var, VariableStatData *vardata)
 
double estimate_num_groups (PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
 
void estimate_hash_bucket_stats (PlannerInfo *root, Node *hashkey, double nbuckets, Selectivity *mcv_freq, Selectivity *bucketsize_frac)
 
double estimate_hashagg_tablesize (PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
 
bool get_restriction_variable (PlannerInfo *root, List *args, int varRelid, VariableStatData *vardata, Node **other, bool *varonleft)
 
void get_join_variables (PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo, VariableStatData *vardata1, VariableStatData *vardata2, bool *join_is_reversed)
 
static void ReleaseDummy (HeapTuple tuple)
 
void examine_variable (PlannerInfo *root, Node *node, int varRelid, VariableStatData *vardata)
 
bool statistic_proc_security_check (VariableStatData *vardata, Oid func_oid)
 
double get_variable_numdistinct (VariableStatData *vardata, bool *isdefault)
 
List * get_quals_from_indexclauses (List *indexclauses)
 
Cost index_other_operands_eval_cost (PlannerInfo *root, List *indexquals)
 
void genericcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
 
List * add_predicate_to_index_quals (IndexOptInfo *index, List *indexQuals)
 
void btcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void hashcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void gistcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void spgcostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
static bool gincost_pattern (IndexOptInfo *index, int indexcol, Oid clause_op, Datum query, GinQualCounts *counts)
 
static bool gincost_opexpr (PlannerInfo *root, IndexOptInfo *index, int indexcol, OpExpr *clause, GinQualCounts *counts)
 
static bool gincost_scalararrayopexpr (PlannerInfo *root, IndexOptInfo *index, int indexcol, ScalarArrayOpExpr *clause, double numIndexEntries, GinQualCounts *counts)
 
void gincostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 
void brincostestimate (PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
 

Variables

get_relation_stats_hook_type get_relation_stats_hook = NULL
 
get_index_stats_hook_type get_index_stats_hook = NULL
 

Macro Definition Documentation

◆ VISITED_PAGES_LIMIT

#define VISITED_PAGES_LIMIT   100

Function Documentation

◆ add_predicate_to_index_quals()

List * add_predicate_to_index_quals ( IndexOptInfo *index,
List *indexQuals
)

Definition at line 6654 of file selfuncs.c.

6655 {
6656  List *predExtraQuals = NIL;
6657  ListCell *lc;
6658 
6659  if (index->indpred == NIL)
6660  return indexQuals;
6661 
6662  foreach(lc, index->indpred)
6663  {
6664  Node *predQual = (Node *) lfirst(lc);
6665  List *oneQual = list_make1(predQual);
6666 
6667  if (!predicate_implied_by(oneQual, indexQuals, false))
6668  predExtraQuals = list_concat(predExtraQuals, oneQual);
6669  }
6670  return list_concat(predExtraQuals, indexQuals);
6671 }
List * list_concat(List *list1, const List *list2)
Definition: list.c:560
#define lfirst(lc)
Definition: pg_list.h:170
#define NIL
Definition: pg_list.h:66
#define list_make1(x1)
Definition: pg_list.h:210
bool predicate_implied_by(List *predicate_list, List *clause_list, bool weak)
Definition: predtest.c:152
Definition: pg_list.h:52
Definition: nodes.h:118
Definition: type.h:95

References lfirst, list_concat(), list_make1, NIL, and predicate_implied_by().

Referenced by btcostestimate(), genericcostestimate(), and gincostestimate().
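
The effect is easiest to see with a partial index: any clause of the index predicate that is not already implied by the index quals is prepended, and the combined list is what callers such as btcostestimate() feed to clauselist_selectivity(). Below is a minimal standalone sketch of that filtering pattern, not PostgreSQL code: plain strings stand in for Node quals and an exact string match stands in for predicate_implied_by().

#include <stdio.h>
#include <string.h>

/* Stand-in for predicate_implied_by(): here a predicate clause counts as
 * "implied" only if an identical clause string is already in the qual list. */
static int implied_by(const char *pred, const char **quals, int nquals)
{
    for (int i = 0; i < nquals; i++)
        if (strcmp(pred, quals[i]) == 0)
            return 1;
    return 0;
}

int main(void)
{
    const char *indexQuals[] = {"x > 10"};          /* quals usable with the index */
    const char *indpred[] = {"active", "x > 10"};   /* partial-index predicate */
    const char *combined[8];
    int         n = 0;

    /* keep predicate clauses that the index quals do not already imply ... */
    for (int i = 0; i < 2; i++)
        if (!implied_by(indpred[i], indexQuals, 1))
            combined[n++] = indpred[i];
    /* ... then append the original index quals, as the real function does */
    for (int i = 0; i < 1; i++)
        combined[n++] = indexQuals[i];

    for (int i = 0; i < n; i++)
        printf("%s\n", combined[i]);    /* prints "active" then "x > 10" */
    return 0;
}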

◆ add_unique_group_var()

static List * add_unique_group_var ( PlannerInfo *root,
List *varinfos,
Node *var,
VariableStatData *vardata
)
static

Definition at line 3265 of file selfuncs.c.

3267 {
3268  GroupVarInfo *varinfo;
3269  double ndistinct;
3270  bool isdefault;
3271  ListCell *lc;
3272 
3273  ndistinct = get_variable_numdistinct(vardata, &isdefault);
3274 
3275  foreach(lc, varinfos)
3276  {
3277  varinfo = (GroupVarInfo *) lfirst(lc);
3278 
3279  /* Drop exact duplicates */
3280  if (equal(var, varinfo->var))
3281  return varinfos;
3282 
3283  /*
3284  * Drop known-equal vars, but only if they belong to different
3285  * relations (see comments for estimate_num_groups)
3286  */
3287  if (vardata->rel != varinfo->rel &&
3288  exprs_known_equal(root, var, varinfo->var))
3289  {
3290  if (varinfo->ndistinct <= ndistinct)
3291  {
3292  /* Keep older item, forget new one */
3293  return varinfos;
3294  }
3295  else
3296  {
3297  /* Delete the older item */
3298  varinfos = foreach_delete_current(varinfos, lc);
3299  }
3300  }
3301  }
3302 
3303  varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3304 
3305  varinfo->var = var;
3306  varinfo->rel = vardata->rel;
3307  varinfo->ndistinct = ndistinct;
3308  varinfo->isdefault = isdefault;
3309  varinfos = lappend(varinfos, varinfo);
3310  return varinfos;
3311 }
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:225
bool exprs_known_equal(PlannerInfo *root, Node *item1, Node *item2)
Definition: equivclass.c:2369
List * lappend(List *list, void *datum)
Definition: list.c:338
void * palloc(Size size)
Definition: mcxt.c:1199
#define foreach_delete_current(lst, cell)
Definition: pg_list.h:388
double get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
Definition: selfuncs.c:5657
RelOptInfo * rel
Definition: selfuncs.c:3259
double ndistinct
Definition: selfuncs.c:3260
bool isdefault
Definition: selfuncs.c:3261
Node * var
Definition: selfuncs.c:3258
RelOptInfo * rel
Definition: selfuncs.h:88

References equal(), exprs_known_equal(), foreach_delete_current, get_variable_numdistinct(), GroupVarInfo::isdefault, lappend(), lfirst, GroupVarInfo::ndistinct, palloc(), GroupVarInfo::rel, VariableStatData::rel, and GroupVarInfo::var.

Referenced by estimate_num_groups().
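
The noteworthy branch is the known-equal case: when the new grouping variable is provably equal to one already collected (and they come from different relations), only the variable with the smaller ndistinct estimate survives, since it constrains the group count more tightly. A tiny illustrative sketch of that tie-break with made-up numbers (not PostgreSQL code):

#include <stdio.h>

int main(void)
{
    /* e.g. GROUP BY a.id, b.a_id where a.id = b.a_id is a join clause */
    double existing_ndistinct = 1000;   /* a.id, already in varinfos */
    double new_ndistinct = 100;         /* b.a_id, known equal to a.id */

    if (existing_ndistinct <= new_ndistinct)
        printf("keep existing entry (ndistinct = %.0f), drop the new one\n",
               existing_ndistinct);
    else
        printf("drop existing entry, add new one (ndistinct = %.0f)\n",
               new_ndistinct);
    return 0;
}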

◆ booltestsel()

Selectivity booltestsel ( PlannerInfo *root,
BoolTestType booltesttype,
Node *arg,
int varRelid,
JoinType jointype,
SpecialJoinInfo *sjinfo
)

Definition at line 1538 of file selfuncs.c.

1540 {
1541  VariableStatData vardata;
1542  double selec;
1543 
1544  examine_variable(root, arg, varRelid, &vardata);
1545 
1546  if (HeapTupleIsValid(vardata.statsTuple))
1547  {
1548  Form_pg_statistic stats;
1549  double freq_null;
1550  AttStatsSlot sslot;
1551 
1552  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1553  freq_null = stats->stanullfrac;
1554 
1555  if (get_attstatsslot(&sslot, vardata.statsTuple,
1556  STATISTIC_KIND_MCV, InvalidOid,
1557  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)
1558  && sslot.nnumbers > 0)
1559  {
1560  double freq_true;
1561  double freq_false;
1562 
1563  /*
1564  * Get first MCV frequency and derive frequency for true.
1565  */
1566  if (DatumGetBool(sslot.values[0]))
1567  freq_true = sslot.numbers[0];
1568  else
1569  freq_true = 1.0 - sslot.numbers[0] - freq_null;
1570 
1571  /*
1572  * Next derive frequency for false. Then use these as appropriate
1573  * to derive frequency for each case.
1574  */
1575  freq_false = 1.0 - freq_true - freq_null;
1576 
1577  switch (booltesttype)
1578  {
1579  case IS_UNKNOWN:
1580  /* select only NULL values */
1581  selec = freq_null;
1582  break;
1583  case IS_NOT_UNKNOWN:
1584  /* select non-NULL values */
1585  selec = 1.0 - freq_null;
1586  break;
1587  case IS_TRUE:
1588  /* select only TRUE values */
1589  selec = freq_true;
1590  break;
1591  case IS_NOT_TRUE:
1592  /* select non-TRUE values */
1593  selec = 1.0 - freq_true;
1594  break;
1595  case IS_FALSE:
1596  /* select only FALSE values */
1597  selec = freq_false;
1598  break;
1599  case IS_NOT_FALSE:
1600  /* select non-FALSE values */
1601  selec = 1.0 - freq_false;
1602  break;
1603  default:
1604  elog(ERROR, "unrecognized booltesttype: %d",
1605  (int) booltesttype);
1606  selec = 0.0; /* Keep compiler quiet */
1607  break;
1608  }
1609 
1610  free_attstatsslot(&sslot);
1611  }
1612  else
1613  {
1614  /*
1615  * No most-common-value info available. Still have null fraction
1616  * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1617  * for null fraction and assume a 50-50 split of TRUE and FALSE.
1618  */
1619  switch (booltesttype)
1620  {
1621  case IS_UNKNOWN:
1622  /* select only NULL values */
1623  selec = freq_null;
1624  break;
1625  case IS_NOT_UNKNOWN:
1626  /* select non-NULL values */
1627  selec = 1.0 - freq_null;
1628  break;
1629  case IS_TRUE:
1630  case IS_FALSE:
1631  /* Assume we select half of the non-NULL values */
1632  selec = (1.0 - freq_null) / 2.0;
1633  break;
1634  case IS_NOT_TRUE:
1635  case IS_NOT_FALSE:
1636  /* Assume we select NULLs plus half of the non-NULLs */
1637  /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1638  selec = (freq_null + 1.0) / 2.0;
1639  break;
1640  default:
1641  elog(ERROR, "unrecognized booltesttype: %d",
1642  (int) booltesttype);
1643  selec = 0.0; /* Keep compiler quiet */
1644  break;
1645  }
1646  }
1647  }
1648  else
1649  {
1650  /*
1651  * If we can't get variable statistics for the argument, perhaps
1652  * clause_selectivity can do something with it. We ignore the
1653  * possibility of a NULL value when using clause_selectivity, and just
1654  * assume the value is either TRUE or FALSE.
1655  */
1656  switch (booltesttype)
1657  {
1658  case IS_UNKNOWN:
1659  selec = DEFAULT_UNK_SEL;
1660  break;
1661  case IS_NOT_UNKNOWN:
1662  selec = DEFAULT_NOT_UNK_SEL;
1663  break;
1664  case IS_TRUE:
1665  case IS_NOT_FALSE:
1666  selec = (double) clause_selectivity(root, arg,
1667  varRelid,
1668  jointype, sjinfo);
1669  break;
1670  case IS_FALSE:
1671  case IS_NOT_TRUE:
1672  selec = 1.0 - (double) clause_selectivity(root, arg,
1673  varRelid,
1674  jointype, sjinfo);
1675  break;
1676  default:
1677  elog(ERROR, "unrecognized booltesttype: %d",
1678  (int) booltesttype);
1679  selec = 0.0; /* Keep compiler quiet */
1680  break;
1681  }
1682  }
1683 
1684  ReleaseVariableStats(vardata);
1685 
1686  /* result should be in range, but make sure... */
1687  CLAMP_PROBABILITY(selec);
1688 
1689  return (Selectivity) selec;
1690 }
Selectivity clause_selectivity(PlannerInfo *root, Node *clause, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:690
#define ERROR
Definition: elog.h:39
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define GETSTRUCT(TUP)
Definition: htup_details.h:649
void free_attstatsslot(AttStatsSlot *sslot)
Definition: lsyscache.c:3309
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition: lsyscache.c:3192
#define ATTSTATSSLOT_NUMBERS
Definition: lsyscache.h:43
#define ATTSTATSSLOT_VALUES
Definition: lsyscache.h:42
double Selectivity
Definition: nodes.h:250
void * arg
FormData_pg_statistic * Form_pg_statistic
Definition: pg_statistic.h:135
static bool DatumGetBool(Datum X)
Definition: postgres.h:438
#define InvalidOid
Definition: postgres_ext.h:36
@ IS_NOT_TRUE
Definition: primnodes.h:1382
@ IS_NOT_FALSE
Definition: primnodes.h:1382
@ IS_NOT_UNKNOWN
Definition: primnodes.h:1382
@ IS_TRUE
Definition: primnodes.h:1382
@ IS_UNKNOWN
Definition: primnodes.h:1382
@ IS_FALSE
Definition: primnodes.h:1382
void examine_variable(PlannerInfo *root, Node *node, int varRelid, VariableStatData *vardata)
Definition: selfuncs.c:4977
#define DEFAULT_NOT_UNK_SEL
Definition: selfuncs.h:56
#define ReleaseVariableStats(vardata)
Definition: selfuncs.h:99
#define CLAMP_PROBABILITY(p)
Definition: selfuncs.h:63
#define DEFAULT_UNK_SEL
Definition: selfuncs.h:55
Datum * values
Definition: lsyscache.h:53
float4 * numbers
Definition: lsyscache.h:56
int nnumbers
Definition: lsyscache.h:57
HeapTuple statsTuple
Definition: selfuncs.h:89

References arg, ATTSTATSSLOT_NUMBERS, ATTSTATSSLOT_VALUES, CLAMP_PROBABILITY, clause_selectivity(), DatumGetBool(), DEFAULT_NOT_UNK_SEL, DEFAULT_UNK_SEL, elog(), ERROR, examine_variable(), free_attstatsslot(), get_attstatsslot(), GETSTRUCT, HeapTupleIsValid, InvalidOid, IS_FALSE, IS_NOT_FALSE, IS_NOT_TRUE, IS_NOT_UNKNOWN, IS_TRUE, IS_UNKNOWN, AttStatsSlot::nnumbers, AttStatsSlot::numbers, ReleaseVariableStats, VariableStatData::statsTuple, and AttStatsSlot::values.

Referenced by clause_selectivity_ext().
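
When an MCV slot is available the whole estimate reduces to three frequencies (true, false, NULL) that sum to 1.0, with each BoolTestType selecting one of them or its complement. A standalone sketch of that arithmetic with made-up statistics (not PostgreSQL code):

#include <stdio.h>
#include <stdbool.h>

/* Derive freq_true and freq_false the way booltestsel() does, from the
 * first (most common) MCV entry and the null fraction. */
static void bool_freqs(bool mcv_is_true, double mcv_freq, double freq_null,
                       double *freq_true, double *freq_false)
{
    *freq_true = mcv_is_true ? mcv_freq : 1.0 - mcv_freq - freq_null;
    *freq_false = 1.0 - *freq_true - freq_null;
}

int main(void)
{
    double freq_true, freq_false, freq_null = 0.1;

    /* most common value is 'true' with frequency 0.7; 10% of rows are NULL */
    bool_freqs(true, 0.7, freq_null, &freq_true, &freq_false);

    printf("IS TRUE       -> %.2f\n", freq_true);          /* 0.70 */
    printf("IS NOT TRUE   -> %.2f\n", 1.0 - freq_true);    /* 0.30, includes NULLs */
    printf("IS FALSE      -> %.2f\n", freq_false);         /* 0.20 */
    printf("IS UNKNOWN    -> %.2f\n", freq_null);          /* 0.10 */
    return 0;
}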

◆ boolvarsel()

Selectivity boolvarsel ( PlannerInfo *root,
Node *arg,
int  varRelid 
)

Definition at line 1510 of file selfuncs.c.

1511 {
1512  VariableStatData vardata;
1513  double selec;
1514 
1515  examine_variable(root, arg, varRelid, &vardata);
1516  if (HeapTupleIsValid(vardata.statsTuple))
1517  {
1518  /*
1519  * A boolean variable V is equivalent to the clause V = 't', so we
1520  * compute the selectivity as if that is what we have.
1521  */
1522  selec = var_eq_const(&vardata, BooleanEqualOperator, InvalidOid,
1523  BoolGetDatum(true), false, true, false);
1524  }
1525  else
1526  {
1527  /* Otherwise, the default estimate is 0.5 */
1528  selec = 0.5;
1529  }
1530  ReleaseVariableStats(vardata);
1531  return selec;
1532 }
static Datum BoolGetDatum(bool X)
Definition: postgres.h:450
double var_eq_const(VariableStatData *vardata, Oid oproid, Oid collation, Datum constval, bool constisnull, bool varonleft, bool negate)
Definition: selfuncs.c:293

References arg, BoolGetDatum(), examine_variable(), HeapTupleIsValid, InvalidOid, ReleaseVariableStats, VariableStatData::statsTuple, and var_eq_const().

Referenced by clause_selectivity_ext().

◆ brincostestimate()

void brincostestimate ( PlannerInfo *root,
IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 7761 of file selfuncs.c.

7765 {
7766  IndexOptInfo *index = path->indexinfo;
7767  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
7768  double numPages = index->pages;
7769  RelOptInfo *baserel = index->rel;
7770  RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
7771  Cost spc_seq_page_cost;
7772  Cost spc_random_page_cost;
7773  double qual_arg_cost;
7774  double qualSelectivity;
7775  BrinStatsData statsData;
7776  double indexRanges;
7777  double minimalRanges;
7778  double estimatedRanges;
7779  double selec;
7780  Relation indexRel;
7781  ListCell *l;
7782  VariableStatData vardata;
7783 
7784  Assert(rte->rtekind == RTE_RELATION);
7785 
7786  /* fetch estimated page cost for the tablespace containing the index */
7787  get_tablespace_page_costs(index->reltablespace,
7788  &spc_random_page_cost,
7789  &spc_seq_page_cost);
7790 
7791  /*
7792  * Obtain some data from the index itself, if possible. Otherwise invent
7793  * some plausible internal statistics based on the relation page count.
7794  */
7795  if (!index->hypothetical)
7796  {
7797  /*
7798  * A lock should have already been obtained on the index in plancat.c.
7799  */
7800  indexRel = index_open(index->indexoid, NoLock);
7801  brinGetStats(indexRel, &statsData);
7802  index_close(indexRel, NoLock);
7803 
7804  /* work out the actual number of ranges in the index */
7805  indexRanges = Max(ceil((double) baserel->pages /
7806  statsData.pagesPerRange), 1.0);
7807  }
7808  else
7809  {
7810  /*
7811  * Assume default number of pages per range, and estimate the number
7812  * of ranges based on that.
7813  */
7814  indexRanges = Max(ceil((double) baserel->pages /
7815  BRIN_DEFAULT_PAGES_PER_RANGE), 1.0);
7816 
7817  statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE;
7818  statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
7819  }
7820 
7821  /*
7822  * Compute index correlation
7823  *
7824  * Because we can use all index quals equally when scanning, we can use
7825  * the largest correlation (in absolute value) among columns used by the
7826  * query. Start at zero, the worst possible case. If we cannot find any
7827  * correlation statistics, we will keep it as 0.
7828  */
7829  *indexCorrelation = 0;
7830 
7831  foreach(l, path->indexclauses)
7832  {
7833  IndexClause *iclause = lfirst_node(IndexClause, l);
7834  AttrNumber attnum = index->indexkeys[iclause->indexcol];
7835 
7836  /* attempt to lookup stats in relation for this index column */
7837  if (attnum != 0)
7838  {
7839  /* Simple variable -- look to stats for the underlying table */
7840  if (get_relation_stats_hook &&
7841  (*get_relation_stats_hook) (root, rte, attnum, &vardata))
7842  {
7843  /*
7844  * The hook took control of acquiring a stats tuple. If it
7845  * did supply a tuple, it'd better have supplied a freefunc.
7846  */
7847  if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
7848  elog(ERROR,
7849  "no function provided to release variable stats with");
7850  }
7851  else
7852  {
7853  vardata.statsTuple =
7854  SearchSysCache3(STATRELATTINH,
7855  ObjectIdGetDatum(rte->relid),
7856  Int16GetDatum(attnum),
7857  BoolGetDatum(false));
7858  vardata.freefunc = ReleaseSysCache;
7859  }
7860  }
7861  else
7862  {
7863  /*
7864  * Looks like we've found an expression column in the index. Let's
7865  * see if there's any stats for it.
7866  */
7867 
7868  /* get the attnum from the 0-based index. */
7869  attnum = iclause->indexcol + 1;
7870 
7871  if (get_index_stats_hook &&
7872  (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
7873  {
7874  /*
7875  * The hook took control of acquiring a stats tuple. If it
7876  * did supply a tuple, it'd better have supplied a freefunc.
7877  */
7878  if (HeapTupleIsValid(vardata.statsTuple) &&
7879  !vardata.freefunc)
7880  elog(ERROR, "no function provided to release variable stats with");
7881  }
7882  else
7883  {
7884  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
7885  ObjectIdGetDatum(index->indexoid),
7886  Int16GetDatum(attnum),
7887  BoolGetDatum(false));
7888  vardata.freefunc = ReleaseSysCache;
7889  }
7890  }
7891 
7892  if (HeapTupleIsValid(vardata.statsTuple))
7893  {
7894  AttStatsSlot sslot;
7895 
7896  if (get_attstatsslot(&sslot, vardata.statsTuple,
7897  STATISTIC_KIND_CORRELATION, InvalidOid,
7898  ATTSTATSSLOT_NUMBERS))
7899  {
7900  double varCorrelation = 0.0;
7901 
7902  if (sslot.nnumbers > 0)
7903  varCorrelation = fabs(sslot.numbers[0]);
7904 
7905  if (varCorrelation > *indexCorrelation)
7906  *indexCorrelation = varCorrelation;
7907 
7908  free_attstatsslot(&sslot);
7909  }
7910  }
7911 
7912  ReleaseVariableStats(vardata);
7913  }
7914 
7915  qualSelectivity = clauselist_selectivity(root, indexQuals,
7916  baserel->relid,
7917  JOIN_INNER, NULL);
7918 
7919  /*
7920  * Now calculate the minimum possible ranges we could match with if all of
7921  * the rows were in the perfect order in the table's heap.
7922  */
7923  minimalRanges = ceil(indexRanges * qualSelectivity);
7924 
7925  /*
7926  * Now estimate the number of ranges that we'll touch by using the
7927  * indexCorrelation from the stats. Careful not to divide by zero (note
7928  * we're using the absolute value of the correlation).
7929  */
7930  if (*indexCorrelation < 1.0e-10)
7931  estimatedRanges = indexRanges;
7932  else
7933  estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);
7934 
7935  /* we expect to visit this portion of the table */
7936  selec = estimatedRanges / indexRanges;
7937 
7938  CLAMP_PROBABILITY(selec);
7939 
7940  *indexSelectivity = selec;
7941 
7942  /*
7943  * Compute the index qual costs, much as in genericcostestimate, to add to
7944  * the index costs. We can disregard indexorderbys, since BRIN doesn't
7945  * support those.
7946  */
7947  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
7948 
7949  /*
7950  * Compute the startup cost as the cost to read the whole revmap
7951  * sequentially, including the cost to execute the index quals.
7952  */
7953  *indexStartupCost =
7954  spc_seq_page_cost * statsData.revmapNumPages * loop_count;
7955  *indexStartupCost += qual_arg_cost;
7956 
7957  /*
7958  * To read a BRIN index there might be a bit of back and forth over
7959  * regular pages, as revmap might point to them out of sequential order;
7960  * calculate the total cost as reading the whole index in random order.
7961  */
7962  *indexTotalCost = *indexStartupCost +
7963  spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;
7964 
7965  /*
7966  * Charge a small amount per range tuple which we expect to match to. This
7967  * is meant to reflect the costs of manipulating the bitmap. The BRIN scan
7968  * will set a bit for each page in the range when we find a matching
7969  * range, so we must multiply the charge by the number of pages in the
7970  * range.
7971  */
7972  *indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
7973  statsData.pagesPerRange;
7974 
7975  *indexPages = index->pages;
7976 }
int16 AttrNumber
Definition: attnum.h:21
void brinGetStats(Relation index, BrinStatsData *stats)
Definition: brin.c:1254
#define BRIN_DEFAULT_PAGES_PER_RANGE
Definition: brin.h:38
#define REVMAP_PAGE_MAXITEMS
Definition: brin_page.h:93
#define Min(x, y)
Definition: c.h:937
#define Max(x, y)
Definition: c.h:931
Selectivity clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
Definition: clausesel.c:102
double cpu_operator_cost
Definition: costsize.c:124
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:158
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:132
Assert(fmt[strlen(fmt) - 1] !='\n')
#define NoLock
Definition: lockdefs.h:34
double Cost
Definition: nodes.h:251
@ JOIN_INNER
Definition: nodes.h:293
@ RTE_RELATION
Definition: parsenodes.h:982
#define planner_rt_fetch(rti, root)
Definition: pathnodes.h:523
int16 attnum
Definition: pg_attribute.h:83
#define lfirst_node(type, lc)
Definition: pg_list.h:174
static Datum Int16GetDatum(int16 X)
Definition: postgres.h:520
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:600
List * get_quals_from_indexclauses(List *indexclauses)
Definition: selfuncs.c:6352
get_index_stats_hook_type get_index_stats_hook
Definition: selfuncs.c:146
Cost index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
Definition: selfuncs.c:6382
get_relation_stats_hook_type get_relation_stats_hook
Definition: selfuncs.c:145
void get_tablespace_page_costs(Oid spcid, double *spc_random_page_cost, double *spc_seq_page_cost)
Definition: spccache.c:181
BlockNumber revmapNumPages
Definition: brin.h:34
BlockNumber pagesPerRange
Definition: brin.h:33
AttrNumber indexcol
Definition: pathnodes.h:1651
List * indexclauses
Definition: pathnodes.h:1601
IndexOptInfo * indexinfo
Definition: pathnodes.h:1600
RTEKind rtekind
Definition: parsenodes.h:1001
Index relid
Definition: pathnodes.h:871
BlockNumber pages
Definition: pathnodes.h:893
void(* freefunc)(HeapTuple tuple)
Definition: selfuncs.h:91
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1221
HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3)
Definition: syscache.c:1195
@ STATRELATTINH
Definition: syscache.h:97

References Assert(), attnum, ATTSTATSSLOT_NUMBERS, BoolGetDatum(), BRIN_DEFAULT_PAGES_PER_RANGE, brinGetStats(), CLAMP_PROBABILITY, clauselist_selectivity(), cpu_operator_cost, elog(), ERROR, free_attstatsslot(), VariableStatData::freefunc, get_attstatsslot(), get_index_stats_hook, get_quals_from_indexclauses(), get_relation_stats_hook, get_tablespace_page_costs(), HeapTupleIsValid, index_close(), index_open(), index_other_operands_eval_cost(), IndexPath::indexclauses, IndexClause::indexcol, IndexPath::indexinfo, Int16GetDatum(), InvalidOid, JOIN_INNER, lfirst_node, Max, Min, AttStatsSlot::nnumbers, NoLock, AttStatsSlot::numbers, ObjectIdGetDatum(), RelOptInfo::pages, BrinStatsData::pagesPerRange, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RelOptInfo::relid, REVMAP_PAGE_MAXITEMS, BrinStatsData::revmapNumPages, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by brinhandler().
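
The heart of the estimate is the range arithmetic: the number of ranges is scaled by the qual selectivity to get the minimum ranges that could match, and that figure is then inflated by the inverse of the strongest column correlation, capped at the total number of ranges. A hedged standalone sketch of just that arithmetic (not PostgreSQL code):

#include <stdio.h>
#include <math.h>

static double estimated_ranges(double heap_pages, double pages_per_range,
                               double qual_selectivity, double correlation)
{
    double index_ranges = fmax(ceil(heap_pages / pages_per_range), 1.0);
    double minimal_ranges = ceil(index_ranges * qual_selectivity);

    /* with no usable correlation, assume every range has to be visited */
    if (correlation < 1.0e-10)
        return index_ranges;
    return fmin(minimal_ranges / correlation, index_ranges);
}

int main(void)
{
    /* 10000 heap pages, 128 pages per range, 1% selectivity, correlation 0.9 */
    double r = estimated_ranges(10000, 128, 0.01, 0.9);

    printf("expect to read about %.1f of %.0f ranges\n", r, ceil(10000.0 / 128));
    return 0;
}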

◆ btcostestimate()

void btcostestimate ( PlannerInfo *root,
IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
double *  indexCorrelation,
double *  indexPages 
)

Definition at line 6675 of file selfuncs.c.

6679 {
6680  IndexOptInfo *index = path->indexinfo;
6681  GenericCosts costs = {0};
6682  Oid relid;
6683  AttrNumber colnum;
6684  VariableStatData vardata = {0};
6685  double numIndexTuples;
6686  Cost descentCost;
6687  List *indexBoundQuals;
6688  int indexcol;
6689  bool eqQualHere;
6690  bool found_saop;
6691  bool found_is_null_op;
6692  double num_sa_scans;
6693  ListCell *lc;
6694 
6695  /*
6696  * For a btree scan, only leading '=' quals plus inequality quals for the
6697  * immediately next attribute contribute to index selectivity (these are
6698  * the "boundary quals" that determine the starting and stopping points of
6699  * the index scan). Additional quals can suppress visits to the heap, so
6700  * it's OK to count them in indexSelectivity, but they should not count
6701  * for estimating numIndexTuples. So we must examine the given indexquals
6702  * to find out which ones count as boundary quals. We rely on the
6703  * knowledge that they are given in index column order.
6704  *
6705  * For a RowCompareExpr, we consider only the first column, just as
6706  * rowcomparesel() does.
6707  *
6708  * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6709  * index scans not one, but the ScalarArrayOpExpr's operator can be
6710  * considered to act the same as it normally does.
6711  */
6712  indexBoundQuals = NIL;
6713  indexcol = 0;
6714  eqQualHere = false;
6715  found_saop = false;
6716  found_is_null_op = false;
6717  num_sa_scans = 1;
6718  foreach(lc, path->indexclauses)
6719  {
6720  IndexClause *iclause = lfirst_node(IndexClause, lc);
6721  ListCell *lc2;
6722 
6723  if (indexcol != iclause->indexcol)
6724  {
6725  /* Beginning of a new column's quals */
6726  if (!eqQualHere)
6727  break; /* done if no '=' qual for indexcol */
6728  eqQualHere = false;
6729  indexcol++;
6730  if (indexcol != iclause->indexcol)
6731  break; /* no quals at all for indexcol */
6732  }
6733 
6734  /* Examine each indexqual associated with this index clause */
6735  foreach(lc2, iclause->indexquals)
6736  {
6737  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6738  Expr *clause = rinfo->clause;
6739  Oid clause_op = InvalidOid;
6740  int op_strategy;
6741 
6742  if (IsA(clause, OpExpr))
6743  {
6744  OpExpr *op = (OpExpr *) clause;
6745 
6746  clause_op = op->opno;
6747  }
6748  else if (IsA(clause, RowCompareExpr))
6749  {
6750  RowCompareExpr *rc = (RowCompareExpr *) clause;
6751 
6752  clause_op = linitial_oid(rc->opnos);
6753  }
6754  else if (IsA(clause, ScalarArrayOpExpr))
6755  {
6756  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6757  Node *other_operand = (Node *) lsecond(saop->args);
6758  int alength = estimate_array_length(other_operand);
6759 
6760  clause_op = saop->opno;
6761  found_saop = true;
6762  /* count number of SA scans induced by indexBoundQuals only */
6763  if (alength > 1)
6764  num_sa_scans *= alength;
6765  }
6766  else if (IsA(clause, NullTest))
6767  {
6768  NullTest *nt = (NullTest *) clause;
6769 
6770  if (nt->nulltesttype == IS_NULL)
6771  {
6772  found_is_null_op = true;
6773  /* IS NULL is like = for selectivity purposes */
6774  eqQualHere = true;
6775  }
6776  }
6777  else
6778  elog(ERROR, "unsupported indexqual type: %d",
6779  (int) nodeTag(clause));
6780 
6781  /* check for equality operator */
6782  if (OidIsValid(clause_op))
6783  {
6784  op_strategy = get_op_opfamily_strategy(clause_op,
6785  index->opfamily[indexcol]);
6786  Assert(op_strategy != 0); /* not a member of opfamily?? */
6787  if (op_strategy == BTEqualStrategyNumber)
6788  eqQualHere = true;
6789  }
6790 
6791  indexBoundQuals = lappend(indexBoundQuals, rinfo);
6792  }
6793  }
6794 
6795  /*
6796  * If index is unique and we found an '=' clause for each column, we can
6797  * just assume numIndexTuples = 1 and skip the expensive
6798  * clauselist_selectivity calculations. However, a ScalarArrayOp or
6799  * NullTest invalidates that theory, even though it sets eqQualHere.
6800  */
6801  if (index->unique &&
6802  indexcol == index->nkeycolumns - 1 &&
6803  eqQualHere &&
6804  !found_saop &&
6805  !found_is_null_op)
6806  numIndexTuples = 1.0;
6807  else
6808  {
6809  List *selectivityQuals;
6810  Selectivity btreeSelectivity;
6811 
6812  /*
6813  * If the index is partial, AND the index predicate with the
6814  * index-bound quals to produce a more accurate idea of the number of
6815  * rows covered by the bound conditions.
6816  */
6817  selectivityQuals = add_predicate_to_index_quals(index, indexBoundQuals);
6818 
6819  btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6820  index->rel->relid,
6821  JOIN_INNER,
6822  NULL);
6823  numIndexTuples = btreeSelectivity * index->rel->tuples;
6824 
6825  /*
6826  * As in genericcostestimate(), we have to adjust for any
6827  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6828  * to integer.
6829  */
6830  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6831  }
6832 
6833  /*
6834  * Now do generic index cost estimation.
6835  */
6836  costs.numIndexTuples = numIndexTuples;
6837 
6838  genericcostestimate(root, path, loop_count, &costs);
6839 
6840  /*
6841  * Add a CPU-cost component to represent the costs of initial btree
6842  * descent. We don't charge any I/O cost for touching upper btree levels,
6843  * since they tend to stay in cache, but we still have to do about log2(N)
6844  * comparisons to descend a btree of N leaf tuples. We charge one
6845  * cpu_operator_cost per comparison.
6846  *
6847  * If there are ScalarArrayOpExprs, charge this once per SA scan. The
6848  * ones after the first one are not startup cost so far as the overall
6849  * plan is concerned, so add them only to "total" cost.
6850  */
6851  if (index->tuples > 1) /* avoid computing log(0) */
6852  {
6853  descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6854  costs.indexStartupCost += descentCost;
6855  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6856  }
6857 
6858  /*
6859  * Even though we're not charging I/O cost for touching upper btree pages,
6860  * it's still reasonable to charge some CPU cost per page descended
6861  * through. Moreover, if we had no such charge at all, bloated indexes
6862  * would appear to have the same search cost as unbloated ones, at least
6863  * in cases where only a single leaf page is expected to be visited. This
6864  * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6865  * touched. The number of such pages is btree tree height plus one (ie,
6866  * we charge for the leaf page too). As above, charge once per SA scan.
6867  */
6868  descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6869  costs.indexStartupCost += descentCost;
6870  costs.indexTotalCost += costs.num_sa_scans * descentCost;
6871 
6872  /*
6873  * If we can get an estimate of the first column's ordering correlation C
6874  * from pg_statistic, estimate the index correlation as C for a
6875  * single-column index, or C * 0.75 for multiple columns. (The idea here
6876  * is that multiple columns dilute the importance of the first column's
6877  * ordering, but don't negate it entirely. Before 8.0 we divided the
6878  * correlation by the number of columns, but that seems too strong.)
6879  */
6880  if (index->indexkeys[0] != 0)
6881  {
6882  /* Simple variable --- look to stats for the underlying table */
6883  RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6884 
6885  Assert(rte->rtekind == RTE_RELATION);
6886  relid = rte->relid;
6887  Assert(relid != InvalidOid);
6888  colnum = index->indexkeys[0];
6889 
6890  if (get_relation_stats_hook &&
6891  (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6892  {
6893  /*
6894  * The hook took control of acquiring a stats tuple. If it did
6895  * supply a tuple, it'd better have supplied a freefunc.
6896  */
6897  if (HeapTupleIsValid(vardata.statsTuple) &&
6898  !vardata.freefunc)
6899  elog(ERROR, "no function provided to release variable stats with");
6900  }
6901  else
6902  {
6903  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6904  ObjectIdGetDatum(relid),
6905  Int16GetDatum(colnum),
6906  BoolGetDatum(rte->inh));
6907  vardata.freefunc = ReleaseSysCache;
6908  }
6909  }
6910  else
6911  {
6912  /* Expression --- maybe there are stats for the index itself */
6913  relid = index->indexoid;
6914  colnum = 1;
6915 
6916  if (get_index_stats_hook &&
6917  (*get_index_stats_hook) (root, relid, colnum, &vardata))
6918  {
6919  /*
6920  * The hook took control of acquiring a stats tuple. If it did
6921  * supply a tuple, it'd better have supplied a freefunc.
6922  */
6923  if (HeapTupleIsValid(vardata.statsTuple) &&
6924  !vardata.freefunc)
6925  elog(ERROR, "no function provided to release variable stats with");
6926  }
6927  else
6928  {
6929  vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6930  ObjectIdGetDatum(relid),
6931  Int16GetDatum(colnum),
6932  BoolGetDatum(false));
6933  vardata.freefunc = ReleaseSysCache;
6934  }
6935  }
6936 
6937  if (HeapTupleIsValid(vardata.statsTuple))
6938  {
6939  Oid sortop;
6940  AttStatsSlot sslot;
6941 
6942  sortop = get_opfamily_member(index->opfamily[0],
6943  index->opcintype[0],
6944  index->opcintype[0],
6945  BTLessStrategyNumber);
6946  if (OidIsValid(sortop) &&
6947  get_attstatsslot(&sslot, vardata.statsTuple,
6948  STATISTIC_KIND_CORRELATION, sortop,
6949  ATTSTATSSLOT_NUMBERS))
6950  {
6951  double varCorrelation;
6952 
6953  Assert(sslot.nnumbers == 1);
6954  varCorrelation = sslot.numbers[0];
6955 
6956  if (index->reverse_sort[0])
6957  varCorrelation = -varCorrelation;
6958 
6959  if (index->nkeycolumns > 1)
6960  costs.indexCorrelation = varCorrelation * 0.75;
6961  else
6962  costs.indexCorrelation = varCorrelation;
6963 
6964  free_attstatsslot(&sslot);
6965  }
6966  }
6967 
6968  ReleaseVariableStats(vardata);
6969 
6970  *indexStartupCost = costs.indexStartupCost;
6971  *indexTotalCost = costs.indexTotalCost;
6972  *indexSelectivity = costs.indexSelectivity;
6973  *indexCorrelation = costs.indexCorrelation;
6974  *indexPages = costs.numIndexPages;
6975 }
#define OidIsValid(objectId)
Definition: c.h:711
int get_op_opfamily_strategy(Oid opno, Oid opfamily)
Definition: lsyscache.c:82
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:165
#define IsA(nodeptr, _type_)
Definition: nodes.h:168
#define nodeTag(nodeptr)
Definition: nodes.h:122
#define lsecond(l)
Definition: pg_list.h:181
#define linitial_oid(l)
Definition: pg_list.h:178
unsigned int Oid
Definition: postgres_ext.h:31
@ IS_NULL
Definition: primnodes.h:1359
int estimate_array_length(Node *arrayexpr)
Definition: selfuncs.c:2133
void genericcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, GenericCosts *costs)
Definition: selfuncs.c:6436
List * add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
Definition: selfuncs.c:6654
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
Selectivity indexSelectivity
Definition: selfuncs.h:124
Cost indexStartupCost
Definition: selfuncs.h:122
double indexCorrelation
Definition: selfuncs.h:125
double num_sa_scans
Definition: selfuncs.h:131
Cost indexTotalCost
Definition: selfuncs.h:123
double numIndexPages
Definition: selfuncs.h:128
double numIndexTuples
Definition: selfuncs.h:129
List * indexquals
Definition: pathnodes.h:1649
NullTestType nulltesttype
Definition: primnodes.h:1366
Oid opno
Definition: primnodes.h:648
Expr * clause
Definition: pathnodes.h:2435

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, Assert(), ATTSTATSSLOT_NUMBERS, BoolGetDatum(), BTEqualStrategyNumber, BTLessStrategyNumber, RestrictInfo::clause, clauselist_selectivity(), cpu_operator_cost, elog(), ERROR, estimate_array_length(), free_attstatsslot(), VariableStatData::freefunc, genericcostestimate(), get_attstatsslot(), get_index_stats_hook, get_op_opfamily_strategy(), get_opfamily_member(), get_relation_stats_hook, HeapTupleIsValid, IndexPath::indexclauses, IndexClause::indexcol, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexClause::indexquals, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, RangeTblEntry::inh, Int16GetDatum(), InvalidOid, IS_NULL, IsA, JOIN_INNER, lappend(), lfirst_node, linitial_oid, lsecond, NIL, AttStatsSlot::nnumbers, nodeTag, NullTest::nulltesttype, GenericCosts::num_sa_scans, AttStatsSlot::numbers, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, ObjectIdGetDatum(), OidIsValid, OpExpr::opno, ScalarArrayOpExpr::opno, RowCompareExpr::opnos, planner_rt_fetch, ReleaseSysCache(), ReleaseVariableStats, RangeTblEntry::relid, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), STATRELATTINH, and VariableStatData::statsTuple.

Referenced by bthandler().
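
The two descent charges added on top of genericcostestimate() are pure CPU terms: roughly log2(N) comparisons to find the starting leaf, plus 50 * cpu_operator_cost for each of tree_height + 1 pages descended through. A worked standalone sketch of that formula (not PostgreSQL code; 0.0025 is the default cpu_operator_cost):

#include <stdio.h>
#include <math.h>

static double btree_descent_cost(double index_tuples, int tree_height,
                                 double cpu_operator_cost)
{
    double cost = 0.0;

    if (index_tuples > 1)           /* avoid log(0), as the real code does */
        cost += ceil(log(index_tuples) / log(2.0)) * cpu_operator_cost;
    /* per-page CPU charge: tree_height internal pages plus the leaf page */
    cost += (tree_height + 1) * 50.0 * cpu_operator_cost;
    return cost;
}

int main(void)
{
    /* one million leaf tuples, a btree of height 3 */
    printf("descent cost = %.4f\n", btree_descent_cost(1e6, 3, 0.0025));
    return 0;
}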

◆ convert_bytea_to_scalar()

static void convert_bytea_to_scalar ( Datum  value,
double *  scaledvalue,
Datum  lobound,
double *  scaledlobound,
Datum  hibound,
double *  scaledhibound 
)
static

Definition at line 4695 of file selfuncs.c.

4701 {
4702  bytea *valuep = DatumGetByteaPP(value);
4703  bytea *loboundp = DatumGetByteaPP(lobound);
4704  bytea *hiboundp = DatumGetByteaPP(hibound);
4705  int rangelo,
4706  rangehi,
4707  valuelen = VARSIZE_ANY_EXHDR(valuep),
4708  loboundlen = VARSIZE_ANY_EXHDR(loboundp),
4709  hiboundlen = VARSIZE_ANY_EXHDR(hiboundp),
4710  i,
4711  minlen;
4712  unsigned char *valstr = (unsigned char *) VARDATA_ANY(valuep);
4713  unsigned char *lostr = (unsigned char *) VARDATA_ANY(loboundp);
4714  unsigned char *histr = (unsigned char *) VARDATA_ANY(hiboundp);
4715 
4716  /*
4717  * Assume bytea data is uniformly distributed across all byte values.
4718  */
4719  rangelo = 0;
4720  rangehi = 255;
4721 
4722  /*
4723  * Now strip any common prefix of the three strings.
4724  */
4725  minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4726  for (i = 0; i < minlen; i++)
4727  {
4728  if (*lostr != *histr || *lostr != *valstr)
4729  break;
4730  lostr++, histr++, valstr++;
4731  loboundlen--, hiboundlen--, valuelen--;
4732  }
4733 
4734  /*
4735  * Now we can do the conversions.
4736  */
4737  *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4738  *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4739  *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4740 }
#define DatumGetByteaPP(X)
Definition: fmgr.h:291
static struct @143 value
int i
Definition: isn.c:73
#define VARDATA_ANY(PTR)
Definition: postgres.h:362
#define VARSIZE_ANY_EXHDR(PTR)
Definition: postgres.h:355
static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen, int rangelo, int rangehi)
Definition: selfuncs.c:4743
Definition: c.h:623

References convert_one_bytea_to_scalar(), DatumGetByteaPP, i, Min, value, VARDATA_ANY, and VARSIZE_ANY_EXHDR.

Referenced by convert_to_scalar().

◆ convert_numeric_to_scalar()

static double convert_numeric_to_scalar ( Datum  value,
Oid  typid,
bool *failure
)
static

Definition at line 4421 of file selfuncs.c.

4422 {
4423  switch (typid)
4424  {
4425  case BOOLOID:
4426  return (double) DatumGetBool(value);
4427  case INT2OID:
4428  return (double) DatumGetInt16(value);
4429  case INT4OID:
4430  return (double) DatumGetInt32(value);
4431  case INT8OID:
4432  return (double) DatumGetInt64(value);
4433  case FLOAT4OID:
4434  return (double) DatumGetFloat4(value);
4435  case FLOAT8OID:
4436  return (double) DatumGetFloat8(value);
4437  case NUMERICOID:
4438  /* Note: out-of-range values will be clamped to +-HUGE_VAL */
4439  return (double)
4440  DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
4441  value));
4442  case OIDOID:
4443  case REGPROCOID:
4444  case REGPROCEDUREOID:
4445  case REGOPEROID:
4446  case REGOPERATOROID:
4447  case REGCLASSOID:
4448  case REGTYPEOID:
4449  case REGCOLLATIONOID:
4450  case REGCONFIGOID:
4451  case REGDICTIONARYOID:
4452  case REGROLEOID:
4453  case REGNAMESPACEOID:
4454  /* we can treat OIDs as integers... */
4455  return (double) DatumGetObjectId(value);
4456  }
4457 
4458  *failure = true;
4459  return 0;
4460 }
Datum numeric_float8_no_overflow(PG_FUNCTION_ARGS)
Definition: numeric.c:4471
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:642
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:733
static float4 DatumGetFloat4(Datum X)
Definition: postgres.h:806
static Oid DatumGetObjectId(Datum X)
Definition: postgres.h:590
static float8 DatumGetFloat8(Datum X)
Definition: postgres.h:842
static int16 DatumGetInt16(Datum X)
Definition: postgres.h:510
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:550

References DatumGetBool(), DatumGetFloat4(), DatumGetFloat8(), DatumGetInt16(), DatumGetInt32(), DatumGetInt64(), DatumGetObjectId(), DirectFunctionCall1, numeric_float8_no_overflow(), and value.

Referenced by convert_to_scalar().

◆ convert_one_bytea_to_scalar()

static double convert_one_bytea_to_scalar ( unsigned char *  value,
int  valuelen,
int  rangelo,
int  rangehi 
)
static

Definition at line 4743 of file selfuncs.c.

4745 {
4746  double num,
4747  denom,
4748  base;
4749 
4750  if (valuelen <= 0)
4751  return 0.0; /* empty string has scalar value 0 */
4752 
4753  /*
4754  * Since base is 256, need not consider more than about 10 chars (even
4755  * this many seems like overkill)
4756  */
4757  if (valuelen > 10)
4758  valuelen = 10;
4759 
4760  /* Convert initial characters to fraction */
4761  base = rangehi - rangelo + 1;
4762  num = 0.0;
4763  denom = base;
4764  while (valuelen-- > 0)
4765  {
4766  int ch = *value++;
4767 
4768  if (ch < rangelo)
4769  ch = rangelo - 1;
4770  else if (ch > rangehi)
4771  ch = rangehi + 1;
4772  num += ((double) (ch - rangelo)) / denom;
4773  denom *= base;
4774  }
4775 
4776  return num;
4777 }

References value.

Referenced by convert_bytea_to_scalar().

◆ convert_one_string_to_scalar()

static double convert_one_string_to_scalar ( char *  value,
int  rangelo,
int  rangehi 
)
static

Definition at line 4563 of file selfuncs.c.

4564 {
4565  int slen = strlen(value);
4566  double num,
4567  denom,
4568  base;
4569 
4570  if (slen <= 0)
4571  return 0.0; /* empty string has scalar value 0 */
4572 
4573  /*
4574  * There seems little point in considering more than a dozen bytes from
4575  * the string. Since base is at least 10, that will give us nominal
4576  * resolution of at least 12 decimal digits, which is surely far more
4577  * precision than this estimation technique has got anyway (especially in
4578  * non-C locales). Also, even with the maximum possible base of 256, this
4579  * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4580  * overflow on any known machine.
4581  */
4582  if (slen > 12)
4583  slen = 12;
4584 
4585  /* Convert initial characters to fraction */
4586  base = rangehi - rangelo + 1;
4587  num = 0.0;
4588  denom = base;
4589  while (slen-- > 0)
4590  {
4591  int ch = (unsigned char) *value++;
4592 
4593  if (ch < rangelo)
4594  ch = rangelo - 1;
4595  else if (ch > rangehi)
4596  ch = rangehi + 1;
4597  num += ((double) (ch - rangelo)) / denom;
4598  denom *= base;
4599  }
4600 
4601  return num;
4602 }

References value.

Referenced by convert_string_to_scalar().
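
The mapping is a positional number system: each byte becomes one digit of a fraction in base (rangehi - rangelo + 1), so strings that sort earlier map to smaller values in [0, 1). A standalone sketch, assuming the fallback ASCII range ' '..127 that the caller uses when it has too little data (not PostgreSQL code):

#include <stdio.h>

static double string_to_scalar(const char *s, int rangelo, int rangehi)
{
    double base = rangehi - rangelo + 1;
    double num = 0.0;
    double denom = base;

    /* only the first dozen bytes contribute measurable precision */
    for (int i = 0; i < 12 && s[i] != '\0'; i++)
    {
        int ch = (unsigned char) s[i];

        if (ch < rangelo)
            ch = rangelo - 1;
        else if (ch > rangehi)
            ch = rangehi + 1;
        num += (double) (ch - rangelo) / denom;
        denom *= base;
    }
    return num;
}

int main(void)
{
    /* "apple" sorts before "banana" and therefore maps to a smaller scalar */
    printf("apple  -> %f\n", string_to_scalar("apple", ' ', 127));
    printf("banana -> %f\n", string_to_scalar("banana", ' ', 127));
    return 0;
}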

◆ convert_string_datum()

static char * convert_string_datum ( Datum  value,
Oid  typid,
Oid  collid,
bool *failure
)
static

Definition at line 4614 of file selfuncs.c.

4615 {
4616  char *val;
4617 
4618  switch (typid)
4619  {
4620  case CHAROID:
4621  val = (char *) palloc(2);
4622  val[0] = DatumGetChar(value);
4623  val[1] = '\0';
4624  break;
4625  case BPCHAROID:
4626  case VARCHAROID:
4627  case TEXTOID:
4628  val = TextDatumGetCString(value);
4629  break;
4630  case NAMEOID:
4631  {
4632  NameData *nm = (NameData *) DatumGetPointer(value);
4633 
4634  val = pstrdup(NameStr(*nm));
4635  break;
4636  }
4637  default:
4638  *failure = true;
4639  return NULL;
4640  }
4641 
4642  if (!lc_collate_is_c(collid))
4643  {
4644  char *xfrmstr;
4645  size_t xfrmlen;
4646  size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4647 
4648  /*
4649  * XXX: We could guess at a suitable output buffer size and only call
4650  * strxfrm twice if our guess is too small.
4651  *
4652  * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
4653  * bogus data or set an error. This is not really a problem unless it
4654  * crashes since it will only give an estimation error and nothing
4655  * fatal.
4656  */
4657  xfrmlen = strxfrm(NULL, val, 0);
4658 #ifdef WIN32
4659 
4660  /*
4661  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4662  * of trying to allocate this much memory (and fail), just return the
4663  * original string unmodified as if we were in the C locale.
4664  */
4665  if (xfrmlen == INT_MAX)
4666  return val;
4667 #endif
4668  xfrmstr = (char *) palloc(xfrmlen + 1);
4669  xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4670 
4671  /*
4672  * Some systems (e.g., glibc) can return a smaller value from the
4673  * second call than the first; thus the Assert must be <= not ==.
4674  */
4675  Assert(xfrmlen2 <= xfrmlen);
4676  pfree(val);
4677  val = xfrmstr;
4678  }
4679 
4680  return val;
4681 }
#define TextDatumGetCString(d)
Definition: builtins.h:89
#define NameStr(name)
Definition: c.h:682
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:166
long val
Definition: informix.c:664
char * pstrdup(const char *in)
Definition: mcxt.c:1483
void pfree(void *pointer)
Definition: mcxt.c:1306
bool lc_collate_is_c(Oid collation)
Definition: pg_locale.c:1299
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:660
static char DatumGetChar(Datum X)
Definition: postgres.h:460
Definition: c.h:677

References Assert(), DatumGetChar(), DatumGetPointer(), lc_collate_is_c(), NameStr, palloc(), pfree(), PG_USED_FOR_ASSERTS_ONLY, pstrdup(), TextDatumGetCString, val, and value.

Referenced by convert_to_scalar().
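
For non-C collations the string is run through strxfrm() so that plain byte comparison of the result matches locale ordering; the first call with a NULL buffer only measures the space needed. A minimal sketch of that two-call pattern, using the process locale via setlocale() instead of a PostgreSQL collation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <locale.h>

/* Return a malloc'd strxfrm() transform of val, or NULL on failure. */
static char *xfrm_copy(const char *val)
{
    size_t  xfrmlen = strxfrm(NULL, val, 0);    /* measure only */
    char   *xfrmstr = malloc(xfrmlen + 1);

    if (xfrmstr == NULL)
        return NULL;
    strxfrm(xfrmstr, val, xfrmlen + 1);         /* actual transform */
    return xfrmstr;
}

int main(void)
{
    setlocale(LC_COLLATE, "");

    char *t = xfrm_copy("Apple");

    if (t != NULL)
    {
        /* the transformed bytes now compare correctly with strcmp()/memcmp() */
        printf("transformed length: %zu\n", strlen(t));
        free(t);
    }
    return 0;
}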

◆ convert_string_to_scalar()

static void convert_string_to_scalar ( char *  value,
double *  scaledvalue,
char *  lobound,
double *  scaledlobound,
char *  hibound,
double *  scaledhibound 
)
static

Definition at line 4483 of file selfuncs.c.

4489 {
4490  int rangelo,
4491  rangehi;
4492  char *sptr;
4493 
4494  rangelo = rangehi = (unsigned char) hibound[0];
4495  for (sptr = lobound; *sptr; sptr++)
4496  {
4497  if (rangelo > (unsigned char) *sptr)
4498  rangelo = (unsigned char) *sptr;
4499  if (rangehi < (unsigned char) *sptr)
4500  rangehi = (unsigned char) *sptr;
4501  }
4502  for (sptr = hibound; *sptr; sptr++)
4503  {
4504  if (rangelo > (unsigned char) *sptr)
4505  rangelo = (unsigned char) *sptr;
4506  if (rangehi < (unsigned char) *sptr)
4507  rangehi = (unsigned char) *sptr;
4508  }
4509  /* If range includes any upper-case ASCII chars, make it include all */
4510  if (rangelo <= 'Z' && rangehi >= 'A')
4511  {
4512  if (rangelo > 'A')
4513  rangelo = 'A';
4514  if (rangehi < 'Z')
4515  rangehi = 'Z';
4516  }
4517  /* Ditto lower-case */
4518  if (rangelo <= 'z' && rangehi >= 'a')
4519  {
4520  if (rangelo > 'a')
4521  rangelo = 'a';
4522  if (rangehi < 'z')
4523  rangehi = 'z';
4524  }
4525  /* Ditto digits */
4526  if (rangelo <= '9' && rangehi >= '0')
4527  {
4528  if (rangelo > '0')
4529  rangelo = '0';
4530  if (rangehi < '9')
4531  rangehi = '9';
4532  }
4533 
4534  /*
4535  * If range includes less than 10 chars, assume we have not got enough
4536  * data, and make it include regular ASCII set.
4537  */
4538  if (rangehi - rangelo < 9)
4539  {
4540  rangelo = ' ';
4541  rangehi = 127;
4542  }
4543 
4544  /*
4545  * Now strip any common prefix of the three strings.
4546  */
4547  while (*lobound)
4548  {
4549  if (*lobound != *hibound || *lobound != *value)
4550  break;
4551  lobound++, hibound++, value++;
4552  }
4553 
4554  /*
4555  * Now we can do the conversions.
4556  */
4557  *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4558  *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4559  *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4560 }
static double convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
Definition: selfuncs.c:4563

References convert_one_string_to_scalar(), and value.

Referenced by convert_to_scalar().
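
Before any scaling happens, the value and both histogram bounds have their common prefix stripped, since those bytes say nothing about where the value falls between the bounds. A small standalone sketch of that step (not PostgreSQL code):

#include <stdio.h>

/* Length of the prefix shared by value and both bounds. */
static int common_prefix_len(const char *value, const char *lo, const char *hi)
{
    int i = 0;

    while (lo[i] != '\0' && lo[i] == hi[i] && lo[i] == value[i])
        i++;
    return i;
}

int main(void)
{
    const char *lo = "2024-01-01";
    const char *hi = "2024-12-31";
    const char *val = "2024-07-15";
    int skip = common_prefix_len(val, lo, hi);

    /* only "07-15" is scaled between "01-01" and "12-31" */
    printf("skip %d bytes; scale \"%s\" between \"%s\" and \"%s\"\n",
           skip, val + skip, lo + skip, hi + skip);
    return 0;
}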

◆ convert_timevalue_to_scalar()

static double convert_timevalue_to_scalar ( Datum  value,
Oid  typid,
bool *failure
)
static

Definition at line 4786 of file selfuncs.c.

4787 {
4788  switch (typid)
4789  {
4790  case TIMESTAMPOID:
4791  return DatumGetTimestamp(value);
4792  case TIMESTAMPTZOID:
4793  return DatumGetTimestampTz(value);
4794  case DATEOID:
4795  return date2timestamp_no_overflow(DatumGetDateADT(value));
4796  case INTERVALOID:
4797  {
4798  Interval *interval = DatumGetIntervalP(value);
4799 
4800  /*
4801  * Convert the month part of Interval to days using assumed
4802  * average month length of 365.25/12.0 days. Not too
4803  * accurate, but plenty good enough for our purposes.
4804  */
4805  return interval->time + interval->day * (double) USECS_PER_DAY +
4806  interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4807  }
4808  case TIMEOID:
4809  return DatumGetTimeADT(value);
4810  case TIMETZOID:
4811  {
4812  TimeTzADT *timetz = DatumGetTimeTzADTP(value);
4813 
4814  /* use GMT-equivalent time */
4815  return (double) (timetz->time + (timetz->zone * 1000000.0));
4816  }
4817  }
4818 
4819  *failure = true;
4820  return 0;
4821 }
#define MONTHS_PER_YEAR
Definition: timestamp.h:108
#define USECS_PER_DAY
Definition: timestamp.h:130
#define DAYS_PER_YEAR
Definition: timestamp.h:107
double date2timestamp_no_overflow(DateADT dateVal)
Definition: date.c:728
static DateADT DatumGetDateADT(Datum X)
Definition: date.h:54
static TimeADT DatumGetTimeADT(Datum X)
Definition: date.h:60
static TimeTzADT * DatumGetTimeTzADTP(Datum X)
Definition: date.h:66
Definition: date.h:28
TimeADT time
Definition: date.h:29
int32 zone
Definition: date.h:30
static Interval * DatumGetIntervalP(Datum X)
Definition: timestamp.h:40
static Timestamp DatumGetTimestamp(Datum X)
Definition: timestamp.h:28
static TimestampTz DatumGetTimestampTz(Datum X)
Definition: timestamp.h:34

References date2timestamp_no_overflow(), DatumGetDateADT(), DatumGetIntervalP(), DatumGetTimeADT(), DatumGetTimestamp(), DatumGetTimestampTz(), DatumGetTimeTzADTP(), DAYS_PER_YEAR, interval::month, MONTHS_PER_YEAR, TimeTzADT::time, interval::time, USECS_PER_DAY, value, and TimeTzADT::zone.

Referenced by convert_to_scalar().
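
For INTERVAL inputs the conversion reduces to the arithmetic described in the comment: microseconds, plus days scaled by USECS_PER_DAY, plus months scaled by an assumed 365.25/12-day month. A minimal self-contained sketch follows; the constants are copied in only to make it compile on its own, and the sample interval is made up.

#include <stdio.h>
#include <stdint.h>

/* Constant values as in utils/timestamp.h (copied for illustration). */
#define USECS_PER_DAY    86400000000.0
#define DAYS_PER_YEAR    365.25
#define MONTHS_PER_YEAR  12.0

/* Sketch of the INTERVAL branch: months -> days via the average month
 * length, everything expressed in microseconds. */
static double
interval_to_scalar(int64_t time_usec, int32_t day, int32_t month)
{
    return (double) time_usec +
        day * USECS_PER_DAY +
        month * ((DAYS_PER_YEAR / MONTHS_PER_YEAR) * USECS_PER_DAY);
}

int
main(void)
{
    /* '1 month 2 days 03:00:00' -> roughly 2.8e12 microseconds */
    printf("%.0f\n", interval_to_scalar(3LL * 3600 * 1000000, 2, 1));
    return 0;
}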

◆ convert_to_scalar()

static bool convert_to_scalar ( Datum  value,
Oid  valuetypid,
Oid  collid,
double *  scaledvalue,
Datum  lobound,
Datum  hibound,
Oid  boundstypid,
double *  scaledlobound,
double *  scaledhibound 
)
static

Definition at line 4274 of file selfuncs.c.

4277 {
4278  bool failure = false;
4279 
4280  /*
4281  * Both the valuetypid and the boundstypid should exactly match the
4282  * declared input type(s) of the operator we are invoked for. However,
4283  * extensions might try to use scalarineqsel as estimator for operators
4284  * with input type(s) we don't handle here; in such cases, we want to
4285  * return false, not fail. In any case, we mustn't assume that valuetypid
4286  * and boundstypid are identical.
4287  *
4288  * XXX The histogram we are interpolating between points of could belong
4289  * to a column that's only binary-compatible with the declared type. In
4290  * essence we are assuming that the semantics of binary-compatible types
4291  * are enough alike that we can use a histogram generated with one type's
4292  * operators to estimate selectivity for the other's. This is outright
4293  * wrong in some cases --- in particular signed versus unsigned
4294  * interpretation could trip us up. But it's useful enough in the
4295  * majority of cases that we do it anyway. Should think about more
4296  * rigorous ways to do it.
4297  */
4298  switch (valuetypid)
4299  {
4300  /*
4301  * Built-in numeric types
4302  */
4303  case BOOLOID:
4304  case INT2OID:
4305  case INT4OID:
4306  case INT8OID:
4307  case FLOAT4OID:
4308  case FLOAT8OID:
4309  case NUMERICOID:
4310  case OIDOID:
4311  case REGPROCOID:
4312  case REGPROCEDUREOID:
4313  case REGOPEROID:
4314  case REGOPERATOROID:
4315  case REGCLASSOID:
4316  case REGTYPEOID:
4317  case REGCOLLATIONOID:
4318  case REGCONFIGOID:
4319  case REGDICTIONARYOID:
4320  case REGROLEOID:
4321  case REGNAMESPACEOID:
4322  *scaledvalue = convert_numeric_to_scalar(value, valuetypid,
4323  &failure);
4324  *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
4325  &failure);
4326  *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid,
4327  &failure);
4328  return !failure;
4329 
4330  /*
4331  * Built-in string types
4332  */
4333  case CHAROID:
4334  case BPCHAROID:
4335  case VARCHAROID:
4336  case TEXTOID:
4337  case NAMEOID:
4338  {
4339  char *valstr = convert_string_datum(value, valuetypid,
4340  collid, &failure);
4341  char *lostr = convert_string_datum(lobound, boundstypid,
4342  collid, &failure);
4343  char *histr = convert_string_datum(hibound, boundstypid,
4344  collid, &failure);
4345 
4346  /*
4347  * Bail out if any of the values is not of string type. We
4348  * might leak converted strings for the other value(s), but
4349  * that's not worth troubling over.
4350  */
4351  if (failure)
4352  return false;
4353 
4354  convert_string_to_scalar(valstr, scaledvalue,
4355  lostr, scaledlobound,
4356  histr, scaledhibound);
4357  pfree(valstr);
4358  pfree(lostr);
4359  pfree(histr);
4360  return true;
4361  }
4362 
4363  /*
4364  * Built-in bytea type
4365  */
4366  case BYTEAOID:
4367  {
4368  /* We only support bytea vs bytea comparison */
4369  if (boundstypid != BYTEAOID)
4370  return false;
4371  convert_bytea_to_scalar(value, scaledvalue,
4372  lobound, scaledlobound,
4373  hibound, scaledhibound);
4374  return true;
4375  }
4376 
4377  /*
4378  * Built-in time types
4379  */
4380  case TIMESTAMPOID:
4381  case TIMESTAMPTZOID:
4382  case DATEOID:
4383  case INTERVALOID:
4384  case TIMEOID:
4385  case TIMETZOID:
4386  *scaledvalue = convert_timevalue_to_scalar(value, valuetypid,
4387  &failure);
4388  *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid,
4389  &failure);
4390  *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid,
4391  &failure);
4392  return !failure;
4393 
4394  /*
4395  * Built-in network types
4396  */
4397  case INETOID:
4398  case CIDROID:
4399  case MACADDROID:
4400  case MACADDR8OID:
4401  *scaledvalue = convert_network_to_scalar(value, valuetypid,
4402  &failure);
4403  *scaledlobound = convert_network_to_scalar(lobound, boundstypid,
4404  &failure);
4405  *scaledhibound = convert_network_to_scalar(hibound, boundstypid,
4406  &failure);
4407  return !failure;
4408  }
4409  /* Don't know how to convert */
4410  *scaledvalue = *scaledlobound = *scaledhibound = 0;
4411  return false;
4412 }
double convert_network_to_scalar(Datum value, Oid typid, bool *failure)
Definition: network.c:1502
static void convert_string_to_scalar(char *value, double *scaledvalue, char *lobound, double *scaledlobound, char *hibound, double *scaledhibound)
Definition: selfuncs.c:4483
static double convert_timevalue_to_scalar(Datum value, Oid typid, bool *failure)
Definition: selfuncs.c:4786
static double convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
Definition: selfuncs.c:4421
static void convert_bytea_to_scalar(Datum value, double *scaledvalue, Datum lobound, double *scaledlobound, Datum hibound, double *scaledhibound)
Definition: selfuncs.c:4695
static char * convert_string_datum(Datum value, Oid typid, Oid collid, bool *failure)
Definition: selfuncs.c:4614

References convert_bytea_to_scalar(), convert_network_to_scalar(), convert_numeric_to_scalar(), convert_string_datum(), convert_string_to_scalar(), convert_timevalue_to_scalar(), pfree(), and value.

Referenced by ineq_histogram_selectivity().

◆ eqjoinsel()

Datum eqjoinsel ( PG_FUNCTION_ARGS  )

Definition at line 2238 of file selfuncs.c.

2239 {
2240  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2241  Oid operator = PG_GETARG_OID(1);
2242  List *args = (List *) PG_GETARG_POINTER(2);
2243 
2244 #ifdef NOT_USED
2245  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2246 #endif
2247  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2248  Oid collation = PG_GET_COLLATION();
2249  double selec;
2250  double selec_inner;
2251  VariableStatData vardata1;
2252  VariableStatData vardata2;
2253  double nd1;
2254  double nd2;
2255  bool isdefault1;
2256  bool isdefault2;
2257  Oid opfuncoid;
2258  AttStatsSlot sslot1;
2259  AttStatsSlot sslot2;
2260  Form_pg_statistic stats1 = NULL;
2261  Form_pg_statistic stats2 = NULL;
2262  bool have_mcvs1 = false;
2263  bool have_mcvs2 = false;
2264  bool get_mcv_stats;
2265  bool join_is_reversed;
2266  RelOptInfo *inner_rel;
2267 
2268  get_join_variables(root, args, sjinfo,
2269  &vardata1, &vardata2, &join_is_reversed);
2270 
2271  nd1 = get_variable_numdistinct(&vardata1, &isdefault1);
2272  nd2 = get_variable_numdistinct(&vardata2, &isdefault2);
2273 
2274  opfuncoid = get_opcode(operator);
2275 
2276  memset(&sslot1, 0, sizeof(sslot1));
2277  memset(&sslot2, 0, sizeof(sslot2));
2278 
2279  /*
2280  * There is no use in fetching one side's MCVs if we lack MCVs for the
2281  * other side, so do a quick check to verify that both stats exist.
2282  */
2283  get_mcv_stats = (HeapTupleIsValid(vardata1.statsTuple) &&
2284  HeapTupleIsValid(vardata2.statsTuple) &&
2285  get_attstatsslot(&sslot1, vardata1.statsTuple,
2286  STATISTIC_KIND_MCV, InvalidOid,
2287  0) &&
2288  get_attstatsslot(&sslot2, vardata2.statsTuple,
2289  STATISTIC_KIND_MCV, InvalidOid,
2290  0));
2291 
2292  if (HeapTupleIsValid(vardata1.statsTuple))
2293  {
2294  /* note we allow use of nullfrac regardless of security check */
2295  stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
2296  if (get_mcv_stats &&
2297  statistic_proc_security_check(&vardata1, opfuncoid))
2298  have_mcvs1 = get_attstatsslot(&sslot1, vardata1.statsTuple,
2299  STATISTIC_KIND_MCV, InvalidOid,
2300  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2301  }
2302 
2303  if (HeapTupleIsValid(vardata2.statsTuple))
2304  {
2305  /* note we allow use of nullfrac regardless of security check */
2306  stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
2307  if (get_mcv_stats &&
2308  statistic_proc_security_check(&vardata2, opfuncoid))
2309  have_mcvs2 = get_attstatsslot(&sslot2, vardata2.statsTuple,
2310  STATISTIC_KIND_MCV, InvalidOid,
2311  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2312  }
2313 
2314  /* We need to compute the inner-join selectivity in all cases */
2315  selec_inner = eqjoinsel_inner(opfuncoid, collation,
2316  &vardata1, &vardata2,
2317  nd1, nd2,
2318  isdefault1, isdefault2,
2319  &sslot1, &sslot2,
2320  stats1, stats2,
2321  have_mcvs1, have_mcvs2);
2322 
2323  switch (sjinfo->jointype)
2324  {
2325  case JOIN_INNER:
2326  case JOIN_LEFT:
2327  case JOIN_FULL:
2328  selec = selec_inner;
2329  break;
2330  case JOIN_SEMI:
2331  case JOIN_ANTI:
2332 
2333  /*
2334  * Look up the join's inner relation. min_righthand is sufficient
2335  * information because neither SEMI nor ANTI joins permit any
2336  * reassociation into or out of their RHS, so the righthand will
2337  * always be exactly that set of rels.
2338  */
2339  inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2340 
2341  if (!join_is_reversed)
2342  selec = eqjoinsel_semi(opfuncoid, collation,
2343  &vardata1, &vardata2,
2344  nd1, nd2,
2345  isdefault1, isdefault2,
2346  &sslot1, &sslot2,
2347  stats1, stats2,
2348  have_mcvs1, have_mcvs2,
2349  inner_rel);
2350  else
2351  {
2352  Oid commop = get_commutator(operator);
2353  Oid commopfuncoid = OidIsValid(commop) ? get_opcode(commop) : InvalidOid;
2354 
2355  selec = eqjoinsel_semi(commopfuncoid, collation,
2356  &vardata2, &vardata1,
2357  nd2, nd1,
2358  isdefault2, isdefault1,
2359  &sslot2, &sslot1,
2360  stats2, stats1,
2361  have_mcvs2, have_mcvs1,
2362  inner_rel);
2363  }
2364 
2365  /*
2366  * We should never estimate the output of a semijoin to be more
2367  * rows than we estimate for an inner join with the same input
2368  * rels and join condition; it's obviously impossible for that to
2369  * happen. The former estimate is N1 * Ssemi while the latter is
2370  * N1 * N2 * Sinner, so we may clamp Ssemi <= N2 * Sinner. Doing
2371  * this is worthwhile because of the shakier estimation rules we
2372  * use in eqjoinsel_semi, particularly in cases where it has to
2373  * punt entirely.
2374  */
2375  selec = Min(selec, inner_rel->rows * selec_inner);
2376  break;
2377  default:
2378  /* other values not expected here */
2379  elog(ERROR, "unrecognized join type: %d",
2380  (int) sjinfo->jointype);
2381  selec = 0; /* keep compiler quiet */
2382  break;
2383  }
2384 
2385  free_attstatsslot(&sslot1);
2386  free_attstatsslot(&sslot2);
2387 
2388  ReleaseVariableStats(vardata1);
2389  ReleaseVariableStats(vardata2);
2390 
2391  CLAMP_PROBABILITY(selec);
2392 
2393  PG_RETURN_FLOAT8((float8) selec);
2394 }
double float8
Definition: c.h:566
#define PG_GETARG_OID(n)
Definition: fmgr.h:275
#define PG_RETURN_FLOAT8(x)
Definition: fmgr.h:367
#define PG_GETARG_POINTER(n)
Definition: fmgr.h:276
#define PG_GET_COLLATION()
Definition: fmgr.h:198
#define PG_GETARG_INT16(n)
Definition: fmgr.h:271
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1267
Oid get_commutator(Oid opno)
Definition: lsyscache.c:1491
JoinType
Definition: nodes.h:288
@ JOIN_SEMI
Definition: nodes.h:307
@ JOIN_FULL
Definition: nodes.h:295
@ JOIN_LEFT
Definition: nodes.h:294
@ JOIN_ANTI
Definition: nodes.h:308
static RelOptInfo * find_join_input_rel(PlannerInfo *root, Relids relids)
Definition: selfuncs.c:6317
static double eqjoinsel_inner(Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2)
Definition: selfuncs.c:2403
static double eqjoinsel_semi(Oid opfuncoid, Oid collation, VariableStatData *vardata1, VariableStatData *vardata2, double nd1, double nd2, bool isdefault1, bool isdefault2, AttStatsSlot *sslot1, AttStatsSlot *sslot2, Form_pg_statistic stats1, Form_pg_statistic stats2, bool have_mcvs1, bool have_mcvs2, RelOptInfo *inner_rel)
Definition: selfuncs.c:2600
bool statistic_proc_security_check(VariableStatData *vardata, Oid func_oid)
Definition: selfuncs.c:5628
void get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo, VariableStatData *vardata1, VariableStatData *vardata2, bool *join_is_reversed)
Definition: selfuncs.c:4908
Cardinality rows
Definition: pathnodes.h:830
Relids min_righthand
Definition: pathnodes.h:2704
JoinType jointype
Definition: pathnodes.h:2707

References generate_unaccent_rules::args, ATTSTATSSLOT_NUMBERS, ATTSTATSSLOT_VALUES, CLAMP_PROBABILITY, elog(), eqjoinsel_inner(), eqjoinsel_semi(), ERROR, find_join_input_rel(), free_attstatsslot(), get_attstatsslot(), get_commutator(), get_join_variables(), get_opcode(), get_variable_numdistinct(), GETSTRUCT, HeapTupleIsValid, InvalidOid, JOIN_ANTI, JOIN_FULL, JOIN_INNER, JOIN_LEFT, JOIN_SEMI, SpecialJoinInfo::jointype, Min, SpecialJoinInfo::min_righthand, OidIsValid, PG_GET_COLLATION, PG_GETARG_INT16, PG_GETARG_OID, PG_GETARG_POINTER, PG_RETURN_FLOAT8, ReleaseVariableStats, RelOptInfo::rows, statistic_proc_security_check(), and VariableStatData::statsTuple.

Referenced by neqjoinsel().
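
The SEMI/ANTI clamp applied above (Ssemi <= N2 * Sinner) needs nothing beyond the component estimates; a hedged sketch of that final step, with all numbers invented:

#include <stdio.h>

int
main(void)
{
    double selec_semi = 0.9;        /* eqjoinsel_semi() result (assumed) */
    double selec_inner = 0.0004;    /* eqjoinsel_inner() result (assumed) */
    double inner_rows = 1000.0;     /* inner_rel->rows (assumed) */
    double selec = selec_semi;

    /* selec = Min(selec, inner_rel->rows * selec_inner): 0.9 -> 0.4 */
    if (selec > inner_rows * selec_inner)
        selec = inner_rows * selec_inner;

    printf("clamped semijoin selectivity = %g\n", selec);
    return 0;
}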

◆ eqjoinsel_inner()

static double eqjoinsel_inner ( Oid  opfuncoid,
Oid  collation,
VariableStatData vardata1,
VariableStatData vardata2,
double  nd1,
double  nd2,
bool  isdefault1,
bool  isdefault2,
AttStatsSlot sslot1,
AttStatsSlot sslot2,
Form_pg_statistic  stats1,
Form_pg_statistic  stats2,
bool  have_mcvs1,
bool  have_mcvs2 
)
static

Definition at line 2403 of file selfuncs.c.

2410 {
2411  double selec;
2412 
2413  if (have_mcvs1 && have_mcvs2)
2414  {
2415  /*
2416  * We have most-common-value lists for both relations. Run through
2417  * the lists to see which MCVs actually join to each other with the
2418  * given operator. This allows us to determine the exact join
2419  * selectivity for the portion of the relations represented by the MCV
2420  * lists. We still have to estimate for the remaining population, but
2421  * in a skewed distribution this gives us a big leg up in accuracy.
2422  * For motivation see the analysis in Y. Ioannidis and S.
2423  * Christodoulakis, "On the propagation of errors in the size of join
2424  * results", Technical Report 1018, Computer Science Dept., University
2425  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2426  */
2427  LOCAL_FCINFO(fcinfo, 2);
2428  FmgrInfo eqproc;
2429  bool *hasmatch1;
2430  bool *hasmatch2;
2431  double nullfrac1 = stats1->stanullfrac;
2432  double nullfrac2 = stats2->stanullfrac;
2433  double matchprodfreq,
2434  matchfreq1,
2435  matchfreq2,
2436  unmatchfreq1,
2437  unmatchfreq2,
2438  otherfreq1,
2439  otherfreq2,
2440  totalsel1,
2441  totalsel2;
2442  int i,
2443  nmatches;
2444 
2445  fmgr_info(opfuncoid, &eqproc);
2446 
2447  /*
2448  * Save a few cycles by setting up the fcinfo struct just once. Using
2449  * FunctionCallInvoke directly also avoids failure if the eqproc
2450  * returns NULL, though really equality functions should never do
2451  * that.
2452  */
2453  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2454  NULL, NULL);
2455  fcinfo->args[0].isnull = false;
2456  fcinfo->args[1].isnull = false;
2457 
2458  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2459  hasmatch2 = (bool *) palloc0(sslot2->nvalues * sizeof(bool));
2460 
2461  /*
2462  * Note we assume that each MCV will match at most one member of the
2463  * other MCV list. If the operator isn't really equality, there could
2464  * be multiple matches --- but we don't look for them, both for speed
2465  * and because the math wouldn't add up...
2466  */
2467  matchprodfreq = 0.0;
2468  nmatches = 0;
2469  for (i = 0; i < sslot1->nvalues; i++)
2470  {
2471  int j;
2472 
2473  fcinfo->args[0].value = sslot1->values[i];
2474 
2475  for (j = 0; j < sslot2->nvalues; j++)
2476  {
2477  Datum fresult;
2478 
2479  if (hasmatch2[j])
2480  continue;
2481  fcinfo->args[1].value = sslot2->values[j];
2482  fcinfo->isnull = false;
2483  fresult = FunctionCallInvoke(fcinfo);
2484  if (!fcinfo->isnull && DatumGetBool(fresult))
2485  {
2486  hasmatch1[i] = hasmatch2[j] = true;
2487  matchprodfreq += sslot1->numbers[i] * sslot2->numbers[j];
2488  nmatches++;
2489  break;
2490  }
2491  }
2492  }
2493  CLAMP_PROBABILITY(matchprodfreq);
2494  /* Sum up frequencies of matched and unmatched MCVs */
2495  matchfreq1 = unmatchfreq1 = 0.0;
2496  for (i = 0; i < sslot1->nvalues; i++)
2497  {
2498  if (hasmatch1[i])
2499  matchfreq1 += sslot1->numbers[i];
2500  else
2501  unmatchfreq1 += sslot1->numbers[i];
2502  }
2503  CLAMP_PROBABILITY(matchfreq1);
2504  CLAMP_PROBABILITY(unmatchfreq1);
2505  matchfreq2 = unmatchfreq2 = 0.0;
2506  for (i = 0; i < sslot2->nvalues; i++)
2507  {
2508  if (hasmatch2[i])
2509  matchfreq2 += sslot2->numbers[i];
2510  else
2511  unmatchfreq2 += sslot2->numbers[i];
2512  }
2513  CLAMP_PROBABILITY(matchfreq2);
2514  CLAMP_PROBABILITY(unmatchfreq2);
2515  pfree(hasmatch1);
2516  pfree(hasmatch2);
2517 
2518  /*
2519  * Compute total frequency of non-null values that are not in the MCV
2520  * lists.
2521  */
2522  otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2523  otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2524  CLAMP_PROBABILITY(otherfreq1);
2525  CLAMP_PROBABILITY(otherfreq2);
2526 
2527  /*
2528  * We can estimate the total selectivity from the point of view of
2529  * relation 1 as: the known selectivity for matched MCVs, plus
2530  * unmatched MCVs that are assumed to match against random members of
2531  * relation 2's non-MCV population, plus non-MCV values that are
2532  * assumed to match against random members of relation 2's unmatched
2533  * MCVs plus non-MCV values.
2534  */
2535  totalsel1 = matchprodfreq;
2536  if (nd2 > sslot2->nvalues)
2537  totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - sslot2->nvalues);
2538  if (nd2 > nmatches)
2539  totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2540  (nd2 - nmatches);
2541  /* Same estimate from the point of view of relation 2. */
2542  totalsel2 = matchprodfreq;
2543  if (nd1 > sslot1->nvalues)
2544  totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - sslot1->nvalues);
2545  if (nd1 > nmatches)
2546  totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2547  (nd1 - nmatches);
2548 
2549  /*
2550  * Use the smaller of the two estimates. This can be justified in
2551  * essentially the same terms as given below for the no-stats case: to
2552  * a first approximation, we are estimating from the point of view of
2553  * the relation with smaller nd.
2554  */
2555  selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2556  }
2557  else
2558  {
2559  /*
2560  * We do not have MCV lists for both sides. Estimate the join
2561  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2562  * is plausible if we assume that the join operator is strict and the
2563  * non-null values are about equally distributed: a given non-null
2564  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2565  * of rel2, so total join rows are at most
2566  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2567  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2568  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2569  * with MIN() is an upper bound. Using the MIN() means we estimate
2570  * from the point of view of the relation with smaller nd (since the
2571  * larger nd is determining the MIN). It is reasonable to assume that
2572  * most tuples in this rel will have join partners, so the bound is
2573  * probably reasonably tight and should be taken as-is.
2574  *
2575  * XXX Can we be smarter if we have an MCV list for just one side? It
2576  * seems that if we assume equal distribution for the other side, we
2577  * end up with the same answer anyway.
2578  */
2579  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2580  double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2581 
2582  selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2583  if (nd1 > nd2)
2584  selec /= nd1;
2585  else
2586  selec /= nd2;
2587  }
2588 
2589  return selec;
2590 }
void fmgr_info(Oid functionId, FmgrInfo *finfo)
Definition: fmgr.c:127
#define InitFunctionCallInfoData(Fcinfo, Flinfo, Nargs, Collation, Context, Resultinfo)
Definition: fmgr.h:150
#define LOCAL_FCINFO(name, nargs)
Definition: fmgr.h:110
#define FunctionCallInvoke(fcinfo)
Definition: fmgr.h:172
int j
Definition: isn.c:74
void * palloc0(Size size)
Definition: mcxt.c:1230
uintptr_t Datum
Definition: postgres.h:412
Definition: fmgr.h:57

References CLAMP_PROBABILITY, DatumGetBool(), fmgr_info(), FunctionCallInvoke, i, InitFunctionCallInfoData, j, LOCAL_FCINFO, AttStatsSlot::numbers, AttStatsSlot::nvalues, palloc0(), pfree(), and AttStatsSlot::values.

Referenced by eqjoinsel().
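
When MCV lists are not available for both sides, the fallback path collapses to the one-line formula described in the comment. A minimal sketch, assuming a strict equality operator and evenly distributed non-null values; the function name and inputs are illustrative:

#include <stdio.h>

/* MIN(1/nd1, 1/nd2) * (1 - nullfrac1) * (1 - nullfrac2), as in the no-MCV path */
static double
inner_join_selectivity_fallback(double nd1, double nd2,
                                double nullfrac1, double nullfrac2)
{
    double selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);

    return selec / (nd1 > nd2 ? nd1 : nd2);
}

int
main(void)
{
    /* hypothetical: 1000 vs 50 distinct values, 10% and 0% NULLs -> 0.0009 */
    printf("%g\n", inner_join_selectivity_fallback(1000, 50, 0.1, 0.0));
    return 0;
}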

◆ eqjoinsel_semi()

static double eqjoinsel_semi ( Oid  opfuncoid,
Oid  collation,
VariableStatData vardata1,
VariableStatData vardata2,
double  nd1,
double  nd2,
bool  isdefault1,
bool  isdefault2,
AttStatsSlot sslot1,
AttStatsSlot sslot2,
Form_pg_statistic  stats1,
Form_pg_statistic  stats2,
bool  have_mcvs1,
bool  have_mcvs2,
RelOptInfo inner_rel 
)
static

Definition at line 2600 of file selfuncs.c.

2608 {
2609  double selec;
2610 
2611  /*
2612  * We clamp nd2 to be not more than what we estimate the inner relation's
2613  * size to be. This is intuitively somewhat reasonable since obviously
2614  * there can't be more than that many distinct values coming from the
2615  * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
2616  * likewise) is that this is the only pathway by which restriction clauses
2617  * applied to the inner rel will affect the join result size estimate,
2618  * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2619  * only the outer rel's size. If we clamped nd1 we'd be double-counting
2620  * the selectivity of outer-rel restrictions.
2621  *
2622  * We can apply this clamping both with respect to the base relation from
2623  * which the join variable comes (if there is just one), and to the
2624  * immediate inner input relation of the current join.
2625  *
2626  * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2627  * great, maybe, but it didn't come out of nowhere either. This is most
2628  * helpful when the inner relation is empty and consequently has no stats.
2629  */
2630  if (vardata2->rel)
2631  {
2632  if (nd2 >= vardata2->rel->rows)
2633  {
2634  nd2 = vardata2->rel->rows;
2635  isdefault2 = false;
2636  }
2637  }
2638  if (nd2 >= inner_rel->rows)
2639  {
2640  nd2 = inner_rel->rows;
2641  isdefault2 = false;
2642  }
2643 
2644  if (have_mcvs1 && have_mcvs2 && OidIsValid(opfuncoid))
2645  {
2646  /*
2647  * We have most-common-value lists for both relations. Run through
2648  * the lists to see which MCVs actually join to each other with the
2649  * given operator. This allows us to determine the exact join
2650  * selectivity for the portion of the relations represented by the MCV
2651  * lists. We still have to estimate for the remaining population, but
2652  * in a skewed distribution this gives us a big leg up in accuracy.
2653  */
2654  LOCAL_FCINFO(fcinfo, 2);
2655  FmgrInfo eqproc;
2656  bool *hasmatch1;
2657  bool *hasmatch2;
2658  double nullfrac1 = stats1->stanullfrac;
2659  double matchfreq1,
2660  uncertainfrac,
2661  uncertain;
2662  int i,
2663  nmatches,
2664  clamped_nvalues2;
2665 
2666  /*
2667  * The clamping above could have resulted in nd2 being less than
2668  * sslot2->nvalues; in which case, we assume that precisely the nd2
2669  * most common values in the relation will appear in the join input,
2670  * and so compare to only the first nd2 members of the MCV list. Of
2671  * course this is frequently wrong, but it's the best bet we can make.
2672  */
2673  clamped_nvalues2 = Min(sslot2->nvalues, nd2);
2674 
2675  fmgr_info(opfuncoid, &eqproc);
2676 
2677  /*
2678  * Save a few cycles by setting up the fcinfo struct just once. Using
2679  * FunctionCallInvoke directly also avoids failure if the eqproc
2680  * returns NULL, though really equality functions should never do
2681  * that.
2682  */
2683  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2684  NULL, NULL);
2685  fcinfo->args[0].isnull = false;
2686  fcinfo->args[1].isnull = false;
2687 
2688  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2689  hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2690 
2691  /*
2692  * Note we assume that each MCV will match at most one member of the
2693  * other MCV list. If the operator isn't really equality, there could
2694  * be multiple matches --- but we don't look for them, both for speed
2695  * and because the math wouldn't add up...
2696  */
2697  nmatches = 0;
2698  for (i = 0; i < sslot1->nvalues; i++)
2699  {
2700  int j;
2701 
2702  fcinfo->args[0].value = sslot1->values[i];
2703 
2704  for (j = 0; j < clamped_nvalues2; j++)
2705  {
2706  Datum fresult;
2707 
2708  if (hasmatch2[j])
2709  continue;
2710  fcinfo->args[1].value = sslot2->values[j];
2711  fcinfo->isnull = false;
2712  fresult = FunctionCallInvoke(fcinfo);
2713  if (!fcinfo->isnull && DatumGetBool(fresult))
2714  {
2715  hasmatch1[i] = hasmatch2[j] = true;
2716  nmatches++;
2717  break;
2718  }
2719  }
2720  }
2721  /* Sum up frequencies of matched MCVs */
2722  matchfreq1 = 0.0;
2723  for (i = 0; i < sslot1->nvalues; i++)
2724  {
2725  if (hasmatch1[i])
2726  matchfreq1 += sslot1->numbers[i];
2727  }
2728  CLAMP_PROBABILITY(matchfreq1);
2729  pfree(hasmatch1);
2730  pfree(hasmatch2);
2731 
2732  /*
2733  * Now we need to estimate the fraction of relation 1 that has at
2734  * least one join partner. We know for certain that the matched MCVs
2735  * do, so that gives us a lower bound, but we're really in the dark
2736  * about everything else. Our crude approach is: if nd1 <= nd2 then
2737  * assume all non-null rel1 rows have join partners, else assume for
2738  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2739  * can discount the known-matched MCVs from the distinct-values counts
2740  * before doing the division.
2741  *
2742  * Crude as the above is, it's completely useless if we don't have
2743  * reliable ndistinct values for both sides. Hence, if either nd1 or
2744  * nd2 is default, punt and assume half of the uncertain rows have
2745  * join partners.
2746  */
2747  if (!isdefault1 && !isdefault2)
2748  {
2749  nd1 -= nmatches;
2750  nd2 -= nmatches;
2751  if (nd1 <= nd2 || nd2 < 0)
2752  uncertainfrac = 1.0;
2753  else
2754  uncertainfrac = nd2 / nd1;
2755  }
2756  else
2757  uncertainfrac = 0.5;
2758  uncertain = 1.0 - matchfreq1 - nullfrac1;
2759  CLAMP_PROBABILITY(uncertain);
2760  selec = matchfreq1 + uncertainfrac * uncertain;
2761  }
2762  else
2763  {
2764  /*
2765  * Without MCV lists for both sides, we can only use the heuristic
2766  * about nd1 vs nd2.
2767  */
2768  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2769 
2770  if (!isdefault1 && !isdefault2)
2771  {
2772  if (nd1 <= nd2 || nd2 < 0)
2773  selec = 1.0 - nullfrac1;
2774  else
2775  selec = (nd2 / nd1) * (1.0 - nullfrac1);
2776  }
2777  else
2778  selec = 0.5 * (1.0 - nullfrac1);
2779  }
2780 
2781  return selec;
2782 }

References CLAMP_PROBABILITY, DatumGetBool(), fmgr_info(), FunctionCallInvoke, i, InitFunctionCallInfoData, j, LOCAL_FCINFO, Min, AttStatsSlot::numbers, AttStatsSlot::nvalues, OidIsValid, palloc0(), pfree(), VariableStatData::rel, RelOptInfo::rows, and AttStatsSlot::values.

Referenced by eqjoinsel().
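
The closing heuristic of the MCV branch is straightforward arithmetic once matchfreq1, nullfrac1, and the match-discounted ndistinct values are in hand. A sketch with invented inputs (the variable names follow the listing above):

#include <stdio.h>

int
main(void)
{
    double matchfreq1 = 0.30;       /* outer MCV frequency with a proven match */
    double nullfrac1 = 0.05;        /* outer null fraction */
    double nd1 = 500, nd2 = 200;    /* distinct counts, neither defaulted */
    int    nmatches = 40;           /* MCVs matched on both sides */
    double uncertainfrac, uncertain, selec;

    /* discount the known matches, then assume nd2/nd1 of the rest join */
    nd1 -= nmatches;
    nd2 -= nmatches;
    uncertainfrac = (nd1 <= nd2 || nd2 < 0) ? 1.0 : nd2 / nd1;

    uncertain = 1.0 - matchfreq1 - nullfrac1;   /* rows we know nothing about */
    selec = matchfreq1 + uncertainfrac * uncertain;

    printf("semijoin selectivity estimate = %g\n", selec);  /* ~0.526 */
    return 0;
}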

◆ eqsel()

Datum eqsel ( PG_FUNCTION_ARGS  )

Definition at line 225 of file selfuncs.c.

226 {
227  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
228 }
static double eqsel_internal(PG_FUNCTION_ARGS, bool negate)
Definition: selfuncs.c:234

References eqsel_internal(), and PG_RETURN_FLOAT8.

◆ eqsel_internal()

static double eqsel_internal ( PG_FUNCTION_ARGS  ,
bool  negate 
)
static

Definition at line 234 of file selfuncs.c.

235 {
236  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
237  Oid operator = PG_GETARG_OID(1);
238  List *args = (List *) PG_GETARG_POINTER(2);
239  int varRelid = PG_GETARG_INT32(3);
240  Oid collation = PG_GET_COLLATION();
241  VariableStatData vardata;
242  Node *other;
243  bool varonleft;
244  double selec;
245 
246  /*
247  * When asked about <>, we do the estimation using the corresponding =
248  * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
249  */
250  if (negate)
251  {
252  operator = get_negator(operator);
253  if (!OidIsValid(operator))
254  {
255  /* Use default selectivity (should we raise an error instead?) */
256  return 1.0 - DEFAULT_EQ_SEL;
257  }
258  }
259 
260  /*
261  * If expression is not variable = something or something = variable, then
262  * punt and return a default estimate.
263  */
264  if (!get_restriction_variable(root, args, varRelid,
265  &vardata, &other, &varonleft))
266  return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;
267 
268  /*
269  * We can do a lot better if the something is a constant. (Note: the
270  * Const might result from estimation rather than being a simple constant
271  * in the query.)
272  */
273  if (IsA(other, Const))
274  selec = var_eq_const(&vardata, operator, collation,
275  ((Const *) other)->constvalue,
276  ((Const *) other)->constisnull,
277  varonleft, negate);
278  else
279  selec = var_eq_non_const(&vardata, operator, collation, other,
280  varonleft, negate);
281 
282  ReleaseVariableStats(vardata);
283 
284  return selec;
285 }
#define PG_GETARG_INT32(n)
Definition: fmgr.h:269
Oid get_negator(Oid opno)
Definition: lsyscache.c:1515
bool get_restriction_variable(PlannerInfo *root, List *args, int varRelid, VariableStatData *vardata, Node **other, bool *varonleft)
Definition: selfuncs.c:4848
double var_eq_non_const(VariableStatData *vardata, Oid oproid, Oid collation, Node *other, bool varonleft, bool negate)
Definition: selfuncs.c:464
#define DEFAULT_EQ_SEL
Definition: selfuncs.h:34

References generate_unaccent_rules::args, DEFAULT_EQ_SEL, get_negator(), get_restriction_variable(), IsA, OidIsValid, PG_GET_COLLATION, PG_GETARG_INT32, PG_GETARG_OID, PG_GETARG_POINTER, ReleaseVariableStats, var_eq_const(), and var_eq_non_const().

Referenced by eqsel(), and neqsel().
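
The net effect for <> is that equality is estimated first and then inverted while excluding NULLs, i.e. 1.0 - eq_selectivity - nullfrac as stated in the comment. A worked example with assumed statistics:

#include <stdio.h>

int
main(void)
{
    double nullfrac = 0.10;     /* stanullfrac (assumed) */
    double eq_selec = 0.02;     /* estimated selectivity of col = const (assumed) */

    /* col <> const selects the rows that are non-null and not equal */
    double ne_selec = 1.0 - eq_selec - nullfrac;

    printf("<> selectivity = %g\n", ne_selec);  /* 0.88 */
    return 0;
}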

◆ estimate_array_length()

int estimate_array_length ( Node arrayexpr)

Definition at line 2133 of file selfuncs.c.

2134 {
2135  /* look through any binary-compatible relabeling of arrayexpr */
2136  arrayexpr = strip_array_coercion(arrayexpr);
2137 
2138  if (arrayexpr && IsA(arrayexpr, Const))
2139  {
2140  Datum arraydatum = ((Const *) arrayexpr)->constvalue;
2141  bool arrayisnull = ((Const *) arrayexpr)->constisnull;
2142  ArrayType *arrayval;
2143 
2144  if (arrayisnull)
2145  return 0;
2146  arrayval = DatumGetArrayTypeP(arraydatum);
2147  return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2148  }
2149  else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2150  !((ArrayExpr *) arrayexpr)->multidims)
2151  {
2152  return list_length(((ArrayExpr *) arrayexpr)->elements);
2153  }
2154  else
2155  {
2156  /* default guess --- see also scalararraysel */
2157  return 10;
2158  }
2159 }
#define ARR_NDIM(a)
Definition: array.h:283
#define DatumGetArrayTypeP(X)
Definition: array.h:254
#define ARR_DIMS(a)
Definition: array.h:287
int ArrayGetNItems(int ndim, const int *dims)
Definition: arrayutils.c:76
static int list_length(const List *l)
Definition: pg_list.h:150
static Node * strip_array_coercion(Node *node)
Definition: selfuncs.c:1781

References ARR_DIMS, ARR_NDIM, ArrayGetNItems(), DatumGetArrayTypeP, IsA, list_length(), and strip_array_coercion().

Referenced by array_unnest_support(), btcostestimate(), cost_qual_eval_walker(), cost_tidscan(), genericcostestimate(), and gincost_scalararrayopexpr().

◆ estimate_hash_bucket_stats()

void estimate_hash_bucket_stats ( PlannerInfo root,
Node hashkey,
double  nbuckets,
Selectivity mcv_freq,
Selectivity bucketsize_frac 
)

Definition at line 3767 of file selfuncs.c.

3770 {
3771  VariableStatData vardata;
3772  double estfract,
3773  ndistinct,
3774  stanullfrac,
3775  avgfreq;
3776  bool isdefault;
3777  AttStatsSlot sslot;
3778 
3779  examine_variable(root, hashkey, 0, &vardata);
3780 
3781  /* Look up the frequency of the most common value, if available */
3782  *mcv_freq = 0.0;
3783 
3784  if (HeapTupleIsValid(vardata.statsTuple))
3785  {
3786  if (get_attstatsslot(&sslot, vardata.statsTuple,
3787  STATISTIC_KIND_MCV, InvalidOid,
3788  ATTSTATSSLOT_NUMBERS))
3789  {
3790  /*
3791  * The first MCV stat is for the most common value.
3792  */
3793  if (sslot.nnumbers > 0)
3794  *mcv_freq = sslot.numbers[0];
3795  free_attstatsslot(&sslot);
3796  }
3797  }
3798 
3799  /* Get number of distinct values */
3800  ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3801 
3802  /*
3803  * If ndistinct isn't real, punt. We normally return 0.1, but if the
3804  * mcv_freq is known to be even higher than that, use it instead.
3805  */
3806  if (isdefault)
3807  {
3808  *bucketsize_frac = (Selectivity) Max(0.1, *mcv_freq);
3809  ReleaseVariableStats(vardata);
3810  return;
3811  }
3812 
3813  /* Get fraction that are null */
3814  if (HeapTupleIsValid(vardata.statsTuple))
3815  {
3816  Form_pg_statistic stats;
3817 
3818  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3819  stanullfrac = stats->stanullfrac;
3820  }
3821  else
3822  stanullfrac = 0.0;
3823 
3824  /* Compute avg freq of all distinct data values in raw relation */
3825  avgfreq = (1.0 - stanullfrac) / ndistinct;
3826 
3827  /*
3828  * Adjust ndistinct to account for restriction clauses. Observe we are
3829  * assuming that the data distribution is affected uniformly by the
3830  * restriction clauses!
3831  *
3832  * XXX Possibly better way, but much more expensive: multiply by
3833  * selectivity of rel's restriction clauses that mention the target Var.
3834  */
3835  if (vardata.rel && vardata.rel->tuples > 0)
3836  {
3837  ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3838  ndistinct = clamp_row_est(ndistinct);
3839  }
3840 
3841  /*
3842  * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3843  * number of buckets is less than the expected number of distinct values;
3844  * otherwise it is 1/ndistinct.
3845  */
3846  if (ndistinct > nbuckets)
3847  estfract = 1.0 / nbuckets;
3848  else
3849  estfract = 1.0 / ndistinct;
3850 
3851  /*
3852  * Adjust estimated bucketsize upward to account for skewed distribution.
3853  */
3854  if (avgfreq > 0.0 && *mcv_freq > avgfreq)
3855  estfract *= *mcv_freq / avgfreq;
3856 
3857  /*
3858  * Clamp bucketsize to sane range (the above adjustment could easily
3859  * produce an out-of-range result). We set the lower bound a little above
3860  * zero, since zero isn't a very sane result.
3861  */
3862  if (estfract < 1.0e-6)
3863  estfract = 1.0e-6;
3864  else if (estfract > 1.0)
3865  estfract = 1.0;
3866 
3867  *bucketsize_frac = (Selectivity) estfract;
3868 
3869  ReleaseVariableStats(vardata);
3870 }
double clamp_row_est(double nrows)
Definition: costsize.c:201
Cardinality tuples
Definition: pathnodes.h:894

References ATTSTATSSLOT_NUMBERS, clamp_row_est(), examine_variable(), free_attstatsslot(), get_attstatsslot(), get_variable_numdistinct(), GETSTRUCT, HeapTupleIsValid, InvalidOid, Max, AttStatsSlot::nnumbers, AttStatsSlot::numbers, VariableStatData::rel, ReleaseVariableStats, RelOptInfo::rows, VariableStatData::statsTuple, and RelOptInfo::tuples.

Referenced by final_cost_hashjoin().
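
Once the statistics lookups are done, the bucketsize estimate is a short chain of arithmetic: start from 1/min(nbuckets, ndistinct), inflate by the skew ratio mcv_freq/avgfreq, and clamp. A hedged sketch with invented statistics:

#include <stdio.h>

int
main(void)
{
    double ndistinct = 5000.0;   /* distinct hash key values (assumed) */
    double nbuckets = 1024.0;    /* hash table buckets (assumed) */
    double stanullfrac = 0.0;
    double mcv_freq = 0.15;      /* frequency of the most common value (assumed) */
    double avgfreq, estfract;

    /* average frequency of a distinct value in the raw relation */
    avgfreq = (1.0 - stanullfrac) / ndistinct;

    /* 1/nbuckets while buckets are scarcer than distinct values */
    estfract = (ndistinct > nbuckets) ? 1.0 / nbuckets : 1.0 / ndistinct;

    /* inflate for skew: the busiest bucket holds the most common value */
    if (avgfreq > 0.0 && mcv_freq > avgfreq)
        estfract *= mcv_freq / avgfreq;

    /* clamp to a sane range */
    if (estfract < 1.0e-6)
        estfract = 1.0e-6;
    else if (estfract > 1.0)
        estfract = 1.0;

    printf("bucketsize fraction = %g\n", estfract);
    return 0;
}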

◆ estimate_hashagg_tablesize()

double estimate_hashagg_tablesize ( PlannerInfo root,
Path path,
const AggClauseCosts agg_costs,
double  dNumGroups 
)

Definition at line 3886 of file selfuncs.c.

3888 {
3889  Size hashentrysize;
3890 
3891  hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
3892  path->pathtarget->width,
3893  agg_costs->transitionSpace);
3894 
3895  /*
3896  * Note that this disregards the effect of fill-factor and growth policy
3897  * of the hash table. That's probably ok, given that the default
3898  * fill-factor is relatively high. It'd be hard to meaningfully factor in
3899  * "double-in-size" growth policies here.
3900  */
3901  return hashentrysize * dNumGroups;
3902 }
size_t Size
Definition: c.h:541
Size hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
Definition: nodeAgg.c:1686
Size transitionSpace
Definition: pathnodes.h:62
List * aggtransinfos
Definition: pathnodes.h:474

References PlannerInfo::aggtransinfos, hash_agg_entry_size(), list_length(), and AggClauseCosts::transitionSpace.

Referenced by consider_groupingsets_paths().
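
The estimate is literally entry size times group count; for instance, with a hypothetical 64-byte entry and 200,000 estimated groups the table would need roughly 12.8 MB, which the caller can then weigh against available hash memory:

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    size_t hashentrysize = 64;      /* hypothetical hash_agg_entry_size() result */
    double dNumGroups = 200000.0;   /* hypothetical group-count estimate */

    printf("%.1f MB\n", hashentrysize * dNumGroups / 1e6);  /* 12.8 MB */
    return 0;
}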

◆ estimate_multivariate_ndistinct()

static bool estimate_multivariate_ndistinct ( PlannerInfo root,
RelOptInfo rel,
List **  varinfos,
double *  ndistinct 
)
static

Definition at line 3923 of file selfuncs.c.

3925 {
3926  ListCell *lc;
3927  int nmatches_vars;
3928  int nmatches_exprs;
3929  Oid statOid = InvalidOid;
3930  MVNDistinct *stats;
3931  StatisticExtInfo *matched_info = NULL;
3932  RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
3933 
3934  /* bail out immediately if the table has no extended statistics */
3935  if (!rel->statlist)
3936  return false;
3937 
3938  /* look for the ndistinct statistics object matching the most vars */
3939  nmatches_vars = 0; /* we require at least two matches */
3940  nmatches_exprs = 0;
3941  foreach(lc, rel->statlist)
3942  {
3943  ListCell *lc2;
3944  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3945  int nshared_vars = 0;
3946  int nshared_exprs = 0;
3947 
3948  /* skip statistics of other kinds */
3949  if (info->kind != STATS_EXT_NDISTINCT)
3950  continue;
3951 
3952  /* skip statistics with mismatching stxdinherit value */
3953  if (info->inherit != rte->inh)
3954  continue;
3955 
3956  /*
3957  * Determine how many expressions (and variables in non-matched
3958  * expressions) match. We'll then use these numbers to pick the
3959  * statistics object that best matches the clauses.
3960  */
3961  foreach(lc2, *varinfos)
3962  {
3963  ListCell *lc3;
3964  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
3965  AttrNumber attnum;
3966 
3967  Assert(varinfo->rel == rel);
3968 
3969  /* simple Var, search in statistics keys directly */
3970  if (IsA(varinfo->var, Var))
3971  {
3972  attnum = ((Var *) varinfo->var)->varattno;
3973 
3974  /*
3975  * Ignore system attributes - we don't support statistics on
3976  * them, so can't match them (and it'd fail as the values are
3977  * negative).
3978  */
3979  if (!AttrNumberIsForUserDefinedAttr(attnum))
3980  continue;
3981 
3982  if (bms_is_member(attnum, info->keys))
3983  nshared_vars++;
3984 
3985  continue;
3986  }
3987 
3988  /* expression - see if it's in the statistics object */
3989  foreach(lc3, info->exprs)
3990  {
3991  Node *expr = (Node *) lfirst(lc3);
3992 
3993  if (equal(varinfo->var, expr))
3994  {
3995  nshared_exprs++;
3996  break;
3997  }
3998  }
3999  }
4000 
4001  if (nshared_vars + nshared_exprs < 2)
4002  continue;
4003 
4004  /*
4005  * Does this statistics object match more columns than the currently
4006  * best object? If so, use this one instead.
4007  *
4008  * XXX This should break ties using name of the object, or something
4009  * like that, to make the outcome stable.
4010  */
4011  if ((nshared_exprs > nmatches_exprs) ||
4012  (((nshared_exprs == nmatches_exprs)) && (nshared_vars > nmatches_vars)))
4013  {
4014  statOid = info->statOid;
4015  nmatches_vars = nshared_vars;
4016  nmatches_exprs = nshared_exprs;
4017  matched_info = info;
4018  }
4019  }
4020 
4021  /* No match? */
4022  if (statOid == InvalidOid)
4023  return false;
4024 
4025  Assert(nmatches_vars + nmatches_exprs > 1);
4026 
4027  stats = statext_ndistinct_load(statOid, rte->inh);
4028 
4029  /*
4030  * If we have a match, search it for the specific item that matches (there
4031  * must be one), and construct the output values.
4032  */
4033  if (stats)
4034  {
4035  int i;
4036  List *newlist = NIL;
4037  MVNDistinctItem *item = NULL;
4038  ListCell *lc2;
4039  Bitmapset *matched = NULL;
4040  AttrNumber attnum_offset;
4041 
4042  /*
4043  * How much we need to offset the attnums? If there are no
4044  * expressions, no offset is needed. Otherwise offset enough to move
4045  * the lowest one (which is equal to number of expressions) to 1.
4046  */
4047  if (matched_info->exprs)
4048  attnum_offset = (list_length(matched_info->exprs) + 1);
4049  else
4050  attnum_offset = 0;
4051 
4052  /* see what actually matched */
4053  foreach(lc2, *varinfos)
4054  {
4055  ListCell *lc3;
4056  int idx;
4057  bool found = false;
4058 
4059  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
4060 
4061  /*
4062  * Process a simple Var expression, by matching it to keys
4063  * directly. If there's a matching expression, we'll try matching
4064  * it later.
4065  */
4066  if (IsA(varinfo->var, Var))
4067  {
4068  AttrNumber attnum = ((Var *) varinfo->var)->varattno;
4069 
4070  /*
4071  * Ignore expressions on system attributes. Can't rely on the
4072  * bms check for negative values.
4073  */
4074  if (!AttrNumberIsForUserDefinedAttr(attnum))
4075  continue;
4076 
4077  /* Is the variable covered by the statistics object? */
4078  if (!bms_is_member(attnum, matched_info->keys))
4079  continue;
4080 
4081  attnum = attnum + attnum_offset;
4082 
4083  /* ensure sufficient offset */
4084  Assert(AttrNumberIsForUserDefinedAttr(attnum));
4085 
4086  matched = bms_add_member(matched, attnum);
4087 
4088  found = true;
4089  }
4090 
4091  /*
4092  * XXX Maybe we should allow searching the expressions even if we
4093  * found an attribute matching the expression? That would handle
4094  * trivial expressions like "(a)" but it seems fairly useless.
4095  */
4096  if (found)
4097  continue;
4098 
4099  /* expression - see if it's in the statistics object */
4100  idx = 0;
4101  foreach(lc3, matched_info->exprs)
4102  {
4103  Node *expr = (Node *) lfirst(lc3);
4104 
4105  if (equal(varinfo->var, expr))
4106  {
4107  AttrNumber attnum = -(idx + 1);
4108 
4109  attnum = attnum + attnum_offset;
4110 
4111  /* ensure sufficient offset */
4112  Assert(AttrNumberIsForUserDefinedAttr(attnum));
4113 
4114  matched = bms_add_member(matched, attnum);
4115 
4116  /* there should be just one matching expression */
4117  break;
4118  }
4119 
4120  idx++;
4121  }
4122  }
4123 
4124  /* Find the specific item that exactly matches the combination */
4125  for (i = 0; i < stats->nitems; i++)
4126  {
4127  int j;
4128  MVNDistinctItem *tmpitem = &stats->items[i];
4129 
4130  if (tmpitem->nattributes != bms_num_members(matched))
4131  continue;
4132 
4133  /* assume it's the right item */
4134  item = tmpitem;
4135 
4136  /* check that all item attributes/expressions fit the match */
4137  for (j = 0; j < tmpitem->nattributes; j++)
4138  {
4139  AttrNumber attnum = tmpitem->attributes[j];
4140 
4141  /*
4142  * Thanks to how we constructed the matched bitmap above, we
4143  * can just offset all attnums the same way.
4144  */
4145  attnum = attnum + attnum_offset;
4146 
4147  if (!bms_is_member(attnum, matched))
4148  {
4149  /* nah, it's not this item */
4150  item = NULL;
4151  break;
4152  }
4153  }
4154 
4155  /*
4156  * If the item has all the matched attributes, we know it's the
4157  * right one - there can't be a better one, matching more.
4158  */
4159  if (item)
4160  break;
4161  }
4162 
4163  /*
4164  * Make sure we found an item. There has to be one, because ndistinct
4165  * statistics includes all combinations of attributes.
4166  */
4167  if (!item)
4168  elog(ERROR, "corrupt MVNDistinct entry");
4169 
4170  /* Form the output varinfo list, keeping only unmatched ones */
4171  foreach(lc, *varinfos)
4172  {
4173  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
4174  ListCell *lc3;
4175  bool found = false;
4176 
4177  /*
4178  * Let's look at plain variables first, because it's the most
4179  * common case and the check is quite cheap. We can simply get the
4180  * attnum and check (with an offset) matched bitmap.
4181  */
4182  if (IsA(varinfo->var, Var))
4183  {
4184  AttrNumber attnum = ((Var *) varinfo->var)->varattno;
4185 
4186  /*
4187  * If it's a system attribute, we're done. We don't support
4188  * extended statistics on system attributes, so it's clearly
4189  * not matched. Just keep the expression and continue.
4190  */
4191  if (!AttrNumberIsForUserDefinedAttr(attnum))
4192  {
4193  newlist = lappend(newlist, varinfo);
4194  continue;
4195  }
4196 
4197  /* apply the same offset as above */
4198  attnum += attnum_offset;
4199 
4200  /* if it's not matched, keep the varinfo */
4201  if (!bms_is_member(attnum, matched))
4202  newlist = lappend(newlist, varinfo);
4203 
4204  /* The rest of the loop deals with complex expressions. */
4205  continue;
4206  }
4207 
4208  /*
4209  * Process complex expressions, not just simple Vars.
4210  *
4211  * First, we search for an exact match of an expression. If we
4212  * find one, we can just discard the whole GroupExprInfo, with all
4213  * the variables we extracted from it.
4214  *
4215  * Otherwise we inspect the individual vars, and try matching it
4216  * to variables in the item.
4217  */
4218  foreach(lc3, matched_info->exprs)
4219  {
4220  Node *expr = (Node *) lfirst(lc3);
4221 
4222  if (equal(varinfo->var, expr))
4223  {
4224  found = true;
4225  break;
4226  }
4227  }
4228 
4229  /* found exact match, skip */
4230  if (found)
4231  continue;
4232 
4233  newlist = lappend(newlist, varinfo);
4234  }
4235 
4236  *varinfos = newlist;
4237  *ndistinct = item->ndistinct;
4238  return true;
4239  }
4240 
4241  return false;
4242 }
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define AttrNumberIsForUserDefinedAttr(attributeNumber)
Definition: attnum.h:41
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:649
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:428
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:739
MVNDistinct * statext_ndistinct_load(Oid mvoid, bool inh)
Definition: mvdistinct.c:149
double ndistinct
Definition: statistics.h:28
AttrNumber * attributes
Definition: statistics.h:30
uint32 nitems
Definition: statistics.h:38
MVNDistinctItem items[FLEXIBLE_ARRAY_MEMBER]
Definition: statistics.h:39
List * statlist
Definition: pathnodes.h:891
Bitmapset * keys
Definition: pathnodes.h:1237
Definition: primnodes.h:205

References Assert(), attnum, MVNDistinctItem::attributes, AttrNumberIsForUserDefinedAttr, bms_add_member(), bms_is_member(), bms_num_members(), elog(), equal(), ERROR, StatisticExtInfo::exprs, i, idx(), RangeTblEntry::inh, StatisticExtInfo::inherit, InvalidOid, IsA, MVNDistinct::items, j, StatisticExtInfo::keys, StatisticExtInfo::kind, lappend(), lfirst, list_length(), MVNDistinctItem::nattributes, MVNDistinctItem::ndistinct, NIL, MVNDistinct::nitems, planner_rt_fetch, GroupVarInfo::rel, RelOptInfo::relid, statext_ndistinct_load(), RelOptInfo::statlist, StatisticExtInfo::statOid, and GroupVarInfo::var.

Referenced by estimate_num_groups().

◆ estimate_num_groups()

double estimate_num_groups ( PlannerInfo root,
List groupExprs,
double  input_rows,
List **  pgset,
EstimationInfo estinfo 
)

Definition at line 3385 of file selfuncs.c.

3387 {
3388  List *varinfos = NIL;
3389  double srf_multiplier = 1.0;
3390  double numdistinct;
3391  ListCell *l;
3392  int i;
3393 
3394  /* Zero the estinfo output parameter, if non-NULL */
3395  if (estinfo != NULL)
3396  memset(estinfo, 0, sizeof(EstimationInfo));
3397 
3398  /*
3399  * We don't ever want to return an estimate of zero groups, as that tends
3400  * to lead to division-by-zero and other unpleasantness. The input_rows
3401  * estimate is usually already at least 1, but clamp it just in case it
3402  * isn't.
3403  */
3404  input_rows = clamp_row_est(input_rows);
3405 
3406  /*
3407  * If no grouping columns, there's exactly one group. (This can't happen
3408  * for normal cases with GROUP BY or DISTINCT, but it is possible for
3409  * corner cases with set operations.)
3410  */
3411  if (groupExprs == NIL || (pgset && *pgset == NIL))
3412  return 1.0;
3413 
3414  /*
3415  * Count groups derived from boolean grouping expressions. For other
3416  * expressions, find the unique Vars used, treating an expression as a Var
3417  * if we can find stats for it. For each one, record the statistical
3418  * estimate of number of distinct values (total in its table, without
3419  * regard for filtering).
3420  */
3421  numdistinct = 1.0;
3422 
3423  i = 0;
3424  foreach(l, groupExprs)
3425  {
3426  Node *groupexpr = (Node *) lfirst(l);
3427  double this_srf_multiplier;
3428  VariableStatData vardata;
3429  List *varshere;
3430  ListCell *l2;
3431 
3432  /* is expression in this grouping set? */
3433  if (pgset && !list_member_int(*pgset, i++))
3434  continue;
3435 
3436  /*
3437  * Set-returning functions in grouping columns are a bit problematic.
3438  * The code below will effectively ignore their SRF nature and come up
3439  * with a numdistinct estimate as though they were scalar functions.
3440  * We compensate by scaling up the end result by the largest SRF
3441  * rowcount estimate. (This will be an overestimate if the SRF
3442  * produces multiple copies of any output value, but it seems best to
3443  * assume the SRF's outputs are distinct. In any case, it's probably
3444  * pointless to worry too much about this without much better
3445  * estimates for SRF output rowcounts than we have today.)
3446  */
3447  this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
3448  if (srf_multiplier < this_srf_multiplier)
3449  srf_multiplier = this_srf_multiplier;
3450 
3451  /* Short-circuit for expressions returning boolean */
3452  if (exprType(groupexpr) == BOOLOID)
3453  {
3454  numdistinct *= 2.0;
3455  continue;
3456  }
3457 
3458  /*
3459  * If examine_variable is able to deduce anything about the GROUP BY
3460  * expression, treat it as a single variable even if it's really more
3461  * complicated.
3462  *
3463  * XXX This has the consequence that if there's a statistics object on
3464  * the expression, we don't split it into individual Vars. This
3465  * affects our selection of statistics in
3466  * estimate_multivariate_ndistinct, because it's probably better to
3467  * use more accurate estimate for each expression and treat them as
3468  * independent, than to combine estimates for the extracted variables
3469  * when we don't know how that relates to the expressions.
3470  */
3471  examine_variable(root, groupexpr, 0, &vardata);
3472  if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3473  {
3474  varinfos = add_unique_group_var(root, varinfos,
3475  groupexpr, &vardata);
3476  ReleaseVariableStats(vardata);
3477  continue;
3478  }
3479  ReleaseVariableStats(vardata);
3480 
3481  /*
3482  * Else pull out the component Vars. Handle PlaceHolderVars by
3483  * recursing into their arguments (effectively assuming that the
3484  * PlaceHolderVar doesn't change the number of groups, which boils
3485  * down to ignoring the possible addition of nulls to the result set).
3486  */
3487  varshere = pull_var_clause(groupexpr,
3488  PVC_RECURSE_AGGREGATES |
3489  PVC_RECURSE_WINDOWFUNCS |
3490  PVC_RECURSE_PLACEHOLDERS);
3491 
3492  /*
3493  * If we find any variable-free GROUP BY item, then either it is a
3494  * constant (and we can ignore it) or it contains a volatile function;
3495  * in the latter case we punt and assume that each input row will
3496  * yield a distinct group.
3497  */
3498  if (varshere == NIL)
3499  {
3500  if (contain_volatile_functions(groupexpr))
3501  return input_rows;
3502  continue;
3503  }
3504 
3505  /*
3506  * Else add variables to varinfos list
3507  */
3508  foreach(l2, varshere)
3509  {
3510  Node *var = (Node *) lfirst(l2);
3511 
3512  examine_variable(root, var, 0, &vardata);
3513  varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3514  ReleaseVariableStats(vardata);
3515  }
3516  }
3517 
3518  /*
3519  * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3520  * list.
3521  */
3522  if (varinfos == NIL)
3523  {
3524  /* Apply SRF multiplier as we would do in the long path */
3525  numdistinct *= srf_multiplier;
3526  /* Round off */
3527  numdistinct = ceil(numdistinct);
3528  /* Guard against out-of-range answers */
3529  if (numdistinct > input_rows)
3530  numdistinct = input_rows;
3531  if (numdistinct < 1.0)
3532  numdistinct = 1.0;
3533  return numdistinct;
3534  }
3535 
3536  /*
3537  * Group Vars by relation and estimate total numdistinct.
3538  *
3539  * For each iteration of the outer loop, we process the frontmost Var in
3540  * varinfos, plus all other Vars in the same relation. We remove these
3541  * Vars from the newvarinfos list for the next iteration. This is the
3542  * easiest way to group Vars of same rel together.
3543  */
3544  do
3545  {
3546  GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3547  RelOptInfo *rel = varinfo1->rel;
3548  double reldistinct = 1;
3549  double relmaxndistinct = reldistinct;
3550  int relvarcount = 0;
3551  List *newvarinfos = NIL;
3552  List *relvarinfos = NIL;
3553 
3554  /*
3555  * Split the list of varinfos in two - one for the current rel, one
3556  * for remaining Vars on other rels.
3557  */
3558  relvarinfos = lappend(relvarinfos, varinfo1);
3559  for_each_from(l, varinfos, 1)
3560  {
3561  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3562 
3563  if (varinfo2->rel == varinfo1->rel)
3564  {
3565  /* varinfos on current rel */
3566  relvarinfos = lappend(relvarinfos, varinfo2);
3567  }
3568  else
3569  {
3570  /* not time to process varinfo2 yet */
3571  newvarinfos = lappend(newvarinfos, varinfo2);
3572  }
3573  }
3574 
3575  /*
3576  * Get the numdistinct estimate for the Vars of this rel. We
3577  * iteratively search for multivariate n-distinct with maximum number
3578  * of vars; assuming that each var group is independent of the others,
3579  * we multiply them together. Any remaining relvarinfos after no more
3580  * multivariate matches are found are assumed independent too, so
3581  * their individual ndistinct estimates are multiplied also.
3582  *
3583  * While iterating, count how many separate numdistinct values we
3584  * apply. We apply a fudge factor below, but only if we multiplied
3585  * more than one such values.
3586  */
3587  while (relvarinfos)
3588  {
3589  double mvndistinct;
3590 
3591  if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3592  &mvndistinct))
3593  {
3594  reldistinct *= mvndistinct;
3595  if (relmaxndistinct < mvndistinct)
3596  relmaxndistinct = mvndistinct;
3597  relvarcount++;
3598  }
3599  else
3600  {
3601  foreach(l, relvarinfos)
3602  {
3603  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3604 
3605  reldistinct *= varinfo2->ndistinct;
3606  if (relmaxndistinct < varinfo2->ndistinct)
3607  relmaxndistinct = varinfo2->ndistinct;
3608  relvarcount++;
3609 
3610  /*
3611  * When varinfo2's isdefault is set then we'd better set
3612  * the SELFLAG_USED_DEFAULT bit in the EstimationInfo.
3613  */
3614  if (estinfo != NULL && varinfo2->isdefault)
3615  estinfo->flags |= SELFLAG_USED_DEFAULT;
3616  }
3617 
3618  /* we're done with this relation */
3619  relvarinfos = NIL;
3620  }
3621  }
3622 
3623  /*
3624  * Sanity check --- don't divide by zero if empty relation.
3625  */
3626  Assert(IS_SIMPLE_REL(rel));
3627  if (rel->tuples > 0)
3628  {
3629  /*
3630  * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3631  * fudge factor is because the Vars are probably correlated but we
3632  * don't know by how much. We should never clamp to less than the
3633  * largest ndistinct value for any of the Vars, though, since
3634  * there will surely be at least that many groups.
3635  */
3636  double clamp = rel->tuples;
3637 
3638  if (relvarcount > 1)
3639  {
3640  clamp *= 0.1;
3641  if (clamp < relmaxndistinct)
3642  {
3643  clamp = relmaxndistinct;
3644  /* for sanity in case some ndistinct is too large: */
3645  if (clamp > rel->tuples)
3646  clamp = rel->tuples;
3647  }
3648  }
3649  if (reldistinct > clamp)
3650  reldistinct = clamp;
3651 
3652  /*
3653  * Update the estimate based on the restriction selectivity,
3654  * guarding against division by zero when reldistinct is zero.
3655  * Also skip this if we know that we are returning all rows.
3656  */
3657  if (reldistinct > 0 && rel->rows < rel->tuples)
3658  {
3659  /*
3660  * Given a table containing N rows with n distinct values in a
3661  * uniform distribution, if we select p rows at random then
3662  * the expected number of distinct values selected is
3663  *
3664  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3665  *
3666  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3667  *
3668  * See "Approximating block accesses in database
3669  * organizations", S. B. Yao, Communications of the ACM,
3670  * Volume 20 Issue 4, April 1977 Pages 260-261.
3671  *
3672  * Alternatively, re-arranging the terms from the factorials,
3673  * this may be written as
3674  *
3675  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3676  *
3677  * This form of the formula is more efficient to compute in
3678  * the common case where p is larger than N/n. Additionally,
3679  * as pointed out by Dell'Era, if i << N for all terms in the
3680  * product, it can be approximated by
3681  *
3682  * n * (1 - ((N-p)/N)^(N/n))
3683  *
3684  * See "Expected distinct values when selecting from a bag
3685  * without replacement", Alberto Dell'Era,
3686  * http://www.adellera.it/investigations/distinct_balls/.
3687  *
3688  * The condition i << N is equivalent to n >> 1, so this is a
3689  * good approximation when the number of distinct values in
3690  * the table is large. It turns out that this formula also
3691  * works well even when n is small.
3692  */
3693  reldistinct *=
3694  (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3695  rel->tuples / reldistinct));
3696  }
3697  reldistinct = clamp_row_est(reldistinct);
3698 
3699  /*
3700  * Update estimate of total distinct groups.
3701  */
3702  numdistinct *= reldistinct;
3703  }
3704 
3705  varinfos = newvarinfos;
3706  } while (varinfos != NIL);
3707 
3708  /* Now we can account for the effects of any SRFs */
3709  numdistinct *= srf_multiplier;
3710 
3711  /* Round off */
3712  numdistinct = ceil(numdistinct);
3713 
3714  /* Guard against out-of-range answers */
3715  if (numdistinct > input_rows)
3716  numdistinct = input_rows;
3717  if (numdistinct < 1.0)
3718  numdistinct = 1.0;
3719 
3720  return numdistinct;
3721 }
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:448
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition: clauses.c:289
bool list_member_int(const List *list, int datum)
Definition: list.c:701
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:43
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:184
#define PVC_RECURSE_PLACEHOLDERS
Definition: optimizer.h:188
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:186
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:792
#define for_each_from(cell, lst, N)
Definition: pg_list.h:412
#define linitial(l)
Definition: pg_list.h:176
static bool estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, List **varinfos, double *ndistinct)
Definition: selfuncs.c:3923
static List * add_unique_group_var(PlannerInfo *root, List *varinfos, Node *var, VariableStatData *vardata)
Definition: selfuncs.c:3265
#define SELFLAG_USED_DEFAULT
Definition: selfuncs.h:76
uint32 flags
Definition: selfuncs.h:80
List * pull_var_clause(Node *node, int flags)
Definition: var.c:597

References add_unique_group_var(), Assert(), clamp_row_est(), contain_volatile_functions(), estimate_multivariate_ndistinct(), examine_variable(), expression_returns_set_rows(), exprType(), EstimationInfo::flags, for_each_from, HeapTupleIsValid, i, IS_SIMPLE_REL, GroupVarInfo::isdefault, VariableStatData::isunique, lappend(), lfirst, linitial, list_member_int(), GroupVarInfo::ndistinct, NIL, pull_var_clause(), PVC_RECURSE_AGGREGATES, PVC_RECURSE_PLACEHOLDERS, PVC_RECURSE_WINDOWFUNCS, GroupVarInfo::rel, ReleaseVariableStats, RelOptInfo::rows, SELFLAG_USED_DEFAULT, VariableStatData::statsTuple, and RelOptInfo::tuples.

Referenced by adjust_rowcount_for_semijoins(), cost_incremental_sort(), cost_memoize_rescan(), create_final_distinct_paths(), create_partial_distinct_paths(), create_unique_path(), estimate_path_cost_size(), get_number_of_groups(), and recurse_set_operations().
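The Dell'Era approximation applied in the pow() call above (lines 3693-3695) is easy to sanity-check numerically. The snippet below is an illustrative, standalone sketch with made-up figures, not part of selfuncs.c: it evaluates n * (1 - ((N-p)/N)^(N/n)) for a hypothetical table of N = 1,000,000 rows containing n = 50,000 distinct values, of which p = 10,000 rows survive the restriction clauses.

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        double N = 1000000.0;   /* rel->tuples: rows in the table (made up) */
        double n = 50000.0;     /* reldistinct: distinct values in the group key */
        double p = 10000.0;     /* rel->rows: rows surviving the restrictions */

        /* n * (1 - ((N-p)/N)^(N/n)), the Dell'Era approximation */
        double est = n * (1.0 - pow((N - p) / N, N / n));

        printf("expected distinct values among selected rows: %.0f\n", est);
        return 0;
    }

With these inputs the estimate comes out near 9100: roughly 18% of the 50,000 distinct values are expected to appear in a 1% sample, after which clamp_row_est() and the input_rows guard at the end of estimate_num_groups() keep the final answer within range.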

◆ examine_simple_variable()

static void examine_simple_variable ( PlannerInfo root,
Var var,
VariableStatData vardata 
)
static

Definition at line 5377 of file selfuncs.c.

5379 {
5380  RangeTblEntry *rte = root->simple_rte_array[var->varno];
5381 
5382  Assert(IsA(rte, RangeTblEntry));
5383 
5384  if (get_relation_stats_hook &&
5385  (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
5386  {
5387  /*
5388  * The hook took control of acquiring a stats tuple. If it did supply
5389  * a tuple, it'd better have supplied a freefunc.
5390  */
5391  if (HeapTupleIsValid(vardata->statsTuple) &&
5392  !vardata->freefunc)
5393  elog(ERROR, "no function provided to release variable stats with");
5394  }
5395  else if (rte->rtekind == RTE_RELATION)
5396  {
5397  /*
5398  * Plain table or parent of an inheritance appendrel, so look up the
5399  * column in pg_statistic
5400  */
5401  vardata->statsTuple = SearchSysCache3(STATRELATTINH,
5402  ObjectIdGetDatum(rte->relid),
5403  Int16GetDatum(var->varattno),
5404  BoolGetDatum(rte->inh));
5405  vardata->freefunc = ReleaseSysCache;
5406 
5407  if (HeapTupleIsValid(vardata->statsTuple))
5408  {
5409  RelOptInfo *onerel = find_base_rel(root, var->varno);
5410  Oid userid;
5411 
5412  /*
5413  * Check if user has permission to read this column. We require
5414  * all rows to be accessible, so there must be no securityQuals
5415  * from security barrier views or RLS policies. Use
5416  * onerel->userid if it's set, in case we're accessing the table
5417  * via a view.
5418  */
5419  userid = OidIsValid(onerel->userid) ? onerel->userid : GetUserId();
5420 
5421  vardata->acl_ok =
5422  rte->securityQuals == NIL &&
5423  ((pg_class_aclcheck(rte->relid, userid,
5424  ACL_SELECT) == ACLCHECK_OK) ||
5425  (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
5426  ACL_SELECT) == ACLCHECK_OK));
5427 
5428  /*
5429  * If the user doesn't have permissions to access an inheritance
5430  * child relation or specifically this attribute, check the
5431  * permissions of the table/column actually mentioned in the
5432  * query, since most likely the user does have that permission
5433  * (else the query will fail at runtime), and if the user can read
5434  * the column there then he can get the values of the child table
5435  * too. To do that, we must find out which of the root parent's
5436  * attributes the child relation's attribute corresponds to.
5437  */
5438  if (!vardata->acl_ok && var->varattno > 0 &&
5439  root->append_rel_array != NULL)
5440  {
5441  AppendRelInfo *appinfo;
5442  Index varno = var->varno;
5443  int varattno = var->varattno;
5444  bool found = false;
5445 
5446  appinfo = root->append_rel_array[varno];
5447 
5448  /*
5449  * Partitions are mapped to their immediate parent, not the
5450  * root parent, so must be ready to walk up multiple
5451  * AppendRelInfos. But stop if we hit a parent that is not
5452  * RTE_RELATION --- that's a flattened UNION ALL subquery, not
5453  * an inheritance parent.
5454  */
5455  while (appinfo &&
5456  planner_rt_fetch(appinfo->parent_relid,
5457  root)->rtekind == RTE_RELATION)
5458  {
5459  int parent_varattno;
5460 
5461  found = false;
5462  if (varattno <= 0 || varattno > appinfo->num_child_cols)
5463  break; /* safety check */
5464  parent_varattno = appinfo->parent_colnos[varattno - 1];
5465  if (parent_varattno == 0)
5466  break; /* Var is local to child */
5467 
5468  varno = appinfo->parent_relid;
5469  varattno = parent_varattno;
5470  found = true;
5471 
5472  /* If the parent is itself a child, continue up. */
5473  appinfo = root->append_rel_array[varno];
5474  }
5475 
5476  /*
5477  * In rare cases, the Var may be local to the child table, in
5478  * which case, we've got to live with having no access to this
5479  * column's stats.
5480  */
5481  if (!found)
5482  return;
5483 
5484  /* Repeat the access check on this parent rel & column */
5485  rte = planner_rt_fetch(varno, root);
5486  Assert(rte->rtekind == RTE_RELATION);
5487 
5488  userid = OidIsValid(onerel->userid) ?
5489  onerel->userid : GetUserId();
5490 
5491  vardata->acl_ok =
5492  rte->securityQuals == NIL &&
5493  ((pg_class_aclcheck(rte->relid, userid,
5494  ACL_SELECT) == ACLCHECK_OK) ||
5495  (pg_attribute_aclcheck(rte->relid, varattno, userid,
5496  ACL_SELECT) == ACLCHECK_OK));
5497  }
5498  }
5499  else
5500  {
5501  /* suppress any possible leakproofness checks later */
5502  vardata->acl_ok = true;
5503  }
5504  }
5505  else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
5506  {
5507  /*
5508  * Plain subquery (not one that was converted to an appendrel).
5509  */
5510  Query *subquery = rte->subquery;
5511  RelOptInfo *rel;
5512  TargetEntry *ste;
5513 
5514  /*
5515  * Punt if it's a whole-row var rather than a plain column reference.
5516  */
5517  if (var->varattno == InvalidAttrNumber)
5518  return;
5519 
5520  /*
5521  * Punt if subquery uses set operations or GROUP BY, as these will
5522  * mash underlying columns' stats beyond recognition. (Set ops are
5523  * particularly nasty; if we forged ahead, we would return stats
5524  * relevant to only the leftmost subselect...) DISTINCT is also
5525  * problematic, but we check that later because there is a possibility
5526  * of learning something even with it.
5527  */
5528  if (subquery->setOperations ||
5529  subquery->groupClause ||
5530  subquery->groupingSets)
5531  return;
5532 
5533  /*
5534  * OK, fetch RelOptInfo for subquery. Note that we don't change the
5535  * rel returned in vardata, since caller expects it to be a rel of the
5536  * caller's query level. Because we might already be recursing, we
5537  * can't use that rel pointer either, but have to look up the Var's
5538  * rel afresh.
5539  */
5540  rel = find_base_rel(root, var->varno);
5541 
5542  /* If the subquery hasn't been planned yet, we have to punt */
5543  if (rel->subroot == NULL)
5544  return;
5545  Assert(IsA(rel->subroot, PlannerInfo));
5546 
5547  /*
5548  * Switch our attention to the subquery as mangled by the planner. It
5549  * was okay to look at the pre-planning version for the tests above,
5550  * but now we need a Var that will refer to the subroot's live
5551  * RelOptInfos. For instance, if any subquery pullup happened during
5552  * planning, Vars in the targetlist might have gotten replaced, and we
5553  * need to see the replacement expressions.
5554  */
5555  subquery = rel->subroot->parse;
5556  Assert(IsA(subquery, Query));
5557 
5558  /* Get the subquery output expression referenced by the upper Var */
5559  ste = get_tle_by_resno(subquery->targetList, var->varattno);
5560  if (ste == NULL || ste->resjunk)
5561  elog(ERROR, "subquery %s does not have attribute %d",
5562  rte->eref->aliasname, var->varattno);
5563  var = (Var *) ste->expr;
5564 
5565  /*
5566  * If subquery uses DISTINCT, we can't make use of any stats for the
5567  * variable ... but, if it's the only DISTINCT column, we are entitled
5568  * to consider it unique. We do the test this way so that it works
5569  * for cases involving DISTINCT ON.
5570  */
5571  if (subquery->distinctClause)
5572  {
5573  if (list_length(subquery->distinctClause) == 1 &&
5574  targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
5575  vardata->isunique = true;
5576  /* cannot go further */
5577  return;
5578  }
5579 
5580  /*
5581  * If the sub-query originated from a view with the security_barrier
5582  * attribute, we must not look at the variable's statistics, though it
5583  * seems all right to notice the existence of a DISTINCT clause. So
5584  * stop here.
5585  *
5586  * This is probably a harsher restriction than necessary; it's
5587  * certainly OK for the selectivity estimator (which is a C function,
5588  * and therefore omnipotent anyway) to look at the statistics. But
5589  * many selectivity estimators will happily *invoke the operator
5590  * function* to try to work out a good estimate - and that's not OK.
5591  * So for now, don't dig down for stats.
5592  */
5593  if (rte->security_barrier)
5594  return;
5595 
5596  /* Can only handle a simple Var of subquery's query level */
5597  if (var && IsA(var, Var) &&
5598  var->varlevelsup == 0)
5599  {
5600  /*
5601  * OK, recurse into the subquery. Note that the original setting
5602  * of vardata->isunique (which will surely be false) is left
5603  * unchanged in this situation. That's what we want, since even
5604  * if the underlying column is unique, the subquery may have
5605  * joined to other tables in a way that creates duplicates.
5606  */
5607  examine_simple_variable(rel->subroot, var, vardata);
5608  }
5609  }
5610  else
5611  {
5612  /*
5613  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
5614  * won't see RTE_JOIN here because join alias Vars have already been
5615  * flattened.) There's not much we can do with function outputs, but
5616  * maybe someday try to be smarter about VALUES and/or CTEs.
5617  */
5618  }
5619 }
@ ACLCHECK_OK
Definition: acl.h:184
AclResult pg_attribute_aclcheck(Oid table_oid, AttrNumber attnum, Oid roleid, AclMode mode)
Definition: aclchk.c:4617
AclResult pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
Definition: aclchk.c:4746
#define InvalidAttrNumber
Definition: attnum.h:23
unsigned int Index
Definition: c.h:550
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
Oid GetUserId(void)
Definition: miscinit.c:497
bool targetIsInSortList(TargetEntry *tle, Oid sortop, List *sortList)
TargetEntry * get_tle_by_resno(List *tlist, AttrNumber resno)
@ RTE_SUBQUERY
Definition: parsenodes.h:983
#define ACL_SELECT
Definition: parsenodes.h:84
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:379
static void examine_simple_variable(PlannerInfo *root, Var *var, VariableStatData *vardata)
Definition: selfuncs.c:5377
char * aliasname
Definition: primnodes.h:42
Index parent_relid
Definition: pathnodes.h:2757
int num_child_cols
Definition: pathnodes.h:2793
Query * parse
Definition: pathnodes.h:202
Node * setOperations
Definition: parsenodes.h:191
List * groupClause
Definition: parsenodes.h:172
List * targetList
Definition: parsenodes.h:164
List * groupingSets
Definition: parsenodes.h:175
List * distinctClause
Definition: parsenodes.h:181
bool security_barrier
Definition: parsenodes.h:1042
List * securityQuals
Definition: parsenodes.h:1157
Query * subquery
Definition: parsenodes.h:1041
Alias * eref
Definition: parsenodes.h:1152
Oid userid
Definition: pathnodes.h:914
PlannerInfo * subroot
Definition: pathnodes.h:901
Expr * expr
Definition: primnodes.h:1555
bool resjunk
Definition: primnodes.h:1562
AttrNumber varattno
Definition: primnodes.h:217
int varno
Definition: primnodes.h:212
Index varlevelsup
Definition: primnodes.h:230

References VariableStatData::acl_ok, ACL_SELECT, ACLCHECK_OK, Alias::aliasname, Assert(), BoolGetDatum(), Query::distinctClause, elog(), RangeTblEntry::eref, ERROR, TargetEntry::expr, find_base_rel(), VariableStatData::freefunc, get_relation_stats_hook, get_tle_by_resno(), GetUserId(), Query::groupClause, Query::groupingSets, HeapTupleIsValid, if(), RangeTblEntry::inh, Int16GetDatum(), InvalidAttrNumber, InvalidOid, IsA, VariableStatData::isunique, list_length(), NIL, AppendRelInfo::num_child_cols, ObjectIdGetDatum(), OidIsValid, AppendRelInfo::parent_relid, PlannerInfo::parse, pg_attribute_aclcheck(), pg_class_aclcheck(), planner_rt_fetch, ReleaseSysCache(), RangeTblEntry::relid, TargetEntry::resjunk, RTE_RELATION, RTE_SUBQUERY, RangeTblEntry::rtekind, SearchSysCache3(), RangeTblEntry::security_barrier, RangeTblEntry::securityQuals, Query::setOperations, STATRELATTINH, VariableStatData::statsTuple, RangeTblEntry::subquery, RelOptInfo::subroot, targetIsInSortList(), Query::targetList, RelOptInfo::userid, Var::varattno, Var::varlevelsup, and Var::varno.

Referenced by examine_variable().
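The first branch of this function consults the extension hook get_relation_stats_hook before touching pg_statistic. Below is a minimal sketch of a hook implementation (hypothetical extension code, not part of PostgreSQL; my_fetch_stats_tuple is an assumed helper). The one hard requirement enforced above is that a hook which stores a statsTuple must also store a freefunc, otherwise examine_simple_variable() raises an error.

    #include "postgres.h"
    #include "access/htup_details.h"
    #include "utils/selfuncs.h"

    /* Hypothetical helper: returns a palloc'd pg_statistic-shaped tuple, or NULL. */
    extern HeapTuple my_fetch_stats_tuple(Oid relid, AttrNumber attnum);

    static bool
    my_relation_stats_hook(PlannerInfo *root, RangeTblEntry *rte,
                           AttrNumber attnum, VariableStatData *vardata)
    {
        HeapTuple   tup = my_fetch_stats_tuple(rte->relid, attnum);

        if (!HeapTupleIsValid(tup))
            return false;           /* fall through to the pg_statistic lookup */

        vardata->statsTuple = tup;
        vardata->freefunc = heap_freetuple; /* required whenever statsTuple is set */
        return true;                /* hook has taken control of stats acquisition */
    }

Such a hook would normally be installed from the extension's _PG_init(), saving the previous value of get_relation_stats_hook and chaining to it, in the usual PostgreSQL hook style.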

◆ examine_variable()

void examine_variable ( PlannerInfo root,
Node node,
int  varRelid,
VariableStatData vardata 
)

Definition at line 4977 of file selfuncs.c.

4979 {
4980  Node *basenode;
4981  Relids varnos;
4982  RelOptInfo *onerel;
4983 
4984  /* Make sure we don't return dangling pointers in vardata */
4985  MemSet(vardata, 0, sizeof(VariableStatData));
4986 
4987  /* Save the exposed type of the expression */
4988  vardata->vartype = exprType(node);
4989 
4990  /* Look inside any binary-compatible relabeling */
4991 
4992  if (IsA(node, RelabelType))
4993  basenode = (Node *) ((RelabelType *) node)->arg;
4994  else
4995  basenode = node;
4996 
4997  /* Fast path for a simple Var */
4998 
4999  if (IsA(basenode, Var) &&
5000  (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
5001  {
5002  Var *var = (Var *) basenode;
5003 
5004  /* Set up result fields other than the stats tuple */
5005  vardata->var = basenode; /* return Var without relabeling */
5006  vardata->rel = find_base_rel(root, var->varno);
5007  vardata->atttype = var->vartype;
5008  vardata->atttypmod = var->vartypmod;
5009  vardata->isunique = has_unique_index(vardata->rel, var->varattno);
5010 
5011  /* Try to locate some stats */
5012  examine_simple_variable(root, var, vardata);
5013 
5014  return;
5015  }
5016 
5017  /*
5018  * Okay, it's a more complicated expression. Determine variable
5019  * membership. Note that when varRelid isn't zero, only vars of that
5020  * relation are considered "real" vars.
5021  */
5022  varnos = pull_varnos(root, basenode);
5023 
5024  onerel = NULL;
5025 
5026  switch (bms_membership(varnos))
5027  {
5028  case BMS_EMPTY_SET:
5029  /* No Vars at all ... must be pseudo-constant clause */
5030  break;
5031  case BMS_SINGLETON:
5032  if (varRelid == 0 || bms_is_member(varRelid, varnos))
5033  {
5034  onerel = find_base_rel(root,
5035  (varRelid ? varRelid : bms_singleton_member(varnos)));
5036  vardata->rel = onerel;
5037  node = basenode; /* strip any relabeling */
5038  }
5039  /* else treat it as a constant */
5040  break;
5041  case BMS_MULTIPLE:
5042  if (varRelid == 0)
5043  {
5044  /* treat it as a variable of a join relation */
5045  vardata->rel = find_join_rel(root, varnos);
5046  node = basenode; /* strip any relabeling */
5047  }
5048  else if (bms_is_member(varRelid, varnos))
5049  {
5050  /* ignore the vars belonging to other relations */
5051  vardata->rel = find_base_rel(root, varRelid);
5052  node = basenode; /* strip any relabeling */
5053  /* note: no point in expressional-index search here */
5054  }
5055  /* else treat it as a constant */
5056  break;
5057  }
5058 
5059  bms_free(varnos);
5060 
5061  vardata->var = node;
5062  vardata->atttype = exprType(node);
5063  vardata->atttypmod = exprTypmod(node);
5064 
5065  if (onerel)
5066  {
5067  /*
5068  * We have an expression in vars of a single relation. Try to match
5069  * it to expressional index columns, in hopes of finding some
5070  * statistics.
5071  *
5072  * Note that we consider all index columns including INCLUDE columns,
5073  * since there could be stats for such columns. But the test for
5074  * uniqueness needs to be warier.
5075  *
5076  * XXX it's conceivable that there are multiple matches with different
5077  * index opfamilies; if so, we need to pick one that matches the
5078  * operator we are estimating for. FIXME later.
5079  */
5080  ListCell *ilist;
5081  ListCell *slist;
5082 
5083  foreach(ilist, onerel->indexlist)
5084  {
5085  IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
5086  ListCell *indexpr_item;
5087  int pos;
5088 
5089  indexpr_item = list_head(index->indexprs);
5090  if (indexpr_item == NULL)
5091  continue; /* no expressions here... */
5092 
5093  for (pos = 0; pos < index->ncolumns; pos++)
5094  {
5095  if (index->indexkeys[pos] == 0)
5096  {
5097  Node *indexkey;
5098 
5099  if (indexpr_item == NULL)
5100  elog(ERROR, "too few entries in indexprs list");
5101  indexkey = (Node *) lfirst(indexpr_item);
5102  if (indexkey && IsA(indexkey, RelabelType))
5103  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5104  if (equal(node, indexkey))
5105  {
5106  /*
5107  * Found a match ... is it a unique index? Tests here
5108  * should match has_unique_index().
5109  */
5110  if (index->unique &&
5111  index->nkeycolumns == 1 &&
5112  pos == 0 &&
5113  (index->indpred == NIL || index->predOK))
5114  vardata->isunique = true;
5115 
5116  /*
5117  * Has it got stats? We only consider stats for
5118  * non-partial indexes, since partial indexes probably
5119  * don't reflect whole-relation statistics; the above
5120  * check for uniqueness is the only info we take from
5121  * a partial index.
5122  *
5123  * An index stats hook, however, must make its own
5124  * decisions about what to do with partial indexes.
5125  */
5126  if (get_index_stats_hook &&
5127  (*get_index_stats_hook) (root, index->indexoid,
5128  pos + 1, vardata))
5129  {
5130  /*
5131  * The hook took control of acquiring a stats
5132  * tuple. If it did supply a tuple, it'd better
5133  * have supplied a freefunc.
5134  */
5135  if (HeapTupleIsValid(vardata->statsTuple) &&
5136  !vardata->freefunc)
5137  elog(ERROR, "no function provided to release variable stats with");
5138  }
5139  else if (index->indpred == NIL)
5140  {
5141  vardata->statsTuple =
5142  SearchSysCache3(STATRELATTINH,
5143  ObjectIdGetDatum(index->indexoid),
5144  Int16GetDatum(pos + 1),
5145  BoolGetDatum(false));
5146  vardata->freefunc = ReleaseSysCache;
5147 
5148  if (HeapTupleIsValid(vardata->statsTuple))
5149  {
5150  /* Get index's table for permission check */
5151  RangeTblEntry *rte;
5152  Oid userid;
5153 
5154  rte = planner_rt_fetch(index->rel->relid, root);
5155  Assert(rte->rtekind == RTE_RELATION);
5156 
5157  /*
5158  * Use onerel->userid if it's set, in case
5159  * we're accessing the table via a view.
5160  */
5161  userid = OidIsValid(onerel->userid) ?
5162  onerel->userid : GetUserId();
5163 
5164  /*
5165  * For simplicity, we insist on the whole
5166  * table being selectable, rather than trying
5167  * to identify which column(s) the index
5168  * depends on. Also require all rows to be
5169  * selectable --- there must be no
5170  * securityQuals from security barrier views
5171  * or RLS policies.
5172  */
5173  vardata->acl_ok =
5174  rte->securityQuals == NIL &&
5175  (pg_class_aclcheck(rte->relid, userid,
5176  ACL_SELECT) == ACLCHECK_OK);
5177 
5178  /*
5179  * If the user doesn't have permissions to
5180  * access an inheritance child relation, check
5181  * the permissions of the table actually
5182  * mentioned in the query, since most likely
5183  * the user does have that permission. Note
5184  * that whole-table select privilege on the
5185  * parent doesn't quite guarantee that the
5186  * user could read all columns of the child.
5187  * But in practice it's unlikely that any
5188  * interesting security violation could result
5189  * from allowing access to the expression
5190  * index's stats, so we allow it anyway. See
5191  * similar code in examine_simple_variable()
5192  * for additional comments.
5193  */
5194  if (!vardata->acl_ok &&
5195  root->append_rel_array != NULL)
5196  {
5197  AppendRelInfo *appinfo;
5198  Index varno = index->rel->relid;
5199 
5200  appinfo = root->append_rel_array[varno];
5201  while (appinfo &&
5202  planner_rt_fetch(appinfo->parent_relid,
5203  root)->rtekind == RTE_RELATION)
5204  {
5205  varno = appinfo->parent_relid;
5206  appinfo = root->append_rel_array[varno];
5207  }
5208  if (varno != index->rel->relid)
5209  {
5210  /* Repeat access check on this rel */
5211  rte = planner_rt_fetch(varno, root);
5212  Assert(rte->rtekind == RTE_RELATION);
5213 
5214  userid = OidIsValid(onerel->userid) ?
5215  onerel->userid : GetUserId();
5216 
5217  vardata->acl_ok =
5218  rte->securityQuals == NIL &&
5219  (pg_class_aclcheck(rte->relid,
5220  userid,
5221  ACL_SELECT) == ACLCHECK_OK);
5222  }
5223  }
5224  }
5225  else
5226  {
5227  /* suppress leakproofness checks later */
5228  vardata->acl_ok = true;
5229  }
5230  }
5231  if (vardata->statsTuple)
5232  break;
5233  }
5234  indexpr_item = lnext(index->indexprs, indexpr_item);
5235  }
5236  }
5237  if (vardata->statsTuple)
5238  break;
5239  }
5240 
5241  /*
5242  * Search extended statistics for one with a matching expression.
5243  * There might be multiple ones, so just grab the first one. In the
5244  * future, we might consider the statistics target (and pick the most
5245  * accurate statistics) and maybe some other parameters.
5246  */
5247  foreach(slist, onerel->statlist)
5248  {
5249  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist);
5250  RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
5251  ListCell *expr_item;
5252  int pos;
5253 
5254  /*
5255  * Stop once we've found statistics for the expression (either
5256  * from extended stats, or for an index in the preceding loop).
5257  */
5258  if (vardata->statsTuple)
5259  break;
5260 
5261  /* skip stats without per-expression stats */
5262  if (info->kind != STATS_EXT_EXPRESSIONS)
5263  continue;
5264 
5265  /* skip stats with mismatching stxdinherit value */
5266  if (info->inherit != rte->inh)
5267  continue;
5268 
5269  pos = 0;
5270  foreach(expr_item, info->exprs)
5271  {
5272  Node *expr = (Node *) lfirst(expr_item);
5273 
5274  Assert(expr);
5275 
5276  /* strip RelabelType before comparing it */
5277  if (expr && IsA(expr, RelabelType))
5278  expr = (Node *) ((RelabelType *) expr)->arg;
5279 
5280  /* found a match, see if we can extract pg_statistic row */
5281  if (equal(node, expr))
5282  {
5283  Oid userid;
5284 
5285  /*
5286  * XXX Not sure if we should cache the tuple somewhere.
5287  * Now we just create a new copy every time.
5288  */
5289  vardata->statsTuple =
5290  statext_expressions_load(info->statOid, rte->inh, pos);
5291 
5292  vardata->freefunc = ReleaseDummy;
5293 
5294  /*
5295  * Use onerel->userid if it's set, in case we're accessing
5296  * the table via a view.
5297  */
5298  userid = OidIsValid(onerel->userid) ?
5299  onerel->userid : GetUserId();
5300 
5301  /*
5302  * For simplicity, we insist on the whole table being
5303  * selectable, rather than trying to identify which
5304  * column(s) the statistics object depends on. Also
5305  * require all rows to be selectable --- there must be no
5306  * securityQuals from security barrier views or RLS
5307  * policies.
5308  */
5309  vardata->acl_ok =
5310  rte->securityQuals == NIL &&
5311  (pg_class_aclcheck(rte->relid, userid,
5312  ACL_SELECT) == ACLCHECK_OK);
5313 
5314  /*
5315  * If the user doesn't have permissions to access an
5316  * inheritance child relation, check the permissions of
5317  * the table actually mentioned in the query, since most
5318  * likely the user does have that permission. Note that
5319  * whole-table select privilege on the parent doesn't
5320  * quite guarantee that the user could read all columns of
5321  * the child. But in practice it's unlikely that any
5322  * interesting security violation could result from
5323  * allowing access to the expression stats, so we allow it
5324  * anyway. See similar code in examine_simple_variable()
5325  * for additional comments.
5326  */
5327  if (!vardata->acl_ok &&
5328  root->append_rel_array != NULL)
5329  {
5330  AppendRelInfo *appinfo;
5331  Index varno = onerel->relid;
5332 
5333  appinfo = root->append_rel_array[varno];
5334  while (appinfo &&
5335  planner_rt_fetch(appinfo->parent_relid,
5336  root)->rtekind == RTE_RELATION)
5337  {
5338  varno = appinfo->parent_relid;
5339  appinfo = root->append_rel_array[varno];
5340  }
5341  if (varno != onerel->relid)
5342  {
5343  /* Repeat access check on this rel */
5344  rte = planner_rt_fetch(varno, root);
5345  Assert(rte->rtekind == RTE_RELATION);
5346 
5347  userid = OidIsValid(onerel->userid) ?
5348  onerel->userid : GetUserId();
5349 
5350  vardata->acl_ok =
5351  rte->securityQuals == NIL &&
5352  (pg_class_aclcheck(rte->relid,
5353  userid,
5354  ACL_SELECT) == ACLCHECK_OK);
5355  }
5356  }
5357 
5358  break;
5359  }
5360 
5361  pos++;
5362  }
5363  }
5364  }
5365 }
int bms_singleton_member(const Bitmapset *a)
Definition: bitmapset.c:580
void bms_free(Bitmapset *a)
Definition: bitmapset.c:209
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:675
@ BMS_SINGLETON
Definition: bitmapset.h:74
@ BMS_EMPTY_SET
Definition: bitmapset.h:73
@ BMS_MULTIPLE
Definition: bitmapset.h:75
#define MemSet(start, val, len)
Definition: c.h:953
HeapTuple statext_expressions_load(Oid stxoid, bool inh, int idx)
int32 exprTypmod(const Node *expr)
Definition: nodeFuncs.c:266
static ListCell * list_head(const List *l)
Definition: pg_list.h:126
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:341
bool has_unique_index(RelOptInfo *rel, AttrNumber attno)
Definition: plancat.c:2126
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:443
static void ReleaseDummy(HeapTuple tuple)
Definition: selfuncs.c:4936
List * indexlist
Definition: pathnodes.h:889
Oid vartype
Definition: primnodes.h:220
int32 vartypmod
Definition: primnodes.h:222
int32 atttypmod
Definition: selfuncs.h:94
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition: var.c:100

References VariableStatData::acl_ok, ACL_SELECT, ACLCHECK_OK, arg, Assert(), VariableStatData::atttype, VariableStatData::atttypmod, BMS_EMPTY_SET, bms_free(), bms_is_member(), bms_membership(), BMS_MULTIPLE, BMS_SINGLETON, bms_singleton_member(), BoolGetDatum(), elog(), equal(), ERROR, examine_simple_variable(), StatisticExtInfo::exprs, exprType(), exprTypmod(), find_base_rel(), find_join_rel(), VariableStatData::freefunc, get_index_stats_hook, GetUserId(), has_unique_index(), HeapTupleIsValid, if(), RelOptInfo::indexlist, RangeTblEntry::inh, StatisticExtInfo::inherit, Int16GetDatum(), IsA, VariableStatData::isunique, StatisticExtInfo::kind, lfirst, list_head(), lnext(), MemSet, NIL, ObjectIdGetDatum(), OidIsValid, AppendRelInfo::parent_relid, pg_class_aclcheck(), planner_rt_fetch, pull_varnos(), VariableStatData::rel, ReleaseDummy(), ReleaseSysCache(), RangeTblEntry::relid, RelOptInfo::relid, RTE_RELATION, RangeTblEntry::rtekind, SearchSysCache3(), RangeTblEntry::securityQuals, statext_expressions_load(), RelOptInfo::statlist, StatisticExtInfo::statOid, STATRELATTINH, VariableStatData::statsTuple, RelOptInfo::userid, VariableStatData::var, Var::varattno, Var::varno, Var::vartype, VariableStatData::vartype, and Var::vartypmod.

Referenced by booltestsel(), boolvarsel(), estimate_hash_bucket_stats(), estimate_num_groups(), get_join_variables(), get_restriction_variable(), mergejoinscansel(), nulltestsel(), and scalararraysel_containment().
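As a usage illustration, the sketch below shows the calling pattern most estimators in this file follow around examine_variable(): examine the expression, test whether a stats tuple was found, fetch the slot of interest with get_attstatsslot(), and always finish with ReleaseVariableStats(). This is a condensed, hypothetical helper, not code taken verbatim from selfuncs.c.

    /* Sketch: total frequency of the MCV list for an arbitrary expression,
     * or -1 if no usable statistics were found. */
    static double
    example_mcv_frequency_sum(PlannerInfo *root, Node *expr, int varRelid)
    {
        VariableStatData vardata;
        AttStatsSlot sslot;
        double      sum = -1.0;

        examine_variable(root, expr, varRelid, &vardata);

        if (HeapTupleIsValid(vardata.statsTuple) &&
            get_attstatsslot(&sslot, vardata.statsTuple,
                             STATISTIC_KIND_MCV, InvalidOid,
                             ATTSTATSSLOT_NUMBERS))
        {
            sum = 0.0;
            for (int i = 0; i < sslot.nnumbers; i++)
                sum += sslot.numbers[i];    /* per-MCV frequencies from pg_statistic */
            free_attstatsslot(&sslot);
        }

        ReleaseVariableStats(vardata);
        return sum;
    }

Real estimators additionally pass the statistics through statistic_proc_security_check() before invoking potentially leaky operator functions on the sampled values; that is what the acl_ok flag set above is for.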

◆ find_join_input_rel()

static RelOptInfo * find_join_input_rel ( PlannerInfo root,
Relids  relids 
)
static

Definition at line 6317 of file selfuncs.c.

6318 {
6319  RelOptInfo *rel = NULL;
6320 
6321  switch (bms_membership(relids))
6322  {
6323  case BMS_EMPTY_SET:
6324  /* should not happen */
6325  break;
6326  case BMS_SINGLETON:
6327  rel = find_base_rel(root, bms_singleton_member(relids));
6328  break;
6329  case BMS_MULTIPLE:
6330  rel = find_join_rel(root, relids);
6331  break;
6332  }
6333 
6334  if (rel == NULL)
6335  elog(ERROR, "could not find RelOptInfo for given relids");
6336 
6337  return rel;
6338 }

References BMS_EMPTY_SET, bms_membership(), BMS_MULTIPLE, BMS_SINGLETON, bms_singleton_member(), elog(), ERROR, find_base_rel(), and find_join_rel().

Referenced by eqjoinsel().

◆ generic_restriction_selectivity()

double generic_restriction_selectivity ( PlannerInfo root,
Oid  oproid,
Oid  collation,
List args,
int  varRelid,
double  default_selectivity 
)

Definition at line 912 of file selfuncs.c.

915 {
916  double selec;
917  VariableStatData vardata;
918  Node *other;
919  bool varonleft;
920 
921  /*
922  * If expression is not variable OP something or something OP variable,
923  * then punt and return the default estimate.
924  */
925  if (!get_restriction_variable(root, args, varRelid,
926  &vardata, &other, &varonleft))
927  return default_selectivity;
928 
929  /*
930  * If the something is a NULL constant, assume operator is strict and
931  * return zero, ie, operator will never return TRUE.
932  */
933  if (IsA(other, Const) &&
934  ((Const *) other)->constisnull)
935  {
936  ReleaseVariableStats(vardata);
937  return 0.0;
938  }
939 
940  if (IsA(other, Const))
941  {
942  /* Variable is being compared to a known non-null constant */
943  Datum constval = ((Const *) other)->constvalue;
944  FmgrInfo opproc;
945  double mcvsum;
946  double mcvsel;
947  double nullfrac;
948  int hist_size;
949 
950  fmgr_info(get_opcode(oproid), &opproc);
951 
952  /*
953  * Calculate the selectivity for the column's most common values.
954  */
955  mcvsel = mcv_selectivity(&vardata, &opproc, collation,
956  constval, varonleft,
957  &mcvsum);
958 
959  /*
960  * If the histogram is large enough, see what fraction of it matches
961  * the query, and assume that's representative of the non-MCV
962  * population. Otherwise use the default selectivity for the non-MCV
963  * population.
964  */
965  selec = histogram_selectivity(&vardata, &opproc, collation,
966  constval, varonleft,
967  10, 1, &hist_size);
968  if (selec < 0)
969  {
970  /* Nope, fall back on default */
971  selec = default_selectivity;
972  }
973  else if (hist_size < 100)
974  {
975  /*
976  * For histogram sizes from 10 to 100, we combine the histogram
977  * and default selectivities, putting increasingly more trust in
978  * the histogram for larger sizes.
979  */
980  double hist_weight = hist_size / 100.0;
981 
982  selec = selec * hist_weight +
983  default_selectivity * (1.0 - hist_weight);
984  }
985 
986  /* In any case, don't believe extremely small or large estimates. */
987  if (selec < 0.0001)
988  selec = 0.0001;
989  else if (selec > 0.9999)
990  selec = 0.9999;
991 
992  /* Don't forget to account for nulls. */
993  if (HeapTupleIsValid(vardata.statsTuple))
994  nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
995  else
996  nullfrac = 0.0;
997 
998  /*
999  * Now merge the results from the MCV and histogram calculations,
1000  * realizing that the histogram covers only the non-null values that
1001  * are not listed in MCV.
1002  */
1003  selec *= 1.0 - nullfrac - mcvsum;
1004  selec += mcvsel;
1005  }
1006  else
1007  {
1008  /* Comparison value is not constant, so we can't do anything */
1009  selec = default_selectivity;
1010  }
1011 
1012  ReleaseVariableStats(vardata);
1013 
1014  /* result should be in range, but make sure... */
1015  CLAMP_PROBABILITY(selec);
1016 
1017  return selec;
1018 }
double mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, double *sumcommonp)
Definition: selfuncs.c:730
double histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation, Datum constval, bool varonleft, int min_hist_size, int n_skip, int *hist_size)
Definition: selfuncs.c:821

References generate_unaccent_rules::args, CLAMP_PROBABILITY, fmgr_info(), get_opcode(), get_restriction_variable(), GETSTRUCT, HeapTupleIsValid, histogram_selectivity(), IsA, mcv_selectivity(), ReleaseVariableStats, and VariableStatData::statsTuple.

Referenced by ltreeparentsel(), and matchingsel().
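To make the MCV/histogram blending concrete, here is a worked example with made-up numbers (an illustrative sketch, not derived from any real statistics): hist_size = 40 samples give a histogram match fraction of 0.02, the default selectivity is 0.005, the MCV list covers 30% of the rows of which matching entries account for 0.05, and stanullfrac is 0.10.

    #include <stdio.h>

    int
    main(void)
    {
        double default_selectivity = 0.005;
        double hist_selec = 0.02;   /* fraction of histogram samples matching */
        int    hist_size = 40;      /* 10 <= hist_size < 100 triggers blending */
        double mcvsum = 0.30;       /* total frequency of the MCV list */
        double mcvsel = 0.05;       /* frequency of MCVs matching the operator */
        double nullfrac = 0.10;     /* stanullfrac */

        double hist_weight = hist_size / 100.0;
        double selec = hist_selec * hist_weight +
                       default_selectivity * (1.0 - hist_weight);  /* 0.011 */

        selec *= 1.0 - nullfrac - mcvsum;   /* restrict to non-null, non-MCV rows */
        selec += mcvsel;                    /* add back the matching MCVs */

        printf("estimated selectivity: %.4f\n", selec);    /* 0.0566 */
        return 0;
    }

The histogram estimate is trusted at weight 0.4, giving 0.011 for the non-MCV population; scaling by the non-null, non-MCV fraction (0.6) and adding the MCV matches yields 0.0566.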

◆ genericcostestimate()

void genericcostestimate ( PlannerInfo root,
IndexPath path,
double  loop_count,
GenericCosts costs 
)

Definition at line 6436 of file selfuncs.c.

6440 {
6441  IndexOptInfo *index = path->indexinfo;
6442  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
6443  List *indexOrderBys = path->indexorderbys;
6444  Cost indexStartupCost;
6445  Cost indexTotalCost;
6446  Selectivity indexSelectivity;
6447  double indexCorrelation;
6448  double numIndexPages;
6449  double numIndexTuples;
6450  double spc_random_page_cost;
6451  double num_sa_scans;
6452  double num_outer_scans;
6453  double num_scans;
6454  double qual_op_cost;
6455  double qual_arg_cost;
6456  List *selectivityQuals;
6457  ListCell *l;
6458 
6459  /*
6460  * If the index is partial, AND the index predicate with the explicitly
6461  * given indexquals to produce a more accurate idea of the index
6462  * selectivity.
6463  */
6464  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
6465 
6466  /*
6467  * Check for ScalarArrayOpExpr index quals, and estimate the number of
6468  * index scans that will be performed.
6469  */
6470  num_sa_scans = 1;
6471  foreach(l, indexQuals)
6472  {
6473  RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6474 
6475  if (IsA(rinfo->clause, ScalarArrayOpExpr))
6476  {
6477  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6478  int alength = estimate_array_length(lsecond(saop->args));
6479 
6480  if (alength > 1)
6481  num_sa_scans *= alength;
6482  }
6483  }
6484 
6485  /* Estimate the fraction of main-table tuples that will be visited */
6486  indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6487  index->rel->relid,
6488  JOIN_INNER,
6489  NULL);
6490 
6491  /*
6492  * If caller didn't give us an estimate, estimate the number of index
6493  * tuples that will be visited. We do it in this rather peculiar-looking
6494  * way in order to get the right answer for partial indexes.
6495  */
6496  numIndexTuples = costs->numIndexTuples;
6497  if (numIndexTuples <= 0.0)
6498  {
6499  numIndexTuples = indexSelectivity * index->rel->tuples;
6500 
6501  /*
6502  * The above calculation counts all the tuples visited across all
6503  * scans induced by ScalarArrayOpExpr nodes. We want to consider the
6504  * average per-indexscan number, so adjust. This is a handy place to
6505  * round to integer, too. (If caller supplied tuple estimate, it's
6506  * responsible for handling these considerations.)
6507  */
6508  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6509  }
6510 
6511  /*
6512  * We can bound the number of tuples by the index size in any case. Also,
6513  * always estimate at least one tuple is touched, even when
6514  * indexSelectivity estimate is tiny.
6515  */
6516  if (numIndexTuples > index->tuples)
6517  numIndexTuples = index->tuples;
6518  if (numIndexTuples < 1.0)
6519  numIndexTuples = 1.0;
6520 
6521  /*
6522  * Estimate the number of index pages that will be retrieved.
6523  *
6524  * We use the simplistic method of taking a pro-rata fraction of the total
6525  * number of index pages. In effect, this counts only leaf pages and not
6526  * any overhead such as index metapage or upper tree levels.
6527  *
6528  * In practice access to upper index levels is often nearly free because
6529  * those tend to stay in cache under load; moreover, the cost involved is
6530  * highly dependent on index type. We therefore ignore such costs here
6531  * and leave it to the caller to add a suitable charge if needed.
6532  */
6533  if (index->pages > 1 && index->tuples > 1)
6534  numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6535  else
6536  numIndexPages = 1.0;
6537 
6538  /* fetch estimated page cost for tablespace containing index */
6539  get_tablespace_page_costs(index->reltablespace,
6540  &spc_random_page_cost,
6541  NULL);
6542 
6543  /*
6544  * Now compute the disk access costs.
6545  *
6546  * The above calculations are all per-index-scan. However, if we are in a
6547  * nestloop inner scan, we can expect the scan to be repeated (with
6548  * different search keys) for each row of the outer relation. Likewise,
6549  * ScalarArrayOpExpr quals result in multiple index scans. This creates
6550  * the potential for cache effects to reduce the number of disk page
6551  * fetches needed. We want to estimate the average per-scan I/O cost in
6552  * the presence of caching.
6553  *
6554  * We use the Mackert-Lohman formula (see costsize.c for details) to
6555  * estimate the total number of page fetches that occur. While this
6556  * wasn't what it was designed for, it seems a reasonable model anyway.
6557  * Note that we are counting pages not tuples anymore, so we take N = T =
6558  * index size, as if there were one "tuple" per page.
6559  */
6560  num_outer_scans = loop_count;
6561  num_scans = num_sa_scans * num_outer_scans;
6562 
6563  if (num_scans > 1)
6564  {
6565  double pages_fetched;
6566 
6567  /* total page fetches ignoring cache effects */
6568  pages_fetched = numIndexPages * num_scans;
6569 
6570  /* use Mackert and Lohman formula to adjust for cache effects */
6571  pages_fetched = index_pages_fetched(pages_fetched,
6572  index->pages,
6573  (double) index->pages,
6574  root);
6575 
6576  /*
6577  * Now compute the total disk access cost, and then report a pro-rated
6578  * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
6579  * since that's internal to the indexscan.)
6580  */
6581  indexTotalCost = (pages_fetched * spc_random_page_cost)
6582  / num_outer_scans;
6583  }
6584  else
6585  {
6586  /*
6587  * For a single index scan, we just charge spc_random_page_cost per
6588  * page touched.
6589  */
6590  indexTotalCost = numIndexPages * spc_random_page_cost;
6591  }
6592 
6593  /*
6594  * CPU cost: any complex expressions in the indexquals will need to be
6595  * evaluated once at the start of the scan to reduce them to runtime keys
6596  * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
6597  * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6598  * indexqual operator. Because we have numIndexTuples as a per-scan
6599  * number, we have to multiply by num_sa_scans to get the correct result
6600  * for ScalarArrayOpExpr cases. Similarly add in costs for any index
6601  * ORDER BY expressions.
6602  *
6603  * Note: this neglects the possible costs of rechecking lossy operators.
6604  * Detecting that that might be needed seems more expensive than it's
6605  * worth, though, considering all the other inaccuracies here ...
6606  */
6607  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals) +
6608  index_other_operands_eval_cost(root, indexOrderBys);
6609  qual_op_cost = cpu_operator_cost *
6610  (list_length(indexQuals) + list_length(indexOrderBys));
6611 
6612  indexStartupCost = qual_arg_cost;
6613  indexTotalCost += qual_arg_cost;
6614  indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6615 
6616  /*
6617  * Generic assumption about index correlation: there isn't any.
6618  */
6619  indexCorrelation = 0.0;
6620 
6621  /*
6622  * Return everything to caller.
6623  */
6624  costs->indexStartupCost = indexStartupCost;
6625  costs->indexTotalCost = indexTotalCost;
6626  costs->indexSelectivity = indexSelectivity;
6627  costs->indexCorrelation = indexCorrelation;
6628  costs->numIndexPages = numIndexPages;
6629  costs->numIndexTuples = numIndexTuples;
6630  costs->spc_random_page_cost = spc_random_page_cost;
6631  costs->num_sa_scans = num_sa_scans;
6632 }
double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root)
Definition: costsize.c:868
double cpu_index_tuple_cost
Definition: costsize.c:123
double spc_random_page_cost
Definition: selfuncs.h:130
List * indexorderbys
Definition: pathnodes.h:1602

References add_predicate_to_index_quals(), ScalarArrayOpExpr::args, RestrictInfo::clause, clauselist_selectivity(), cpu_index_tuple_cost, cpu_operator_cost, estimate_array_length(), get_quals_from_indexclauses(), get_tablespace_page_costs(), index_other_operands_eval_cost(), index_pages_fetched(), IndexPath::indexclauses, GenericCosts::indexCorrelation, IndexPath::indexinfo, IndexPath::indexorderbys, GenericCosts::indexSelectivity, GenericCosts::indexStartupCost, GenericCosts::indexTotalCost, IsA, JOIN_INNER, lfirst, list_length(), lsecond, GenericCosts::num_sa_scans, GenericCosts::numIndexPages, GenericCosts::numIndexTuples, and GenericCosts::spc_random_page_cost.

Referenced by blcostestimate(), btcostestimate(), gistcostestimate(), hashcostestimate(), and spgcostestimate().
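genericcostestimate() is designed to be called from an index AM's amcostestimate support function. The sketch below, for a hypothetical AM called "my", follows the pattern used by the in-core callers listed above: zero the GenericCosts struct (optionally pre-filling numIndexTuples), call genericcostestimate(), apply any AM-specific surcharges, and copy the results into the output parameters.

    static void
    mycostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
                   Cost *indexStartupCost, Cost *indexTotalCost,
                   Selectivity *indexSelectivity, double *indexCorrelation,
                   double *indexPages)
    {
        GenericCosts costs;

        MemSet(&costs, 0, sizeof(costs));   /* numIndexTuples left at 0 so the
                                             * generic code derives it from the
                                             * index selectivity */

        genericcostestimate(root, path, loop_count, &costs);

        /* An AM-specific charge could be added to costs.indexTotalCost here,
         * e.g. for descending a tree or reading a metapage. */

        *indexStartupCost = costs.indexStartupCost;
        *indexTotalCost = costs.indexTotalCost;
        *indexSelectivity = costs.indexSelectivity;
        *indexCorrelation = costs.indexCorrelation;
        *indexPages = costs.numIndexPages;
    }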

◆ get_actual_variable_endpoint()

static bool get_actual_variable_endpoint ( Relation  heapRel,
Relation  indexRel,
ScanDirection  indexscandir,
ScanKey  scankeys,
int16  typLen,
bool  typByVal,
TupleTableSlot tableslot,
MemoryContext  outercontext,
Datum endpointDatum 
)
static

Definition at line 6156 of file selfuncs.c.

6165 {
6166  bool have_data = false;
6167  SnapshotData SnapshotNonVacuumable;
6168  IndexScanDesc index_scan;
6169  Buffer vmbuffer = InvalidBuffer;
6170  BlockNumber last_heap_block = InvalidBlockNumber;
6171  int n_visited_heap_pages = 0;
6172  ItemPointer tid;
6173  Datum values[INDEX_MAX_KEYS];
6174  bool isnull[INDEX_MAX_KEYS];
6175  MemoryContext oldcontext;
6176 
6177  /*
6178  * We use the index-only-scan machinery for this. With mostly-static
6179  * tables that's a win because it avoids a heap visit. It's also a win
6180  * for dynamic data, but the reason is less obvious; read on for details.
6181  *
6182  * In principle, we should scan the index with our current active
6183  * snapshot, which is the best approximation we've got to what the query
6184  * will see when executed. But that won't be exact if a new snap is taken
6185  * before running the query, and it can be very expensive if a lot of
6186  * recently-dead or uncommitted rows exist at the beginning or end of the
6187  * index (because we'll laboriously fetch each one and reject it).
6188  * Instead, we use SnapshotNonVacuumable. That will accept recently-dead
6189  * and uncommitted rows as well as normal visible rows. On the other
6190  * hand, it will reject known-dead rows, and thus not give a bogus answer
6191  * when the extreme value has been deleted (unless the deletion was quite
6192  * recent); that case motivates not using SnapshotAny here.
6193  *
6194  * A crucial point here is that SnapshotNonVacuumable, with
6195  * GlobalVisTestFor(heapRel) as horizon, yields the inverse of the
6196  * condition that the indexscan will use to decide that index entries are
6197  * killable (see heap_hot_search_buffer()). Therefore, if the snapshot
6198  * rejects a tuple (or more precisely, all tuples of a HOT chain) and we
6199  * have to continue scanning past it, we know that the indexscan will mark
6200  * that index entry killed. That means that the next
6201  * get_actual_variable_endpoint() call will not have to re-consider that
6202  * index entry. In this way we avoid repetitive work when this function
6203  * is used a lot during planning.
6204  *
6205  * But using SnapshotNonVacuumable creates a hazard of its own. In a
6206  * recently-created index, some index entries may point at "broken" HOT
6207  * chains in which not all the tuple versions contain data matching the
6208  * index entry. The live tuple version(s) certainly do match the index,
6209  * but SnapshotNonVacuumable can accept recently-dead tuple versions that
6210  * don't match. Hence, if we took data from the selected heap tuple, we
6211  * might get a bogus answer that's not close to the index extremal value,
6212  * or could even be NULL. We avoid this hazard because we take the data
6213  * from the index entry not the heap.
6214  *
6215  * Despite all this care, there are situations where we might find many
6216  * non-visible tuples near the end of the index. We don't want to expend
6217  * a huge amount of time here, so we give up once we've read too many heap
6218  * pages. When we fail for that reason, the caller will end up using
6219  * whatever extremal value is recorded in pg_statistic.
6220  */
6221  InitNonVacuumableSnapshot(SnapshotNonVacuumable,
6222  GlobalVisTestFor(heapRel));
6223 
6224  index_scan = index_beginscan(heapRel, indexRel,
6225  &SnapshotNonVacuumable,
6226  1, 0);
6227  /* Set it up for index-only scan */
6228  index_scan->xs_want_itup = true;
6229  index_rescan(index_scan, scankeys, 1, NULL, 0);
6230 
6231  /* Fetch first/next tuple in specified direction */
6232  while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
6233  {
6234  BlockNumber block = ItemPointerGetBlockNumber(tid);
6235 
6236  if (!VM_ALL_VISIBLE(heapRel,
6237  block,
6238  &vmbuffer))
6239  {
6240  /* Rats, we have to visit the heap to check visibility */
6241  if (!index_fetch_heap(index_scan, tableslot))
6242  {
6243  /*
6244  * No visible tuple for this index entry, so we need to
6245  * advance to the next entry. Before doing so, count heap
6246  * page fetches and give up if we've done too many.
6247  *
6248  * We don't charge a page fetch if this is the same heap page
6249  * as the previous tuple. This is on the conservative side,
6250  * since other recently-accessed pages are probably still in
6251  * buffers too; but it's good enough for this heuristic.
6252  */
6253 #define VISITED_PAGES_LIMIT 100
6254 
6255  if (block != last_heap_block)
6256  {
6257  last_heap_block = block;
6258  n_visited_heap_pages++;
6259  if (n_visited_heap_pages > VISITED_PAGES_LIMIT)
6260  break;
6261  }
6262 
6263  continue; /* no visible tuple, try next index entry */
6264  }
6265 
6266  /* We don't actually need the heap tuple for anything */
6267  ExecClearTuple(tableslot);
6268 
6269  /*
6270  * We don't care whether there's more than one visible tuple in
6271  * the HOT chain; if any are visible, that's good enough.
6272  */
6273  }
6274 
6275  /*
6276  * We expect that btree will return data in IndexTuple not HeapTuple
6277  * format. It's not lossy either.
6278  */
6279  if (!index_scan->xs_itup)
6280  elog(ERROR, "no data returned for index-only scan");
6281  if (index_scan->xs_recheck)
6282  elog(ERROR, "unexpected recheck indication from btree");
6283 
6284  /* OK to deconstruct the index tuple */
6285  index_deform_tuple(index_scan->xs_itup,
6286  index_scan->xs_itupdesc,
6287  values, isnull);
6288 
6289  /* Shouldn't have got a null, but be careful */
6290  if (isnull[0])
6291  elog(ERROR, "found unexpected null value in index \"%s\"",
6292  RelationGetRelationName(indexRel));
6293 
6294  /* Copy the index column value out to caller's context */
6295  oldcontext = MemoryContextSwitchTo(outercontext);
6296  *endpointDatum = datumCopy(values[0], typByVal, typLen);
6297  MemoryContextSwitchTo(oldcontext);
6298  have_data = true;
6299  break;
6300  }
6301 
6302  if (vmbuffer != InvalidBuffer)
6303  ReleaseBuffer(vmbuffer);
6304  index_endscan(index_scan);
6305 
6306  return have_data;
6307 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static Datum values[MAXATTR]
Definition: bootstrap.c:156
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3931
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
ItemPointer index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
Definition: indexam.c:525
IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, Snapshot snapshot, int nkeys, int norderbys)
Definition: indexam.c:205
bool index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot)
Definition: indexam.c:583
void index_endscan(IndexScanDesc scan)
Definition: indexam.c:327
void index_rescan(IndexScanDesc scan, ScanKey keys, int nkeys, ScanKey orderbys, int norderbys)
Definition: indexam.c:301
void index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:456
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:135
#define INDEX_MAX_KEYS
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4066
#define RelationGetRelationName(relation)
Definition: rel.h:535
#define VISITED_PAGES_LIMIT
#define InitNonVacuumableSnapshot(snapshotdata, vistestp)
Definition: snapmgr.h:82
IndexTuple xs_itup
Definition: relscan.h:142
struct TupleDescData * xs_itupdesc
Definition: relscan.h:143
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:433
#define VM_ALL_VISIBLE(r, b, v)
Definition: visibilitymap.h:24

References datumCopy(), elog(), ERROR, ExecClearTuple(), GlobalVisTestFor(), index_beginscan(), index_deform_tuple(), index_endscan(), index_fetch_heap(), index_getnext_tid(), INDEX_MAX_KEYS, index_rescan(), InitNonVacuumableSnapshot, InvalidBlockNumber, InvalidBuffer, ItemPointerGetBlockNumber(), MemoryContextSwitchTo(), RelationGetRelationName, ReleaseBuffer(), values, VISITED_PAGES_LIMIT, VM_ALL_VISIBLE, IndexScanDescData::xs_itup, IndexScanDescData::xs_itupdesc, IndexScanDescData::xs_recheck, and IndexScanDescData::xs_want_itup.

Referenced by get_actual_variable_range().

◆ get_actual_variable_range()

static bool get_actual_variable_range ( PlannerInfo root,
VariableStatData vardata,
Oid  sortop,
Oid  collation,
Datum min,
Datum max 
)
static

Definition at line 5980 of file selfuncs.c.

5983 {
5984  bool have_data = false;
5985  RelOptInfo *rel = vardata->rel;
5986  RangeTblEntry *rte;
5987  ListCell *lc;
5988 
5989  /* No hope if no relation or it doesn't have indexes */
5990  if (rel == NULL || rel->indexlist == NIL)
5991  return false;
5992  /* If it has indexes it must be a plain relation */
5993  rte = root->simple_rte_array[rel->relid];
5994  Assert(rte->rtekind == RTE_RELATION);
5995 
5996  /* Search through the indexes to see if any match our problem */
5997  foreach(lc, rel->indexlist)
5998  {
5999  IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
6000  ScanDirection indexscandir;
6001 
6002  /* Ignore non-btree indexes */
6003  if (index->relam != BTREE_AM_OID)
6004  continue;
6005 
6006  /*
6007  * Ignore partial indexes --- we only want stats that cover the entire
6008  * relation.
6009  */
6010  if (index->indpred != NIL)
6011  continue;
6012 
6013  /*
6014  * The index list might include hypothetical indexes inserted by a
6015  * get_relation_info hook --- don't try to access them.
6016  */
6017  if (index->hypothetical)
6018  continue;
6019 
6020  /*
6021  * The first index column must match the desired variable, sortop, and
6022  * collation --- but we can use a descending-order index.
6023  */
6024  if (collation != index->indexcollations[0])
6025  continue; /* test first 'cause it's cheapest */
6026  if (!match_index_to_operand(vardata->var, 0, index))
6027  continue;
6028  switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
6029  {
6030  case BTLessStrategyNumber:
6031  if (index->reverse_sort[0])
6032  indexscandir = BackwardScanDirection;
6033  else
6034  indexscandir = ForwardScanDirection;
6035  break;
6036  case BTGreaterStrategyNumber:
6037  if (index->reverse_sort[0])
6038  indexscandir = ForwardScanDirection;
6039  else
6040  indexscandir = BackwardScanDirection;
6041  break;
6042  default:
6043  /* index doesn't match the sortop */
6044  continue;
6045  }
6046 
6047  /*
6048  * Found a suitable index to extract data from. Set up some data that
6049  * can be used by both invocations of get_actual_variable_endpoint.
6050  */
6051  {
6052  MemoryContext tmpcontext;
6053  MemoryContext oldcontext;
6054  Relation heapRel;
6055  Relation indexRel;
6056  TupleTableSlot *slot;
6057  int16 typLen;
6058  bool typByVal;
6059  ScanKeyData scankeys[1];
6060 
6061  /* Make sure any cruft gets recycled when we're done */
6063  "get_actual_variable_range workspace",
6065  oldcontext = MemoryContextSwitchTo(tmpcontext);
6066 
6067  /*
6068  * Open the table and index so we can read from them. We should
6069  * already have some type of lock on each.
6070  */
6071  heapRel = table_open(rte->relid, NoLock);
6072  indexRel = index_open(index->indexoid, NoLock);
6073 
6074  /* build some stuff needed for indexscan execution */
6075  slot = table_slot_create(heapRel, NULL);
6076  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
6077 
6078  /* set up an IS NOT NULL scan key so that we ignore nulls */
6079  ScanKeyEntryInitialize(&scankeys[0],
6080  SK_ISNULL | SK_SEARCHNOTNULL,
6081  1, /* index col to scan */
6082  InvalidStrategy, /* no strategy */
6083  InvalidOid, /* no strategy subtype */
6084  InvalidOid, /* no collation */
6085  InvalidOid, /* no reg proc for this */
6086  (Datum) 0); /* constant */
6087 
6088  /* If min is requested ... */
6089  if (min)
6090  {
6091  have_data = get_actual_variable_endpoint(heapRel,
6092  indexRel,
6093  indexscandir,
6094  scankeys,
6095  typLen,
6096  typByVal,
6097  slot,
6098  oldcontext,
6099  min);
6100  }
6101  else
6102  {
6103  /* If min not requested, still want to fetch max */
6104  have_data = true;
6105  }
6106 
6107  /* If max is requested, and we didn't already fail ... */
6108  if (max && have_data)
6109  {
6110  /* scan in the opposite direction; all else is the same */
6111  have_data = get_actual_variable_endpoint(heapRel,
6112  indexRel,
6113  -indexscandir,
6114  scankeys,
6115  typLen,
6116  typByVal,
6117  slot,
6118  oldcontext,
6119  max);
6120  }
6121 
6122  /* Clean everything up */
6123  ExecDropSingleTupleTableSlot(slot);
6124 
6125  index_close(indexRel, NoLock);
6126  table_close(heapRel, NoLock);
6127 
6128  MemoryContextSwitchTo(oldcontext);
6129  MemoryContextDelete(tmpcontext);
6130 
6131  /* And we're done */
6132  break;
6133  }
6134  }
6135 
6136  return have_data;
6137 }
signed short int16
Definition: c.h:429
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1254
bool match_index_to_operand(Node *operand, int indexcol, IndexOptInfo *index)
Definition: indxpath.c:3716
void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval)
Definition: lsyscache.c:2209
MemoryContext CurrentMemoryContext
Definition: mcxt.c:124
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:376
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:153
void ScanKeyEntryInitialize(ScanKey entry, int flags, AttrNumber attributeNumber, StrategyNumber strategy, Oid subtype, Oid collation, RegProcedure procedure, Datum argument)
Definition: scankey.c:32
ScanDirection
Definition: sdir.h:23
@ BackwardScanDirection
Definition: sdir.h:24
@ ForwardScanDirection
Definition: sdir.h:26
static bool get_actual_variable_endpoint(Relation heapRel, Relation indexRel, ScanDirection indexscandir, ScanKey scankeys, int16 typLen, bool typByVal, TupleTableSlot *tableslot, MemoryContext outercontext, Datum *endpointDatum)
Definition: selfuncs.c:6156
#define SK_SEARCHNOTNULL
Definition: skey.h:122
#define SK_ISNULL
Definition: skey.h:115
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define InvalidStrategy
Definition: stratnum.h:24
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:91

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), VariableStatData::atttype, BackwardScanDirection, BTGreaterStrategyNumber, BTLessStrategyNumber, CurrentMemoryContext, ExecDropSingleTupleTableSlot(), ForwardScanDirection, get_actual_variable_endpoint(), get_op_opfamily_strategy(), get_typlenbyval(), index_close(), index_open(), RelOptInfo::indexlist, InvalidOid, InvalidStrategy, lfirst, match_index_to_operand(), MemoryContextDelete(), MemoryContextSwitchTo(), NIL, NoLock, VariableStatData::rel, RangeTblEntry::relid, RelOptInfo::relid, RTE_RELATION, RangeTblEntry::rtekind, ScanKeyEntryInitialize(), SK_ISNULL, SK_SEARCHNOTNULL, table_close(), table_open(), table_slot_create(), and VariableStatData::var.

Referenced by get_variable_range(), and ineq_histogram_selectivity().
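
Note that the max-endpoint probe above reuses the same scan keys and simply negates indexscandir. This works because ScanDirection is defined with symmetric values, so unary minus flips the direction; the definition (as in src/include/access/sdir.h) is reproduced here only for illustration:

    typedef enum ScanDirection
    {
        BackwardScanDirection = -1,
        NoMovementScanDirection = 0,
        ForwardScanDirection = 1
    } ScanDirection;

    /* hence (ScanDirection) -ForwardScanDirection == BackwardScanDirection */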

◆ get_join_variables()

void get_join_variables ( PlannerInfo *  root,
List *  args,
SpecialJoinInfo *  sjinfo,
VariableStatData *  vardata1,
VariableStatData *  vardata2,
bool *  join_is_reversed 
)

Definition at line 4908 of file selfuncs.c.

4911 {
4912  Node *left,
4913  *right;
4914 
4915  if (list_length(args) != 2)
4916  elog(ERROR, "join operator should take two arguments");
4917 
4918  left = (Node *) linitial(args);
4919  right = (Node *) lsecond(args);
4920 
4921  examine_variable(root, left, 0, vardata1);
4922  examine_variable(root, right, 0, vardata2);
4923 
4924  if (vardata1->rel &&
4925  bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4926  *join_is_reversed = true; /* var1 is on RHS */
4927  else if (vardata2->rel &&
4928  bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4929  *join_is_reversed = true; /* var2 is on LHS */
4930  else
4931  *join_is_reversed = false;
4932 }
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:316
Relids relids
Definition: pathnodes.h:824
Relids syn_lefthand
Definition: pathnodes.h:2705
Relids syn_righthand
Definition: pathnodes.h:2706

References generate_unaccent_rules::args, bms_is_subset(), elog(), ERROR, examine_variable(), linitial, list_length(), lsecond, VariableStatData::rel, RelOptInfo::relids, SpecialJoinInfo::syn_lefthand, and SpecialJoinInfo::syn_righthand.

Referenced by eqjoinsel(), neqjoinsel(), and networkjoinsel().
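
A minimal caller-side sketch, modeled on eqjoinsel(); the wrapper function name and the elided estimation math are placeholders, not part of selfuncs.c:

    static double
    example_join_sel(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo)
    {
        VariableStatData vardata1;
        VariableStatData vardata2;
        bool        join_is_reversed;
        double      selec = 1.0;

        get_join_variables(root, args, sjinfo,
                           &vardata1, &vardata2, &join_is_reversed);

        /* ... compute selec, swapping the two sides if join_is_reversed ... */

        ReleaseVariableStats(vardata1);
        ReleaseVariableStats(vardata2);
        return selec;
    }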

◆ get_quals_from_indexclauses()

List* get_quals_from_indexclauses ( List *  indexclauses)

Definition at line 6352 of file selfuncs.c.

6353 {
6354  List *result = NIL;
6355  ListCell *lc;
6356 
6357  foreach(lc, indexclauses)
6358  {
6359  IndexClause *iclause = lfirst_node(IndexClause, lc);
6360  ListCell *lc2;
6361 
6362  foreach(lc2, iclause->indexquals)
6363  {
6364  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6365 
6366  result = lappend(result, rinfo);
6367  }
6368  }
6369  return result;
6370 }

References IndexClause::indexquals, lappend(), lfirst_node, and NIL.

Referenced by brincostestimate(), genericcostestimate(), and gincostestimate().
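
A hedged sketch of the usual consumer pattern, loosely following genericcostestimate(): the flattened quals are merged with the index predicate (if any) and handed to clauselist_selectivity() to estimate the fraction of rows the scan will fetch. The wrapper function name is illustrative only.

    static Selectivity
    example_index_selectivity(PlannerInfo *root, IndexPath *path)
    {
        IndexOptInfo *index = path->indexinfo;
        List       *indexQuals = get_quals_from_indexclauses(path->indexclauses);
        List       *selectivityQuals = add_predicate_to_index_quals(index, indexQuals);

        return clauselist_selectivity(root, selectivityQuals,
                                      index->rel->relid,
                                      JOIN_INNER, NULL);
    }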

◆ get_restriction_variable()

bool get_restriction_variable ( PlannerInfo *  root,
List *  args,
int  varRelid,
VariableStatData *  vardata,
Node **  other,
bool *  varonleft 
)

Definition at line 4848 of file selfuncs.c.

4851 {
4852  Node *left,
4853  *right;
4854  VariableStatData rdata;
4855 
4856  /* Fail if not a binary opclause (probably shouldn't happen) */
4857  if (list_length(args) != 2)
4858  return false;
4859 
4860  left = (Node *) linitial(args);
4861  right = (Node *) lsecond(args);
4862 
4863  /*
4864  * Examine both sides. Note that when varRelid is nonzero, Vars of other
4865  * relations will be treated as pseudoconstants.
4866  */
4867  examine_variable(root, left, varRelid, vardata);
4868  examine_variable(root, right, varRelid, &rdata);
4869 
4870  /*
4871  * If one side is a variable and the other not, we win.
4872  */
4873  if (vardata->rel && rdata.rel == NULL)
4874  {
4875  *varonleft = true;
4876  *other = estimate_expression_value(root, rdata.var);
4877  /* Assume we need no ReleaseVariableStats(rdata) here */
4878  return true;
4879  }
4880 
4881  if (vardata->rel == NULL && rdata.rel)
4882  {
4883  *varonleft = false;
4884  *other = estimate_expression_value(root, vardata->var);
4885  /* Assume we need no ReleaseVariableStats(*vardata) here */
4886  *vardata = rdata;
4887  return true;
4888  }
4889 
4890  /* Oops, clause has wrong structure (probably var op var) */
4891  ReleaseVariableStats(*vardata);
4892  ReleaseVariableStats(rdata);
4893 
4894  return false;
4895 }
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition: clauses.c:2273

References generate_unaccent_rules::args, estimate_expression_value(), examine_variable(), linitial, list_length(), lsecond, VariableStatData::rel, ReleaseVariableStats, and VariableStatData::var.

Referenced by _int_matchsel(), arraycontsel(), eqsel_internal(), generic_restriction_selectivity(), multirangesel(), networksel(), patternsel_common(), rangesel(), scalarineqsel_wrapper(), and tsmatchsel().
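
A minimal caller-side sketch, modeled on eqsel_internal() with the negate path dropped; the wrapper name is hypothetical and assumes the operator's OID and collation are already at hand:

    static double
    example_eq_restriction_sel(PlannerInfo *root, Oid operator, Oid collation,
                               List *args, int varRelid)
    {
        VariableStatData vardata;
        Node       *other;
        bool        varonleft;
        double      selec;

        if (!get_restriction_variable(root, args, varRelid,
                                      &vardata, &other, &varonleft))
            return DEFAULT_EQ_SEL;  /* no lone Var on either side */

        if (IsA(other, Const))
            selec = var_eq_const(&vardata, operator, collation,
                                 ((Const *) other)->constvalue,
                                 ((Const *) other)->constisnull,
                                 varonleft, false);
        else
            selec = var_eq_non_const(&vardata, operator, collation,
                                     other, varonleft, false);

        ReleaseVariableStats(vardata);
        return selec;
    }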

◆ get_stats_slot_range()

static void get_stats_slot_range ( AttStatsSlot *  sslot,
Oid  opfuncoid,
FmgrInfo *  opproc,
Oid  collation,
int16  typLen,
bool  typByVal,
Datum *  min,
Datum *  max,
bool *  p_have_data 
)
static

Definition at line 5917 of file selfuncs.c.

5920 {
5921  Datum tmin = *min;
5922  Datum tmax = *max;
5923  bool have_data = *p_have_data;
5924  bool found_tmin = false;
5925  bool found_tmax = false;
5926 
5927  /* Look up the comparison function, if we didn't already do so */
5928  if (opproc->fn_oid != opfuncoid)
5929  fmgr_info(opfuncoid, opproc);
5930 
5931  /* Scan all the slot's values */
5932  for (int i = 0; i < sslot->nvalues; i++)
5933  {
5934  if (!have_data)
5935  {
5936  tmin = tmax = sslot->values[i];
5937  found_tmin = found_tmax = true;
5938  *p_have_data = have_data = true;
5939  continue;
5940  }
5941  if (DatumGetBool(FunctionCall2Coll(opproc,
5942  collation,
5943  sslot->values[i], tmin)))
5944  {
5945  tmin = sslot->values[i];
5946  found_tmin = true;
5947  }
5948  if (DatumGetBool(FunctionCall2Coll(opproc,
5949  collation,
5950  tmax, sslot->values[i])))
5951  {
5952  tmax = sslot->values[i];
5953  found_tmax = true;
5954  }
5955  }
5956 
5957  /*
5958  * Copy the slot's values, if we found new extreme values.
5959  */
5960  if (found_tmin)
5961  *min = datumCopy(tmin, typByVal, typLen);
5962  if (found_tmax)
5963  *max = datumCopy(tmax, typByVal, typLen);
5964 }
Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:1136
Oid fn_oid
Definition: fmgr.h:59

References datumCopy(), DatumGetBool(), fmgr_info(), FmgrInfo::fn_oid, FunctionCall2Coll(), i, AttStatsSlot::nvalues, and AttStatsSlot::values.

Referenced by get_variable_range().
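
For context, an abridged sketch of how get_variable_range() puts this helper to work after the point where its listing below is cut: the histogram slot is scanned first, then the MCV slot, accumulating extremes into the same tmin/tmax. This is simplified; the real flow first tries a histogram already sorted by the requested sortop, in which case the endpoints can be taken directly.

    opproc.fn_oid = InvalidOid;     /* comparison proc not looked up yet */

    if (get_attstatsslot(&sslot, vardata->statsTuple,
                         STATISTIC_KIND_HISTOGRAM, InvalidOid,
                         ATTSTATSSLOT_VALUES))
    {
        get_stats_slot_range(&sslot, opfuncoid, &opproc,
                             collation, typLen, typByVal,
                             &tmin, &tmax, &have_data);
        free_attstatsslot(&sslot);
    }

    if (get_attstatsslot(&sslot, vardata->statsTuple,
                         STATISTIC_KIND_MCV, InvalidOid,
                         ATTSTATSSLOT_VALUES))
    {
        get_stats_slot_range(&sslot, opfuncoid, &opproc,
                             collation, typLen, typByVal,
                             &tmin, &tmax, &have_data);
        free_attstatsslot(&sslot);
    }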

◆ get_variable_numdistinct()

double get_variable_numdistinct ( VariableStatData *  vardata,
bool *  isdefault 
)

Definition at line 5657 of file selfuncs.c.

5658 {
5659  double stadistinct;
5660  double stanullfrac = 0.0;
5661  double ntuples;
5662 
5663  *isdefault = false;
5664 
5665  /*
5666  * Determine the stadistinct value to use. There are cases where we can
5667  * get an estimate even without a pg_statistic entry, or can get a better
5668  * value than is in pg_statistic. Grab stanullfrac too if we can find it
5669  * (otherwise, assume no nulls, for lack of any better idea).
5670  */
5671  if (HeapTupleIsValid(vardata->statsTuple))
5672  {
5673  /* Use the pg_statistic entry */
5674  Form_pg_statistic stats;
5675 
5676  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
5677  stadistinct = stats->stadistinct;
5678  stanullfrac = stats->stanullfrac;
5679  }
5680  else if (vardata->vartype == BOOLOID)
5681  {
5682  /*
5683  * Special-case boolean columns: presumably, two distinct values.
5684  *
5685  * Are there any other datatypes we should wire in special estimates
5686  * for?
5687  */
5688  stadistinct = 2.0;
5689  }
5690  else if (vardata->rel && vardata->rel->rtekind == RTE_VALUES)
5691  {
5692  /*
5693  * If the Var represents a column of a VALUES RTE, assume it's unique.
5694  * This could of course be very wrong, but it should tend to be true
5695  * in well-written queries. We could consider examining the VALUES'
5696  * contents to get some real statistics; but that only works if the
5697  * entries are all constants, and it would be pretty expensive anyway.
5698  */
5699  stadistinct = -1.0; /* unique (and all non null) */
5700  }
5701  else
5702  {
5703  /*
5704  * We don't keep statistics for system columns, but in some cases we
5705  * can infer distinctness anyway.
5706  */
5707  if (vardata->var && IsA(vardata->var, Var))
5708  {
5709  switch (((Var *) vardata->var)->varattno)
5710  {
5711  case SelfItemPointerAttributeNumber:
5712  stadistinct = -1.0; /* unique (and all non null) */
5713  break;
5714  case TableOidAttributeNumber:
5715  stadistinct = 1.0; /* only 1 value */
5716  break;
5717  default:
5718  stadistinct = 0.0; /* means "unknown" */
5719  break;
5720  }
5721  }
5722  else
5723  stadistinct = 0.0; /* means "unknown" */
5724 
5725  /*
5726  * XXX consider using estimate_num_groups on expressions?
5727  */
5728  }
5729 
5730  /*
5731  * If there is a unique index or DISTINCT clause for the variable, assume
5732  * it is unique no matter what pg_statistic says; the statistics could be
5733  * out of date, or we might have found a partial unique index that proves
5734  * the var is unique for this query. However, we'd better still believe
5735  * the null-fraction statistic.
5736  */
5737  if (vardata->isunique)
5738  stadistinct = -1.0 * (1.0 - stanullfrac);
5739 
5740  /*
5741  * If we had an absolute estimate, use that.
5742  */
5743  if (stadistinct > 0.0)
5744  return clamp_row_est(stadistinct);
5745 
5746  /*
5747  * Otherwise we need to get the relation size; punt if not available.
5748  */
5749  if (vardata->rel == NULL)
5750  {
5751  *isdefault = true;
5752  return DEFAULT_NUM_DISTINCT;
5753  }
5754  ntuples = vardata->rel->tuples;
5755  if (ntuples <= 0.0)
5756  {
5757  *isdefault = true;
5758  return DEFAULT_NUM_DISTINCT;
5759  }
5760 
5761  /*
5762  * If we had a relative estimate, use that.
5763  */
5764  if (stadistinct < 0.0)
5765  return clamp_row_est(-stadistinct * ntuples);
5766 
5767  /*
5768  * With no data, estimate ndistinct = ntuples if the table is small, else
5769  * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5770  * that the behavior isn't discontinuous.
5771  */
5772  if (ntuples < DEFAULT_NUM_DISTINCT)
5773  return clamp_row_est(ntuples);
5774 
5775  *isdefault = true;
5776  return DEFAULT_NUM_DISTINCT;
5777 }
@ RTE_VALUES
Definition: parsenodes.h:987
#define DEFAULT_NUM_DISTINCT
Definition: selfuncs.h:52
RTEKind rtekind
Definition: pathnodes.h:875
#define TableOidAttributeNumber
Definition: sysattr.h:26
#define SelfItemPointerAttributeNumber
Definition: sysattr.h:21

References clamp_row_est(), DEFAULT_NUM_DISTINCT, GETSTRUCT, HeapTupleIsValid, IsA, VariableStatData::isunique, VariableStatData::rel, RTE_VALUES, RelOptInfo::rtekind, SelfItemPointerAttributeNumber, VariableStatData::statsTuple, TableOidAttributeNumber, RelOptInfo::tuples, VariableStatData::var, and VariableStatData::vartype.

Referenced by add_unique_group_var(), eqjoinsel(), estimate_hash_bucket_stats(), ineq_histogram_selectivity(), var_eq_const(), and var_eq_non_const().
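
As a worked example of the relative-estimate branch: with stadistinct = -0.2 and rel->tuples = 10000, the result is clamp_row_est(0.2 * 10000) = 2000. A minimal caller-side sketch (variable names illustrative):

    bool        isdefault;
    double      ndistinct;

    ndistinct = get_variable_numdistinct(&vardata, &isdefault);
    if (isdefault)
    {
        /* no usable stats: ndistinct is merely DEFAULT_NUM_DISTINCT (200) */
    }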

◆ get_variable_range()

static bool get_variable_range ( PlannerInfo *  root,
VariableStatData *  vardata,
Oid  sortop,
Oid  collation,
Datum *  min,
Datum *  max 
)
static

Definition at line 5790 of file selfuncs.c.

5793 {
5794  Datum tmin = 0;
5795  Datum tmax = 0;
5796  bool have_data = false;
5797  int16 typLen;
5798  bool typByVal;
5799  Oid opfuncoid;
5800  FmgrInfo opproc;
5801  AttStatsSlot sslot;
5802 
5803  /*
5804  * XXX It's very tempting to try to use the actual column min and max, if
5805  * we can get them relatively-cheaply with an index probe. However, since
5806  * this function is called many times during join planning, that could
5807  * have unpleasant effects on planning speed. Need more investigation
5808  * before enabling this.
5809  */
5810 #ifdef NOT_USED
5811  if (get_actual_variable_range(root, vardata, sortop, collation, min, max))
5812  return true;
5813 #endif
5814 
5815  if (!HeapTupleIsValid(vardata->statsTuple))
5816  {
5817  /* no stats available, so default result */
5818  return false;
5819  }
5820 
5821  /*
5822  * If we can't apply the sortop to the stats data, just fail. In
5823  * principle, if there's a histogram and no MCVs, we could return the
5824  * histogram endpoints without ever applying the sortop ... but it's
5825  * probably not worth trying, because whatever the caller wants to do with
5826  * the endpoints would likely fail the security check too.
5827  */
5828  if (!statistic_proc_security_check(vardata,
5829  (opfuncoid = get_opcode(sortop))))
5830  return false;