PostgreSQL Source Code (git master)
selfuncs.c
1 /*-------------------------------------------------------------------------
2  *
3  * selfuncs.c
4  * Selectivity functions and index cost estimation functions for
5  * standard operators and index access methods.
6  *
7  * Selectivity routines are registered in the pg_operator catalog
8  * in the "oprrest" and "oprjoin" attributes.
9  *
10  * Index cost functions are located via the index AM's API struct,
11  * which is obtained from the handler function registered in pg_am.
12  *
13  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
14  * Portions Copyright (c) 1994, Regents of the University of California
15  *
16  *
17  * IDENTIFICATION
18  * src/backend/utils/adt/selfuncs.c
19  *
20  *-------------------------------------------------------------------------
21  */
22 
23 /*----------
24  * Operator selectivity estimation functions are called to estimate the
25  * selectivity of WHERE clauses whose top-level operator is their operator.
26  * We divide the problem into two cases:
27  * Restriction clause estimation: the clause involves vars of just
28  * one relation.
29  * Join clause estimation: the clause involves vars of multiple rels.
30  * Join selectivity estimation is far more difficult and usually less accurate
31  * than restriction estimation.
32  *
33  * When dealing with the inner scan of a nestloop join, we consider the
34  * join's joinclauses as restriction clauses for the inner relation, and
35  * treat vars of the outer relation as parameters (a/k/a constants of unknown
36  * values). So, restriction estimators need to be able to accept an argument
37  * telling which relation is to be treated as the variable.
38  *
39  * The call convention for a restriction estimator (oprrest function) is
40  *
41  * Selectivity oprrest (PlannerInfo *root,
42  * Oid operator,
43  * List *args,
44  * int varRelid);
45  *
46  * root: general information about the query (rtable and RelOptInfo lists
47  * are particularly important for the estimator).
48  * operator: OID of the specific operator in question.
49  * args: argument list from the operator clause.
50  * varRelid: if not zero, the relid (rtable index) of the relation to
51  * be treated as the variable relation. May be zero if the args list
52  * is known to contain vars of only one relation.
53  *
54  * This is represented at the SQL level (in pg_proc) as
55  *
56  * float8 oprrest (internal, oid, internal, int4);
57  *
58  * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59  * of the relation that are expected to produce a TRUE result for the
60  * given operator.
61  *
62  * The call convention for a join estimator (oprjoin function) is similar
63  * except that varRelid is not needed, and instead join information is
64  * supplied:
65  *
66  * Selectivity oprjoin (PlannerInfo *root,
67  * Oid operator,
68  * List *args,
69  * JoinType jointype,
70  * SpecialJoinInfo *sjinfo);
71  *
72  * float8 oprjoin (internal, oid, internal, int2, internal);
73  *
74  * (Before Postgres 8.4, join estimators had only the first four of these
75  * parameters. That signature is still allowed, but deprecated.) The
76  * relationship between jointype and sjinfo is explained in the comments for
77  * clause_selectivity() --- the short version is that jointype is usually
78  * best ignored in favor of examining sjinfo.
79  *
80  * Join selectivity for regular inner and outer joins is defined as the
81  * fraction (0 to 1) of the cross product of the relations that is expected
82  * to produce a TRUE result for the given operator. For both semi and anti
83  * joins, however, the selectivity is defined as the fraction of the left-hand
84  * side relation's rows that are expected to have a match (ie, at least one
85  * row with a TRUE result) in the right-hand side.
86  *
87  * For both oprrest and oprjoin functions, the operator's input collation OID
88  * (if any) is passed using the standard fmgr mechanism, so that the estimator
89  * function can fetch it with PG_GET_COLLATION(). Note, however, that all
90  * statistics in pg_statistic are currently built using the relevant column's
91  * collation.
92  *----------
93  */
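As an illustration of the oprrest convention described above, the following sketch (a hypothetical example, not part of selfuncs.c) shows the shape of a minimal custom restriction estimator; the name my_opsel and its flat 1% guess are made up, and a real estimator would consult pg_statistic much as the functions in this file do. At the SQL level such a function would be declared as float8 (internal, oid, internal, int4) and attached to an operator through the RESTRICT clause of CREATE OPERATOR.

#include "postgres.h"

#include "fmgr.h"
#include "nodes/pathnodes.h"

PG_FUNCTION_INFO_V1(my_opsel);

Datum
my_opsel(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid          operator = PG_GETARG_OID(1);
    List        *args = (List *) PG_GETARG_POINTER(2);
    int          varRelid = PG_GETARG_INT32(3);

    /*
     * A real estimator would use root, operator, args and varRelid (for
     * example via get_restriction_variable()) to locate statistics; this
     * stub just returns a constant guess.
     */
    (void) root;
    (void) operator;
    (void) args;
    (void) varRelid;

    PG_RETURN_FLOAT8(0.01);
}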
94 
95 #include "postgres.h"
96 
97 #include <ctype.h>
98 #include <math.h>
99 
100 #include "access/brin.h"
101 #include "access/brin_page.h"
102 #include "access/gin.h"
103 #include "access/table.h"
104 #include "access/tableam.h"
105 #include "access/visibilitymap.h"
106 #include "catalog/pg_am.h"
107 #include "catalog/pg_collation.h"
108 #include "catalog/pg_operator.h"
109 #include "catalog/pg_statistic.h"
110 #include "catalog/pg_statistic_ext.h"
111 #include "executor/nodeAgg.h"
112 #include "miscadmin.h"
113 #include "nodes/makefuncs.h"
114 #include "nodes/nodeFuncs.h"
115 #include "optimizer/clauses.h"
116 #include "optimizer/cost.h"
117 #include "optimizer/optimizer.h"
118 #include "optimizer/pathnode.h"
119 #include "optimizer/paths.h"
120 #include "optimizer/plancat.h"
121 #include "parser/parse_clause.h"
122 #include "parser/parsetree.h"
123 #include "statistics/statistics.h"
124 #include "storage/bufmgr.h"
125 #include "utils/acl.h"
126 #include "utils/builtins.h"
127 #include "utils/date.h"
128 #include "utils/datum.h"
129 #include "utils/fmgroids.h"
130 #include "utils/index_selfuncs.h"
131 #include "utils/lsyscache.h"
132 #include "utils/memutils.h"
133 #include "utils/pg_locale.h"
134 #include "utils/rel.h"
135 #include "utils/selfuncs.h"
136 #include "utils/snapmgr.h"
137 #include "utils/spccache.h"
138 #include "utils/syscache.h"
139 #include "utils/timestamp.h"
140 #include "utils/typcache.h"
141 
142 
143 /* Hooks for plugins to get control when we ask for stats */
144 get_relation_stats_hook_type get_relation_stats_hook = NULL;
145 get_index_stats_hook_type get_index_stats_hook = NULL;
146 
147 static double eqsel_internal(PG_FUNCTION_ARGS, bool negate);
148 static double eqjoinsel_inner(Oid opfuncoid, Oid collation,
149  VariableStatData *vardata1, VariableStatData *vardata2,
150  double nd1, double nd2,
151  bool isdefault1, bool isdefault2,
152  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
153  Form_pg_statistic stats1, Form_pg_statistic stats2,
154  bool have_mcvs1, bool have_mcvs2);
155 static double eqjoinsel_semi(Oid opfuncoid, Oid collation,
156  VariableStatData *vardata1, VariableStatData *vardata2,
157  double nd1, double nd2,
158  bool isdefault1, bool isdefault2,
159  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
160  Form_pg_statistic stats1, Form_pg_statistic stats2,
161  bool have_mcvs1, bool have_mcvs2,
162  RelOptInfo *inner_rel);
163 static bool estimate_multivariate_ndistinct(PlannerInfo *root,
164  RelOptInfo *rel, List **varinfos, double *ndistinct);
165 static bool convert_to_scalar(Datum value, Oid valuetypid, Oid collid,
166  double *scaledvalue,
167  Datum lobound, Datum hibound, Oid boundstypid,
168  double *scaledlobound, double *scaledhibound);
169 static double convert_numeric_to_scalar(Datum value, Oid typid, bool *failure);
170 static void convert_string_to_scalar(char *value,
171  double *scaledvalue,
172  char *lobound,
173  double *scaledlobound,
174  char *hibound,
175  double *scaledhibound);
176 static void convert_bytea_to_scalar(Datum value,
177  double *scaledvalue,
178  Datum lobound,
179  double *scaledlobound,
180  Datum hibound,
181  double *scaledhibound);
182 static double convert_one_string_to_scalar(char *value,
183  int rangelo, int rangehi);
184 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
185  int rangelo, int rangehi);
186 static char *convert_string_datum(Datum value, Oid typid, Oid collid,
187  bool *failure);
188 static double convert_timevalue_to_scalar(Datum value, Oid typid,
189  bool *failure);
190 static void examine_simple_variable(PlannerInfo *root, Var *var,
191  VariableStatData *vardata);
192 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
193  Oid sortop, Oid collation,
194  Datum *min, Datum *max);
195 static void get_stats_slot_range(AttStatsSlot *sslot,
196  Oid opfuncoid, FmgrInfo *opproc,
197  Oid collation, int16 typLen, bool typByVal,
198  Datum *min, Datum *max, bool *p_have_data);
199 static bool get_actual_variable_range(PlannerInfo *root,
200  VariableStatData *vardata,
201  Oid sortop, Oid collation,
202  Datum *min, Datum *max);
203 static bool get_actual_variable_endpoint(Relation heapRel,
204  Relation indexRel,
205  ScanDirection indexscandir,
206  ScanKey scankeys,
207  int16 typLen,
208  bool typByVal,
209  TupleTableSlot *tableslot,
210  MemoryContext outercontext,
211  Datum *endpointDatum);
212 static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
213 
214 
215 /*
216  * eqsel - Selectivity of "=" for any data types.
217  *
218  * Note: this routine is also used to estimate selectivity for some
219  * operators that are not "=" but have comparable selectivity behavior,
220  * such as "~=" (geometric approximate-match). Even for "=", we must
221  * keep in mind that the left and right datatypes may differ.
222  */
223 Datum
224 eqsel(PG_FUNCTION_ARGS)
225 {
226  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
227 }
228 
229 /*
230  * Common code for eqsel() and neqsel()
231  */
232 static double
233 eqsel_internal(PG_FUNCTION_ARGS, bool negate)
234 {
235  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
236  Oid operator = PG_GETARG_OID(1);
237  List *args = (List *) PG_GETARG_POINTER(2);
238  int varRelid = PG_GETARG_INT32(3);
239  Oid collation = PG_GET_COLLATION();
240  VariableStatData vardata;
241  Node *other;
242  bool varonleft;
243  double selec;
244 
245  /*
246  * When asked about <>, we do the estimation using the corresponding =
247  * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
248  */
249  if (negate)
250  {
251  operator = get_negator(operator);
252  if (!OidIsValid(operator))
253  {
254  /* Use default selectivity (should we raise an error instead?) */
255  return 1.0 - DEFAULT_EQ_SEL;
256  }
257  }
258 
259  /*
260  * If expression is not variable = something or something = variable, then
261  * punt and return a default estimate.
262  */
263  if (!get_restriction_variable(root, args, varRelid,
264  &vardata, &other, &varonleft))
265  return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;
266 
267  /*
268  * We can do a lot better if the something is a constant. (Note: the
269  * Const might result from estimation rather than being a simple constant
270  * in the query.)
271  */
272  if (IsA(other, Const))
273  selec = var_eq_const(&vardata, operator, collation,
274  ((Const *) other)->constvalue,
275  ((Const *) other)->constisnull,
276  varonleft, negate);
277  else
278  selec = var_eq_non_const(&vardata, operator, collation, other,
279  varonleft, negate);
280 
281  ReleaseVariableStats(vardata);
282 
283  return selec;
284 }
285 
286 /*
287  * var_eq_const --- eqsel for var = const case
288  *
289  * This is exported so that some other estimation functions can use it.
290  */
291 double
292 var_eq_const(VariableStatData *vardata, Oid operator, Oid collation,
293  Datum constval, bool constisnull,
294  bool varonleft, bool negate)
295 {
296  double selec;
297  double nullfrac = 0.0;
298  bool isdefault;
299  Oid opfuncoid;
300 
301  /*
302  * If the constant is NULL, assume operator is strict and return zero, ie,
303  * operator will never return TRUE. (It's zero even for a negator op.)
304  */
305  if (constisnull)
306  return 0.0;
307 
308  /*
309  * Grab the nullfrac for use below. Note we allow use of nullfrac
310  * regardless of security check.
311  */
312  if (HeapTupleIsValid(vardata->statsTuple))
313  {
314  Form_pg_statistic stats;
315 
316  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
317  nullfrac = stats->stanullfrac;
318  }
319 
320  /*
321  * If we matched the var to a unique index or DISTINCT clause, assume
322  * there is exactly one match regardless of anything else. (This is
323  * slightly bogus, since the index or clause's equality operator might be
324  * different from ours, but it's much more likely to be right than
325  * ignoring the information.)
326  */
327  if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
328  {
329  selec = 1.0 / vardata->rel->tuples;
330  }
331  else if (HeapTupleIsValid(vardata->statsTuple) &&
332  statistic_proc_security_check(vardata,
333  (opfuncoid = get_opcode(operator))))
334  {
335  AttStatsSlot sslot;
336  bool match = false;
337  int i;
338 
339  /*
340  * Is the constant "=" to any of the column's most common values?
341  * (Although the given operator may not really be "=", we will assume
342  * that seeing whether it returns TRUE is an appropriate test. If you
343  * don't like this, maybe you shouldn't be using eqsel for your
344  * operator...)
345  */
346  if (get_attstatsslot(&sslot, vardata->statsTuple,
347  STATISTIC_KIND_MCV, InvalidOid,
348  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
349  {
350  LOCAL_FCINFO(fcinfo, 2);
351  FmgrInfo eqproc;
352 
353  fmgr_info(opfuncoid, &eqproc);
354 
355  /*
356  * Save a few cycles by setting up the fcinfo struct just once.
357  * Using FunctionCallInvoke directly also avoids failure if the
358  * eqproc returns NULL, though really equality functions should
359  * never do that.
360  */
361  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
362  NULL, NULL);
363  fcinfo->args[0].isnull = false;
364  fcinfo->args[1].isnull = false;
365  /* be careful to apply operator right way 'round */
366  if (varonleft)
367  fcinfo->args[1].value = constval;
368  else
369  fcinfo->args[0].value = constval;
370 
371  for (i = 0; i < sslot.nvalues; i++)
372  {
373  Datum fresult;
374 
375  if (varonleft)
376  fcinfo->args[0].value = sslot.values[i];
377  else
378  fcinfo->args[1].value = sslot.values[i];
379  fcinfo->isnull = false;
380  fresult = FunctionCallInvoke(fcinfo);
381  if (!fcinfo->isnull && DatumGetBool(fresult))
382  {
383  match = true;
384  break;
385  }
386  }
387  }
388  else
389  {
390  /* no most-common-value info available */
391  i = 0; /* keep compiler quiet */
392  }
393 
394  if (match)
395  {
396  /*
397  * Constant is "=" to this common value. We know selectivity
398  * exactly (or as exactly as ANALYZE could calculate it, anyway).
399  */
400  selec = sslot.numbers[i];
401  }
402  else
403  {
404  /*
405  * Comparison is against a constant that is neither NULL nor any
406  * of the common values. Its selectivity cannot be more than
407  * this:
408  */
409  double sumcommon = 0.0;
410  double otherdistinct;
411 
412  for (i = 0; i < sslot.nnumbers; i++)
413  sumcommon += sslot.numbers[i];
414  selec = 1.0 - sumcommon - nullfrac;
415  CLAMP_PROBABILITY(selec);
416 
417  /*
418  * and in fact it's probably a good deal less. We approximate that
419  * all the not-common values share this remaining fraction
420  * equally, so we divide by the number of other distinct values.
421  */
422  otherdistinct = get_variable_numdistinct(vardata, &isdefault) -
423  sslot.nnumbers;
424  if (otherdistinct > 1)
425  selec /= otherdistinct;
426 
427  /*
428  * Another cross-check: selectivity shouldn't be estimated as more
429  * than the least common "most common value".
430  */
431  if (sslot.nnumbers > 0 && selec > sslot.numbers[sslot.nnumbers - 1])
432  selec = sslot.numbers[sslot.nnumbers - 1];
433  }
434 
435  free_attstatsslot(&sslot);
436  }
437  else
438  {
439  /*
440  * No ANALYZE stats available, so make a guess using estimated number
441  * of distinct values and assuming they are equally common. (The guess
442  * is unlikely to be very good, but we do know a few special cases.)
443  */
444  selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
445  }
446 
447  /* now adjust if we wanted <> rather than = */
448  if (negate)
449  selec = 1.0 - selec - nullfrac;
450 
451  /* result should be in range, but make sure... */
452  CLAMP_PROBABILITY(selec);
453 
454  return selec;
455 }
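To make the non-MCV branch above concrete, take some hypothetical statistics: stanullfrac = 0.02, MCV frequencies summing to 0.60 across 100 MCV entries whose least common frequency is 0.01, and roughly 500 distinct values overall. For a constant that matches none of the MCVs, the code computes selec = (1.0 - 0.60 - 0.02) / (500 - 100) = 0.00095; since that is already below the least common MCV frequency (0.01), the final cross-check leaves it unchanged.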
456 
457 /*
458  * var_eq_non_const --- eqsel for var = something-other-than-const case
459  *
460  * This is exported so that some other estimation functions can use it.
461  */
462 double
463 var_eq_non_const(VariableStatData *vardata, Oid operator, Oid collation,
464  Node *other,
465  bool varonleft, bool negate)
466 {
467  double selec;
468  double nullfrac = 0.0;
469  bool isdefault;
470 
471  /*
472  * Grab the nullfrac for use below.
473  */
474  if (HeapTupleIsValid(vardata->statsTuple))
475  {
476  Form_pg_statistic stats;
477 
478  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
479  nullfrac = stats->stanullfrac;
480  }
481 
482  /*
483  * If we matched the var to a unique index or DISTINCT clause, assume
484  * there is exactly one match regardless of anything else. (This is
485  * slightly bogus, since the index or clause's equality operator might be
486  * different from ours, but it's much more likely to be right than
487  * ignoring the information.)
488  */
489  if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
490  {
491  selec = 1.0 / vardata->rel->tuples;
492  }
493  else if (HeapTupleIsValid(vardata->statsTuple))
494  {
495  double ndistinct;
496  AttStatsSlot sslot;
497 
498  /*
499  * Search is for a value that we do not know a priori, but we will
500  * assume it is not NULL. Estimate the selectivity as non-null
501  * fraction divided by number of distinct values, so that we get a
502  * result averaged over all possible values whether common or
503  * uncommon. (Essentially, we are assuming that the not-yet-known
504  * comparison value is equally likely to be any of the possible
505  * values, regardless of their frequency in the table. Is that a good
506  * idea?)
507  */
508  selec = 1.0 - nullfrac;
509  ndistinct = get_variable_numdistinct(vardata, &isdefault);
510  if (ndistinct > 1)
511  selec /= ndistinct;
512 
513  /*
514  * Cross-check: selectivity should never be estimated as more than the
515  * most common value's.
516  */
517  if (get_attstatsslot(&sslot, vardata->statsTuple,
518  STATISTIC_KIND_MCV, InvalidOid,
519  ATTSTATSSLOT_NUMBERS))
520  {
521  if (sslot.nnumbers > 0 && selec > sslot.numbers[0])
522  selec = sslot.numbers[0];
523  free_attstatsslot(&sslot);
524  }
525  }
526  else
527  {
528  /*
529  * No ANALYZE stats available, so make a guess using estimated number
530  * of distinct values and assuming they are equally common. (The guess
531  * is unlikely to be very good, but we do know a few special cases.)
532  */
533  selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
534  }
535 
536  /* now adjust if we wanted <> rather than = */
537  if (negate)
538  selec = 1.0 - selec - nullfrac;
539 
540  /* result should be in range, but make sure... */
541  CLAMP_PROBABILITY(selec);
542 
543  return selec;
544 }
545 
546 /*
547  * neqsel - Selectivity of "!=" for any data types.
548  *
549  * This routine is also used for some operators that are not "!="
550  * but have comparable selectivity behavior. See above comments
551  * for eqsel().
552  */
553 Datum
554 neqsel(PG_FUNCTION_ARGS)
555 {
556  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, true));
557 }
558 
559 /*
560  * scalarineqsel - Selectivity of "<", "<=", ">", ">=" for scalars.
561  *
562  * This is the guts of scalarltsel/scalarlesel/scalargtsel/scalargesel.
563  * The isgt and iseq flags distinguish which of the four cases apply.
564  *
565  * The caller has commuted the clause, if necessary, so that we can treat
566  * the variable as being on the left. The caller must also make sure that
567  * the other side of the clause is a non-null Const, and dissect that into
568  * a value and datatype. (This definition simplifies some callers that
569  * want to estimate against a computed value instead of a Const node.)
570  *
571  * This routine works for any datatype (or pair of datatypes) known to
572  * convert_to_scalar(). If it is applied to some other datatype,
573  * it will return an approximate estimate based on assuming that the constant
574  * value falls in the middle of the bin identified by binary search.
575  */
576 static double
577 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt, bool iseq,
578  Oid collation,
579  VariableStatData *vardata, Datum constval, Oid consttype)
580 {
581  Form_pg_statistic stats;
582  FmgrInfo opproc;
583  double mcv_selec,
584  hist_selec,
585  sumcommon;
586  double selec;
587 
588  if (!HeapTupleIsValid(vardata->statsTuple))
589  {
590  /*
591  * No stats are available. Typically this means we have to fall back
592  * on the default estimate; but if the variable is CTID then we can
593  * make an estimate based on comparing the constant to the table size.
594  */
595  if (vardata->var && IsA(vardata->var, Var) &&
596  ((Var *) vardata->var)->varattno == SelfItemPointerAttributeNumber)
597  {
598  ItemPointer itemptr;
599  double block;
600  double density;
601 
602  /*
603  * If the relation's empty, we're going to include all of it.
604  * (This is mostly to avoid divide-by-zero below.)
605  */
606  if (vardata->rel->pages == 0)
607  return 1.0;
608 
609  itemptr = (ItemPointer) DatumGetPointer(constval);
610  block = ItemPointerGetBlockNumberNoCheck(itemptr);
611 
612  /*
613  * Determine the average number of tuples per page (density).
614  *
615  * Since the last page will, on average, be only half full, we can
616  * estimate it to have half as many tuples as earlier pages. So
617  * give it half the weight of a regular page.
618  */
619  density = vardata->rel->tuples / (vardata->rel->pages - 0.5);
620 
621  /* If target is the last page, use half the density. */
622  if (block >= vardata->rel->pages - 1)
623  density *= 0.5;
624 
625  /*
626  * Using the average tuples per page, calculate how far into the
627  * page the itemptr is likely to be and adjust block accordingly,
628  * by adding that fraction of a whole block (but never more than a
629  * whole block, no matter how high the itemptr's offset is). Here
630  * we are ignoring the possibility of dead-tuple line pointers,
631  * which is fairly bogus, but we lack the info to do better.
632  */
633  if (density > 0.0)
634  {
635  OffsetNumber offset = ItemPointerGetOffsetNumberNoCheck(itemptr);
636 
637  block += Min(offset / density, 1.0);
638  }
639 
640  /*
641  * Convert relative block number to selectivity. Again, the last
642  * page has only half weight.
643  */
644  selec = block / (vardata->rel->pages - 0.5);
645 
646  /*
647  * The calculation so far gave us a selectivity for the "<=" case.
648  * We'll have one less tuple for "<" and one additional tuple for
649  * ">=", the latter of which we'll reverse the selectivity for
650  * below, so we can simply subtract one tuple for both cases. The
651  * cases that need this adjustment can be identified by iseq being
652  * equal to isgt.
653  */
654  if (iseq == isgt && vardata->rel->tuples >= 1.0)
655  selec -= (1.0 / vardata->rel->tuples);
656 
657  /* Finally, reverse the selectivity for the ">", ">=" cases. */
658  if (isgt)
659  selec = 1.0 - selec;
660 
661  CLAMP_PROBABILITY(selec);
662  return selec;
663  }
664 
665  /* no stats available, so default result */
666  return DEFAULT_INEQ_SEL;
667  }
668  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
669 
670  fmgr_info(get_opcode(operator), &opproc);
671 
672  /*
673  * If we have most-common-values info, add up the fractions of the MCV
674  * entries that satisfy MCV OP CONST. These fractions contribute directly
675  * to the result selectivity. Also add up the total fraction represented
676  * by MCV entries.
677  */
678  mcv_selec = mcv_selectivity(vardata, &opproc, collation, constval, true,
679  &sumcommon);
680 
681  /*
682  * If there is a histogram, determine which bin the constant falls in, and
683  * compute the resulting contribution to selectivity.
684  */
685  hist_selec = ineq_histogram_selectivity(root, vardata,
686  operator, &opproc, isgt, iseq,
687  collation,
688  constval, consttype);
689 
690  /*
691  * Now merge the results from the MCV and histogram calculations,
692  * realizing that the histogram covers only the non-null values that are
693  * not listed in MCV.
694  */
695  selec = 1.0 - stats->stanullfrac - sumcommon;
696 
697  if (hist_selec >= 0.0)
698  selec *= hist_selec;
699  else
700  {
701  /*
702  * If no histogram but there are values not accounted for by MCV,
703  * arbitrarily assume half of them will match.
704  */
705  selec *= 0.5;
706  }
707 
708  selec += mcv_selec;
709 
710  /* result should be in range, but make sure... */
711  CLAMP_PROBABILITY(selec);
712 
713  return selec;
714 }
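In formula form, the merge performed at the end of scalarineqsel() is selec = mcv_selec + (1.0 - stanullfrac - sumcommon) * hist_selec, with 0.5 substituted for hist_selec when no usable histogram is available. With hypothetical values stanullfrac = 0.1, sumcommon = 0.3, mcv_selec = 0.05 and hist_selec = 0.25, the estimate is 0.05 + 0.6 * 0.25 = 0.20.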
715 
716 /*
717  * mcv_selectivity - Examine the MCV list for selectivity estimates
718  *
719  * Determine the fraction of the variable's MCV population that satisfies
720  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft. Also
721  * compute the fraction of the total column population represented by the MCV
722  * list. This code will work for any boolean-returning predicate operator.
723  *
724  * The function result is the MCV selectivity, and the fraction of the
725  * total population is returned into *sumcommonp. Zeroes are returned
726  * if there is no MCV list.
727  */
728 double
729 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation,
730  Datum constval, bool varonleft,
731  double *sumcommonp)
732 {
733  double mcv_selec,
734  sumcommon;
735  AttStatsSlot sslot;
736  int i;
737 
738  mcv_selec = 0.0;
739  sumcommon = 0.0;
740 
741  if (HeapTupleIsValid(vardata->statsTuple) &&
742  statistic_proc_security_check(vardata, opproc->fn_oid) &&
743  get_attstatsslot(&sslot, vardata->statsTuple,
744  STATISTIC_KIND_MCV, InvalidOid,
745  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
746  {
747  LOCAL_FCINFO(fcinfo, 2);
748 
749  /*
750  * We invoke the opproc "by hand" so that we won't fail on NULL
751  * results. Such cases won't arise for normal comparison functions,
752  * but generic_restriction_selectivity could perhaps be used with
753  * operators that can return NULL. A small side benefit is to not
754  * need to re-initialize the fcinfo struct from scratch each time.
755  */
756  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
757  NULL, NULL);
758  fcinfo->args[0].isnull = false;
759  fcinfo->args[1].isnull = false;
760  /* be careful to apply operator right way 'round */
761  if (varonleft)
762  fcinfo->args[1].value = constval;
763  else
764  fcinfo->args[0].value = constval;
765 
766  for (i = 0; i < sslot.nvalues; i++)
767  {
768  Datum fresult;
769 
770  if (varonleft)
771  fcinfo->args[0].value = sslot.values[i];
772  else
773  fcinfo->args[1].value = sslot.values[i];
774  fcinfo->isnull = false;
775  fresult = FunctionCallInvoke(fcinfo);
776  if (!fcinfo->isnull && DatumGetBool(fresult))
777  mcv_selec += sslot.numbers[i];
778  sumcommon += sslot.numbers[i];
779  }
780  free_attstatsslot(&sslot);
781  }
782 
783  *sumcommonp = sumcommon;
784  return mcv_selec;
785 }
786 
787 /*
788  * histogram_selectivity - Examine the histogram for selectivity estimates
789  *
790  * Determine the fraction of the variable's histogram entries that satisfy
791  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
792  *
793  * This code will work for any boolean-returning predicate operator, whether
794  * or not it has anything to do with the histogram sort operator. We are
795  * essentially using the histogram just as a representative sample. However,
796  * small histograms are unlikely to be all that representative, so the caller
797  * should be prepared to fall back on some other estimation approach when the
798  * histogram is missing or very small. It may also be prudent to combine this
799  * approach with another one when the histogram is small.
800  *
801  * If the actual histogram size is not at least min_hist_size, we won't bother
802  * to do the calculation at all. Also, if the n_skip parameter is > 0, we
803  * ignore the first and last n_skip histogram elements, on the grounds that
804  * they are outliers and hence not very representative. Typical values for
805  * these parameters are 10 and 1.
806  *
807  * The function result is the selectivity, or -1 if there is no histogram
808  * or it's smaller than min_hist_size.
809  *
810  * The output parameter *hist_size receives the actual histogram size,
811  * or zero if no histogram. Callers may use this number to decide how
812  * much faith to put in the function result.
813  *
814  * Note that the result disregards both the most-common-values (if any) and
815  * null entries. The caller is expected to combine this result with
816  * statistics for those portions of the column population. It may also be
817  * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
818  */
819 double
820 histogram_selectivity(VariableStatData *vardata,
821  FmgrInfo *opproc, Oid collation,
822  Datum constval, bool varonleft,
823  int min_hist_size, int n_skip,
824  int *hist_size)
825 {
826  double result;
827  AttStatsSlot sslot;
828 
829  /* check sanity of parameters */
830  Assert(n_skip >= 0);
831  Assert(min_hist_size > 2 * n_skip);
832 
833  if (HeapTupleIsValid(vardata->statsTuple) &&
834  statistic_proc_security_check(vardata, opproc->fn_oid) &&
835  get_attstatsslot(&sslot, vardata->statsTuple,
836  STATISTIC_KIND_HISTOGRAM, InvalidOid,
837  ATTSTATSSLOT_VALUES))
838  {
839  *hist_size = sslot.nvalues;
840  if (sslot.nvalues >= min_hist_size)
841  {
842  LOCAL_FCINFO(fcinfo, 2);
843  int nmatch = 0;
844  int i;
845 
846  /*
847  * We invoke the opproc "by hand" so that we won't fail on NULL
848  * results. Such cases won't arise for normal comparison
849  * functions, but generic_restriction_selectivity could perhaps be
850  * used with operators that can return NULL. A small side benefit
851  * is to not need to re-initialize the fcinfo struct from scratch
852  * each time.
853  */
854  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
855  NULL, NULL);
856  fcinfo->args[0].isnull = false;
857  fcinfo->args[1].isnull = false;
858  /* be careful to apply operator right way 'round */
859  if (varonleft)
860  fcinfo->args[1].value = constval;
861  else
862  fcinfo->args[0].value = constval;
863 
864  for (i = n_skip; i < sslot.nvalues - n_skip; i++)
865  {
866  Datum fresult;
867 
868  if (varonleft)
869  fcinfo->args[0].value = sslot.values[i];
870  else
871  fcinfo->args[1].value = sslot.values[i];
872  fcinfo->isnull = false;
873  fresult = FunctionCallInvoke(fcinfo);
874  if (!fcinfo->isnull && DatumGetBool(fresult))
875  nmatch++;
876  }
877  result = ((double) nmatch) / ((double) (sslot.nvalues - 2 * n_skip));
878  }
879  else
880  result = -1;
881  free_attstatsslot(&sslot);
882  }
883  else
884  {
885  *hist_size = 0;
886  result = -1;
887  }
888 
889  return result;
890 }
891 
892 /*
893  * generic_restriction_selectivity - Selectivity for almost anything
894  *
895  * This function estimates selectivity for operators that we don't have any
896  * special knowledge about, but are on data types that we collect standard
897  * MCV and/or histogram statistics for. (Additional assumptions are that
898  * the operator is strict and immutable, or at least stable.)
899  *
900  * If we have "VAR OP CONST" or "CONST OP VAR", selectivity is estimated by
901  * applying the operator to each element of the column's MCV and/or histogram
902  * stats, and merging the results using the assumption that the histogram is
903  * a reasonable random sample of the column's non-MCV population. Note that
904  * if the operator's semantics are related to the histogram ordering, this
905  * might not be such a great assumption; other functions such as
906  * scalarineqsel() are probably a better match in such cases.
907  *
908  * Otherwise, fall back to the default selectivity provided by the caller.
909  */
910 double
911 generic_restriction_selectivity(PlannerInfo *root, Oid oproid, Oid collation,
912  List *args, int varRelid,
913  double default_selectivity)
914 {
915  double selec;
916  VariableStatData vardata;
917  Node *other;
918  bool varonleft;
919 
920  /*
921  * If expression is not variable OP something or something OP variable,
922  * then punt and return the default estimate.
923  */
924  if (!get_restriction_variable(root, args, varRelid,
925  &vardata, &other, &varonleft))
926  return default_selectivity;
927 
928  /*
929  * If the something is a NULL constant, assume operator is strict and
930  * return zero, ie, operator will never return TRUE.
931  */
932  if (IsA(other, Const) &&
933  ((Const *) other)->constisnull)
934  {
935  ReleaseVariableStats(vardata);
936  return 0.0;
937  }
938 
939  if (IsA(other, Const))
940  {
941  /* Variable is being compared to a known non-null constant */
942  Datum constval = ((Const *) other)->constvalue;
943  FmgrInfo opproc;
944  double mcvsum;
945  double mcvsel;
946  double nullfrac;
947  int hist_size;
948 
949  fmgr_info(get_opcode(oproid), &opproc);
950 
951  /*
952  * Calculate the selectivity for the column's most common values.
953  */
954  mcvsel = mcv_selectivity(&vardata, &opproc, collation,
955  constval, varonleft,
956  &mcvsum);
957 
958  /*
959  * If the histogram is large enough, see what fraction of it matches
960  * the query, and assume that's representative of the non-MCV
961  * population. Otherwise use the default selectivity for the non-MCV
962  * population.
963  */
964  selec = histogram_selectivity(&vardata, &opproc, collation,
965  constval, varonleft,
966  10, 1, &hist_size);
967  if (selec < 0)
968  {
969  /* Nope, fall back on default */
970  selec = default_selectivity;
971  }
972  else if (hist_size < 100)
973  {
974  /*
975  * For histogram sizes from 10 to 100, we combine the histogram
976  * and default selectivities, putting increasingly more trust in
977  * the histogram for larger sizes.
978  */
979  double hist_weight = hist_size / 100.0;
980 
981  selec = selec * hist_weight +
982  default_selectivity * (1.0 - hist_weight);
983  }
984 
985  /* In any case, don't believe extremely small or large estimates. */
986  if (selec < 0.0001)
987  selec = 0.0001;
988  else if (selec > 0.9999)
989  selec = 0.9999;
990 
991  /* Don't forget to account for nulls. */
992  if (HeapTupleIsValid(vardata.statsTuple))
993  nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
994  else
995  nullfrac = 0.0;
996 
997  /*
998  * Now merge the results from the MCV and histogram calculations,
999  * realizing that the histogram covers only the non-null values that
1000  * are not listed in MCV.
1001  */
1002  selec *= 1.0 - nullfrac - mcvsum;
1003  selec += mcvsel;
1004  }
1005  else
1006  {
1007  /* Comparison value is not constant, so we can't do anything */
1008  selec = default_selectivity;
1009  }
1010 
1011  ReleaseVariableStats(vardata);
1012 
1013  /* result should be in range, but make sure... */
1014  CLAMP_PROBABILITY(selec);
1015 
1016  return selec;
1017 }
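As a worked example of the blending step above (with made-up numbers): a histogram of 40 entries gives hist_weight = 40 / 100.0 = 0.4, so a raw histogram selectivity of 0.02 and a default of 0.005 combine to 0.02 * 0.4 + 0.005 * 0.6 = 0.011, which then passes the 0.0001/0.9999 clamps unchanged before the null and MCV adjustments are applied.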
1018 
1019 /*
1020  * ineq_histogram_selectivity - Examine the histogram for scalarineqsel
1021  *
1022  * Determine the fraction of the variable's histogram population that
1023  * satisfies the inequality condition, ie, VAR < (or <=, >, >=) CONST.
1024  * The isgt and iseq flags distinguish which of the four cases apply.
1025  *
1026  * While opproc could be looked up from the operator OID, common callers
1027  * also need to call it separately, so we make the caller pass both.
1028  *
1029  * Returns -1 if there is no histogram (valid results will always be >= 0).
1030  *
1031  * Note that the result disregards both the most-common-values (if any) and
1032  * null entries. The caller is expected to combine this result with
1033  * statistics for those portions of the column population.
1034  *
1035  * This is exported so that some other estimation functions can use it.
1036  */
1037 double
1038 ineq_histogram_selectivity(PlannerInfo *root,
1039  VariableStatData *vardata,
1040  Oid opoid, FmgrInfo *opproc, bool isgt, bool iseq,
1041  Oid collation,
1042  Datum constval, Oid consttype)
1043 {
1044  double hist_selec;
1045  AttStatsSlot sslot;
1046 
1047  hist_selec = -1.0;
1048 
1049  /*
1050  * Someday, ANALYZE might store more than one histogram per rel/att,
1051  * corresponding to more than one possible sort ordering defined for the
1052  * column type. Right now, we know there is only one, so just grab it and
1053  * see if it matches the query.
1054  *
1055  * Note that we can't use opoid as search argument; the staop appearing in
1056  * pg_statistic will be for the relevant '<' operator, but what we have
1057  * might be some other inequality operator such as '>='. (Even if opoid
1058  * is a '<' operator, it could be cross-type.) Hence we must use
1059  * comparison_ops_are_compatible() to see if the operators match.
1060  */
1061  if (HeapTupleIsValid(vardata->statsTuple) &&
1062  statistic_proc_security_check(vardata, opproc->fn_oid) &&
1063  get_attstatsslot(&sslot, vardata->statsTuple,
1064  STATISTIC_KIND_HISTOGRAM, InvalidOid,
1065  ATTSTATSSLOT_VALUES))
1066  {
1067  if (sslot.nvalues > 1 &&
1068  sslot.stacoll == collation &&
1069  comparison_ops_are_compatible(sslot.staop, opoid))
1070  {
1071  /*
1072  * Use binary search to find the desired location, namely the
1073  * right end of the histogram bin containing the comparison value,
1074  * which is the leftmost entry for which the comparison operator
1075  * succeeds (if isgt) or fails (if !isgt).
1076  *
1077  * In this loop, we pay no attention to whether the operator iseq
1078  * or not; that detail will be mopped up below. (We cannot tell,
1079  * anyway, whether the operator thinks the values are equal.)
1080  *
1081  * If the binary search accesses the first or last histogram
1082  * entry, we try to replace that endpoint with the true column min
1083  * or max as found by get_actual_variable_range(). This
1084  * ameliorates misestimates when the min or max is moving as a
1085  * result of changes since the last ANALYZE. Note that this could
1086  * result in effectively including MCVs into the histogram that
1087  * weren't there before, but we don't try to correct for that.
1088  */
1089  double histfrac;
1090  int lobound = 0; /* first possible slot to search */
1091  int hibound = sslot.nvalues; /* last+1 slot to search */
1092  bool have_end = false;
1093 
1094  /*
1095  * If there are only two histogram entries, we'll want up-to-date
1096  * values for both. (If there are more than two, we need at most
1097  * one of them to be updated, so we deal with that within the
1098  * loop.)
1099  */
1100  if (sslot.nvalues == 2)
1101  have_end = get_actual_variable_range(root,
1102  vardata,
1103  sslot.staop,
1104  collation,
1105  &sslot.values[0],
1106  &sslot.values[1]);
1107 
1108  while (lobound < hibound)
1109  {
1110  int probe = (lobound + hibound) / 2;
1111  bool ltcmp;
1112 
1113  /*
1114  * If we find ourselves about to compare to the first or last
1115  * histogram entry, first try to replace it with the actual
1116  * current min or max (unless we already did so above).
1117  */
1118  if (probe == 0 && sslot.nvalues > 2)
1119  have_end = get_actual_variable_range(root,
1120  vardata,
1121  sslot.staop,
1122  collation,
1123  &sslot.values[0],
1124  NULL);
1125  else if (probe == sslot.nvalues - 1 && sslot.nvalues > 2)
1126  have_end = get_actual_variable_range(root,
1127  vardata,
1128  sslot.staop,
1129  collation,
1130  NULL,
1131  &sslot.values[probe]);
1132 
1133  ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
1134  collation,
1135  sslot.values[probe],
1136  constval));
1137  if (isgt)
1138  ltcmp = !ltcmp;
1139  if (ltcmp)
1140  lobound = probe + 1;
1141  else
1142  hibound = probe;
1143  }
1144 
1145  if (lobound <= 0)
1146  {
1147  /*
1148  * Constant is below lower histogram boundary. More
1149  * precisely, we have found that no entry in the histogram
1150  * satisfies the inequality clause (if !isgt) or they all do
1151  * (if isgt). We estimate that that's true of the entire
1152  * table, so set histfrac to 0.0 (which we'll flip to 1.0
1153  * below, if isgt).
1154  */
1155  histfrac = 0.0;
1156  }
1157  else if (lobound >= sslot.nvalues)
1158  {
1159  /*
1160  * Inverse case: constant is above upper histogram boundary.
1161  */
1162  histfrac = 1.0;
1163  }
1164  else
1165  {
1166  /* We have values[i-1] <= constant <= values[i]. */
1167  int i = lobound;
1168  double eq_selec = 0;
1169  double val,
1170  high,
1171  low;
1172  double binfrac;
1173 
1174  /*
1175  * In the cases where we'll need it below, obtain an estimate
1176  * of the selectivity of "x = constval". We use a calculation
1177  * similar to what var_eq_const() does for a non-MCV constant,
1178  * ie, estimate that all distinct non-MCV values occur equally
1179  * often. But multiplication by "1.0 - sumcommon - nullfrac"
1180  * will be done by our caller, so we shouldn't do that here.
1181  * Therefore we can't try to clamp the estimate by reference
1182  * to the least common MCV; the result would be too small.
1183  *
1184  * Note: since this is effectively assuming that constval
1185  * isn't an MCV, it's logically dubious if constval in fact is
1186  * one. But we have to apply *some* correction for equality,
1187  * and anyway we cannot tell if constval is an MCV, since we
1188  * don't have a suitable equality operator at hand.
1189  */
1190  if (i == 1 || isgt == iseq)
1191  {
1192  double otherdistinct;
1193  bool isdefault;
1194  AttStatsSlot mcvslot;
1195 
1196  /* Get estimated number of distinct values */
1197  otherdistinct = get_variable_numdistinct(vardata,
1198  &isdefault);
1199 
1200  /* Subtract off the number of known MCVs */
1201  if (get_attstatsslot(&mcvslot, vardata->statsTuple,
1202  STATISTIC_KIND_MCV, InvalidOid,
1203  ATTSTATSSLOT_NUMBERS))
1204  {
1205  otherdistinct -= mcvslot.nnumbers;
1206  free_attstatsslot(&mcvslot);
1207  }
1208 
1209  /* If result doesn't seem sane, leave eq_selec at 0 */
1210  if (otherdistinct > 1)
1211  eq_selec = 1.0 / otherdistinct;
1212  }
1213 
1214  /*
1215  * Convert the constant and the two nearest bin boundary
1216  * values to a uniform comparison scale, and do a linear
1217  * interpolation within this bin.
1218  */
1219  if (convert_to_scalar(constval, consttype, collation,
1220  &val,
1221  sslot.values[i - 1], sslot.values[i],
1222  vardata->vartype,
1223  &low, &high))
1224  {
1225  if (high <= low)
1226  {
1227  /* cope if bin boundaries appear identical */
1228  binfrac = 0.5;
1229  }
1230  else if (val <= low)
1231  binfrac = 0.0;
1232  else if (val >= high)
1233  binfrac = 1.0;
1234  else
1235  {
1236  binfrac = (val - low) / (high - low);
1237 
1238  /*
1239  * Watch out for the possibility that we got a NaN or
1240  * Infinity from the division. This can happen
1241  * despite the previous checks, if for example "low"
1242  * is -Infinity.
1243  */
1244  if (isnan(binfrac) ||
1245  binfrac < 0.0 || binfrac > 1.0)
1246  binfrac = 0.5;
1247  }
1248  }
1249  else
1250  {
1251  /*
1252  * Ideally we'd produce an error here, on the grounds that
1253  * the given operator shouldn't have scalarXXsel
1254  * registered as its selectivity func unless we can deal
1255  * with its operand types. But currently, all manner of
1256  * stuff is invoking scalarXXsel, so give a default
1257  * estimate until that can be fixed.
1258  */
1259  binfrac = 0.5;
1260  }
1261 
1262  /*
1263  * Now, compute the overall selectivity across the values
1264  * represented by the histogram. We have i-1 full bins and
1265  * binfrac partial bin below the constant.
1266  */
1267  histfrac = (double) (i - 1) + binfrac;
1268  histfrac /= (double) (sslot.nvalues - 1);
1269 
1270  /*
1271  * At this point, histfrac is an estimate of the fraction of
1272  * the population represented by the histogram that satisfies
1273  * "x <= constval". Somewhat remarkably, this statement is
1274  * true regardless of which operator we were doing the probes
1275  * with, so long as convert_to_scalar() delivers reasonable
1276  * results. If the probe constant is equal to some histogram
1277  * entry, we would have considered the bin to the left of that
1278  * entry if probing with "<" or ">=", or the bin to the right
1279  * if probing with "<=" or ">"; but binfrac would have come
1280  * out as 1.0 in the first case and 0.0 in the second, leading
1281  * to the same histfrac in either case. For probe constants
1282  * between histogram entries, we find the same bin and get the
1283  * same estimate with any operator.
1284  *
1285  * The fact that the estimate corresponds to "x <= constval"
1286  * and not "x < constval" is because of the way that ANALYZE
1287  * constructs the histogram: each entry is, effectively, the
1288  * rightmost value in its sample bucket. So selectivity
1289  * values that are exact multiples of 1/(histogram_size-1)
1290  * should be understood as estimates including a histogram
1291  * entry plus everything to its left.
1292  *
1293  * However, that breaks down for the first histogram entry,
1294  * which necessarily is the leftmost value in its sample
1295  * bucket. That means the first histogram bin is slightly
1296  * narrower than the rest, by an amount equal to eq_selec.
1297  * Another way to say that is that we want "x <= leftmost" to
1298  * be estimated as eq_selec not zero. So, if we're dealing
1299  * with the first bin (i==1), rescale to make that true while
1300  * adjusting the rest of that bin linearly.
1301  */
1302  if (i == 1)
1303  histfrac += eq_selec * (1.0 - binfrac);
1304 
1305  /*
1306  * "x <= constval" is good if we want an estimate for "<=" or
1307  * ">", but if we are estimating for "<" or ">=", we now need
1308  * to decrease the estimate by eq_selec.
1309  */
1310  if (isgt == iseq)
1311  histfrac -= eq_selec;
1312  }
1313 
1314  /*
1315  * Now the estimate is finished for "<" and "<=" cases. If we are
1316  * estimating for ">" or ">=", flip it.
1317  */
1318  hist_selec = isgt ? (1.0 - histfrac) : histfrac;
1319 
1320  /*
1321  * The histogram boundaries are only approximate to begin with,
1322  * and may well be out of date anyway. Therefore, don't believe
1323  * extremely small or large selectivity estimates --- unless we
1324  * got actual current endpoint values from the table, in which
1325  * case just do the usual sanity clamp. Somewhat arbitrarily, we
1326  * set the cutoff for other cases at a hundredth of the histogram
1327  * resolution.
1328  */
1329  if (have_end)
1330  CLAMP_PROBABILITY(hist_selec);
1331  else
1332  {
1333  double cutoff = 0.01 / (double) (sslot.nvalues - 1);
1334 
1335  if (hist_selec < cutoff)
1336  hist_selec = cutoff;
1337  else if (hist_selec > 1.0 - cutoff)
1338  hist_selec = 1.0 - cutoff;
1339  }
1340  }
1341  else if (sslot.nvalues > 1)
1342  {
1343  /*
1344  * If we get here, we have a histogram but it's not sorted the way
1345  * we want. Do a brute-force search to see how many of the
1346  * entries satisfy the comparison condition, and take that
1347  * fraction as our estimate. (This is identical to the inner loop
1348  * of histogram_selectivity; maybe share code?)
1349  */
1350  LOCAL_FCINFO(fcinfo, 2);
1351  int nmatch = 0;
1352 
1353  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
1354  NULL, NULL);
1355  fcinfo->args[0].isnull = false;
1356  fcinfo->args[1].isnull = false;
1357  fcinfo->args[1].value = constval;
1358  for (int i = 0; i < sslot.nvalues; i++)
1359  {
1360  Datum fresult;
1361 
1362  fcinfo->args[0].value = sslot.values[i];
1363  fcinfo->isnull = false;
1364  fresult = FunctionCallInvoke(fcinfo);
1365  if (!fcinfo->isnull && DatumGetBool(fresult))
1366  nmatch++;
1367  }
1368  hist_selec = ((double) nmatch) / ((double) sslot.nvalues);
1369 
1370  /*
1371  * As above, clamp to a hundredth of the histogram resolution.
1372  * This case is surely even less trustworthy than the normal one,
1373  * so we shouldn't believe exact 0 or 1 selectivity. (Maybe the
1374  * clamp should be more restrictive in this case?)
1375  */
1376  {
1377  double cutoff = 0.01 / (double) (sslot.nvalues - 1);
1378 
1379  if (hist_selec < cutoff)
1380  hist_selec = cutoff;
1381  else if (hist_selec > 1.0 - cutoff)
1382  hist_selec = 1.0 - cutoff;
1383  }
1384  }
1385 
1386  free_attstatsslot(&sslot);
1387  }
1388 
1389  return hist_selec;
1390 }
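A concrete illustration of the interpolation above, ignoring the eq_selec refinements and using made-up numbers: with 101 histogram entries (100 bins), suppose the binary search yields i = 41 and convert_to_scalar() places the constant 30% of the way through that bin (binfrac = 0.3). Then histfrac = (41 - 1 + 0.3) / (101 - 1) = 0.403, i.e. about 40% of the histogrammed population is estimated to satisfy "x <= constval"; for a ">" or ">=" probe the result is flipped to 1.0 - 0.403 = 0.597.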
1391 
1392 /*
1393  * Common wrapper function for the selectivity estimators that simply
1394  * invoke scalarineqsel().
1395  */
1396 static Datum
1397 scalarineqsel_wrapper(PG_FUNCTION_ARGS, bool isgt, bool iseq)
1398 {
1399  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1400  Oid operator = PG_GETARG_OID(1);
1401  List *args = (List *) PG_GETARG_POINTER(2);
1402  int varRelid = PG_GETARG_INT32(3);
1403  Oid collation = PG_GET_COLLATION();
1404  VariableStatData vardata;
1405  Node *other;
1406  bool varonleft;
1407  Datum constval;
1408  Oid consttype;
1409  double selec;
1410 
1411  /*
1412  * If expression is not variable op something or something op variable,
1413  * then punt and return a default estimate.
1414  */
1415  if (!get_restriction_variable(root, args, varRelid,
1416  &vardata, &other, &varonleft))
1417  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1418 
1419  /*
1420  * Can't do anything useful if the something is not a constant, either.
1421  */
1422  if (!IsA(other, Const))
1423  {
1424  ReleaseVariableStats(vardata);
1425  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1426  }
1427 
1428  /*
1429  * If the constant is NULL, assume operator is strict and return zero, ie,
1430  * operator will never return TRUE.
1431  */
1432  if (((Const *) other)->constisnull)
1433  {
1434  ReleaseVariableStats(vardata);
1435  PG_RETURN_FLOAT8(0.0);
1436  }
1437  constval = ((Const *) other)->constvalue;
1438  consttype = ((Const *) other)->consttype;
1439 
1440  /*
1441  * Force the var to be on the left to simplify logic in scalarineqsel.
1442  */
1443  if (!varonleft)
1444  {
1445  operator = get_commutator(operator);
1446  if (!operator)
1447  {
1448  /* Use default selectivity (should we raise an error instead?) */
1449  ReleaseVariableStats(vardata);
1450  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1451  }
1452  isgt = !isgt;
1453  }
1454 
1455  /* The rest of the work is done by scalarineqsel(). */
1456  selec = scalarineqsel(root, operator, isgt, iseq, collation,
1457  &vardata, constval, consttype);
1458 
1459  ReleaseVariableStats(vardata);
1460 
1461  PG_RETURN_FLOAT8((float8) selec);
1462 }
1463 
1464 /*
1465  * scalarltsel - Selectivity of "<" for scalars.
1466  */
1467 Datum
1468 scalarltsel(PG_FUNCTION_ARGS)
1469 {
1470  return scalarineqsel_wrapper(fcinfo, false, false);
1471 }
1472 
1473 /*
1474  * scalarlesel - Selectivity of "<=" for scalars.
1475  */
1476 Datum
1477 scalarlesel(PG_FUNCTION_ARGS)
1478 {
1479  return scalarineqsel_wrapper(fcinfo, false, true);
1480 }
1481 
1482 /*
1483  * scalargtsel - Selectivity of ">" for scalars.
1484  */
1485 Datum
1486 scalargtsel(PG_FUNCTION_ARGS)
1487 {
1488  return scalarineqsel_wrapper(fcinfo, true, false);
1489 }
1490 
1491 /*
1492  * scalargesel - Selectivity of ">=" for scalars.
1493  */
1494 Datum
1495 scalargesel(PG_FUNCTION_ARGS)
1496 {
1497  return scalarineqsel_wrapper(fcinfo, true, true);
1498 }
1499 
1500 /*
1501  * boolvarsel - Selectivity of Boolean variable.
1502  *
1503  * This can actually be called on any boolean-valued expression. If it
1504  * involves only Vars of the specified relation, and if there are statistics
1505  * about the Var or expression (the latter is possible if it's indexed) then
1506  * we'll produce a real estimate; otherwise it's just a default.
1507  */
1508 Selectivity
1509 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1510 {
1511  VariableStatData vardata;
1512  double selec;
1513 
1514  examine_variable(root, arg, varRelid, &vardata);
1515  if (HeapTupleIsValid(vardata.statsTuple))
1516  {
1517  /*
1518  * A boolean variable V is equivalent to the clause V = 't', so we
1519  * compute the selectivity as if that is what we have.
1520  */
1521  selec = var_eq_const(&vardata, BooleanEqualOperator, InvalidOid,
1522  BoolGetDatum(true), false, true, false);
1523  }
1524  else
1525  {
1526  /* Otherwise, the default estimate is 0.5 */
1527  selec = 0.5;
1528  }
1529  ReleaseVariableStats(vardata);
1530  return selec;
1531 }
1532 
1533 /*
1534  * booltestsel - Selectivity of BooleanTest Node.
1535  */
1536 Selectivity
1537 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1538  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1539 {
1540  VariableStatData vardata;
1541  double selec;
1542 
1543  examine_variable(root, arg, varRelid, &vardata);
1544 
1545  if (HeapTupleIsValid(vardata.statsTuple))
1546  {
1547  Form_pg_statistic stats;
1548  double freq_null;
1549  AttStatsSlot sslot;
1550 
1551  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1552  freq_null = stats->stanullfrac;
1553 
1554  if (get_attstatsslot(&sslot, vardata.statsTuple,
1555  STATISTIC_KIND_MCV, InvalidOid,
1556  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)
1557  && sslot.nnumbers > 0)
1558  {
1559  double freq_true;
1560  double freq_false;
1561 
1562  /*
1563  * Get first MCV frequency and derive frequency for true.
1564  */
1565  if (DatumGetBool(sslot.values[0]))
1566  freq_true = sslot.numbers[0];
1567  else
1568  freq_true = 1.0 - sslot.numbers[0] - freq_null;
1569 
1570  /*
1571  * Next derive frequency for false. Then use these as appropriate
1572  * to derive frequency for each case.
1573  */
1574  freq_false = 1.0 - freq_true - freq_null;
1575 
1576  switch (booltesttype)
1577  {
1578  case IS_UNKNOWN:
1579  /* select only NULL values */
1580  selec = freq_null;
1581  break;
1582  case IS_NOT_UNKNOWN:
1583  /* select non-NULL values */
1584  selec = 1.0 - freq_null;
1585  break;
1586  case IS_TRUE:
1587  /* select only TRUE values */
1588  selec = freq_true;
1589  break;
1590  case IS_NOT_TRUE:
1591  /* select non-TRUE values */
1592  selec = 1.0 - freq_true;
1593  break;
1594  case IS_FALSE:
1595  /* select only FALSE values */
1596  selec = freq_false;
1597  break;
1598  case IS_NOT_FALSE:
1599  /* select non-FALSE values */
1600  selec = 1.0 - freq_false;
1601  break;
1602  default:
1603  elog(ERROR, "unrecognized booltesttype: %d",
1604  (int) booltesttype);
1605  selec = 0.0; /* Keep compiler quiet */
1606  break;
1607  }
1608 
1609  free_attstatsslot(&sslot);
1610  }
1611  else
1612  {
1613  /*
1614  * No most-common-value info available. Still have null fraction
1615  * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1616  * for null fraction and assume a 50-50 split of TRUE and FALSE.
1617  */
1618  switch (booltesttype)
1619  {
1620  case IS_UNKNOWN:
1621  /* select only NULL values */
1622  selec = freq_null;
1623  break;
1624  case IS_NOT_UNKNOWN:
1625  /* select non-NULL values */
1626  selec = 1.0 - freq_null;
1627  break;
1628  case IS_TRUE:
1629  case IS_FALSE:
1630  /* Assume we select half of the non-NULL values */
1631  selec = (1.0 - freq_null) / 2.0;
1632  break;
1633  case IS_NOT_TRUE:
1634  case IS_NOT_FALSE:
1635  /* Assume we select NULLs plus half of the non-NULLs */
1636  /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1637  selec = (freq_null + 1.0) / 2.0;
1638  break;
1639  default:
1640  elog(ERROR, "unrecognized booltesttype: %d",
1641  (int) booltesttype);
1642  selec = 0.0; /* Keep compiler quiet */
1643  break;
1644  }
1645  }
1646  }
1647  else
1648  {
1649  /*
1650  * If we can't get variable statistics for the argument, perhaps
1651  * clause_selectivity can do something with it. We ignore the
1652  * possibility of a NULL value when using clause_selectivity, and just
1653  * assume the value is either TRUE or FALSE.
1654  */
1655  switch (booltesttype)
1656  {
1657  case IS_UNKNOWN:
1658  selec = DEFAULT_UNK_SEL;
1659  break;
1660  case IS_NOT_UNKNOWN:
1661  selec = DEFAULT_NOT_UNK_SEL;
1662  break;
1663  case IS_TRUE:
1664  case IS_NOT_FALSE:
1665  selec = (double) clause_selectivity(root, arg,
1666  varRelid,
1667  jointype, sjinfo);
1668  break;
1669  case IS_FALSE:
1670  case IS_NOT_TRUE:
1671  selec = 1.0 - (double) clause_selectivity(root, arg,
1672  varRelid,
1673  jointype, sjinfo);
1674  break;
1675  default:
1676  elog(ERROR, "unrecognized booltesttype: %d",
1677  (int) booltesttype);
1678  selec = 0.0; /* Keep compiler quiet */
1679  break;
1680  }
1681  }
1682 
1683  ReleaseVariableStats(vardata);
1684 
1685  /* result should be in range, but make sure... */
1686  CLAMP_PROBABILITY(selec);
1687 
1688  return (Selectivity) selec;
1689 }
1690 
1691 /*
1692  * nulltestsel - Selectivity of NullTest Node.
1693  */
1694 Selectivity
1695 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1696  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1697 {
1698  VariableStatData vardata;
1699  double selec;
1700 
1701  examine_variable(root, arg, varRelid, &vardata);
1702 
1703  if (HeapTupleIsValid(vardata.statsTuple))
1704  {
1705  Form_pg_statistic stats;
1706  double freq_null;
1707 
1708  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1709  freq_null = stats->stanullfrac;
1710 
1711  switch (nulltesttype)
1712  {
1713  case IS_NULL:
1714 
1715  /*
1716  * Use freq_null directly.
1717  */
1718  selec = freq_null;
1719  break;
1720  case IS_NOT_NULL:
1721 
1722  /*
1723  * Select not unknown (not null) values. Calculate from
1724  * freq_null.
1725  */
1726  selec = 1.0 - freq_null;
1727  break;
1728  default:
1729  elog(ERROR, "unrecognized nulltesttype: %d",
1730  (int) nulltesttype);
1731  return (Selectivity) 0; /* keep compiler quiet */
1732  }
1733  }
1734  else if (vardata.var && IsA(vardata.var, Var) &&
1735  ((Var *) vardata.var)->varattno < 0)
1736  {
1737  /*
1738  * There are no stats for system columns, but we know they are never
1739  * NULL.
1740  */
1741  selec = (nulltesttype == IS_NULL) ? 0.0 : 1.0;
1742  }
1743  else
1744  {
1745  /*
1746  * No ANALYZE stats available, so make a guess
1747  */
1748  switch (nulltesttype)
1749  {
1750  case IS_NULL:
1751  selec = DEFAULT_UNK_SEL;
1752  break;
1753  case IS_NOT_NULL:
1754  selec = DEFAULT_NOT_UNK_SEL;
1755  break;
1756  default:
1757  elog(ERROR, "unrecognized nulltesttype: %d",
1758  (int) nulltesttype);
1759  return (Selectivity) 0; /* keep compiler quiet */
1760  }
1761  }
1762 
1763  ReleaseVariableStats(vardata);
1764 
1765  /* result should be in range, but make sure... */
1766  CLAMP_PROBABILITY(selec);
1767 
1768  return (Selectivity) selec;
1769 }
1770 
1771 /*
1772  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1773  *
1774  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1775  * but it seems possible that RelabelType might show up. Also, the planner
1776  * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1777  * so we need to be ready to deal with more than one level.
1778  */
1779 static Node *
1780 strip_array_coercion(Node *node)
1781 {
1782  for (;;)
1783  {
1784  if (node && IsA(node, ArrayCoerceExpr))
1785  {
1786  ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
1787 
1788  /*
1789  * If the per-element expression is just a RelabelType on top of
1790  * CaseTestExpr, then we know it's a binary-compatible relabeling.
1791  */
1792  if (IsA(acoerce->elemexpr, RelabelType) &&
1793  IsA(((RelabelType *) acoerce->elemexpr)->arg, CaseTestExpr))
1794  node = (Node *) acoerce->arg;
1795  else
1796  break;
1797  }
1798  else if (node && IsA(node, RelabelType))
1799  {
1800  /* We don't really expect this case, but may as well cope */
1801  node = (Node *) ((RelabelType *) node)->arg;
1802  }
1803  else
1804  break;
1805  }
1806  return node;
1807 }
1808 
1809 /*
1810  * scalararraysel - Selectivity of ScalarArrayOpExpr Node.
1811  */
1812 Selectivity
1813 scalararraysel(PlannerInfo *root,
1814  ScalarArrayOpExpr *clause,
1815  bool is_join_clause,
1816  int varRelid,
1817  JoinType jointype,
1818  SpecialJoinInfo *sjinfo)
1819 {
1820  Oid operator = clause->opno;
1821  bool useOr = clause->useOr;
1822  bool isEquality = false;
1823  bool isInequality = false;
1824  Node *leftop;
1825  Node *rightop;
1826  Oid nominal_element_type;
1827  Oid nominal_element_collation;
1828  TypeCacheEntry *typentry;
1829  RegProcedure oprsel;
1830  FmgrInfo oprselproc;
1831  Selectivity s1;
1832  Selectivity s1disjoint;
1833 
1834  /* First, deconstruct the expression */
1835  Assert(list_length(clause->args) == 2);
1836  leftop = (Node *) linitial(clause->args);
1837  rightop = (Node *) lsecond(clause->args);
1838 
1839  /* aggressively reduce both sides to constants */
1840  leftop = estimate_expression_value(root, leftop);
1841  rightop = estimate_expression_value(root, rightop);
1842 
1843  /* get nominal (after relabeling) element type of rightop */
1844  nominal_element_type = get_base_element_type(exprType(rightop));
1845  if (!OidIsValid(nominal_element_type))
1846  return (Selectivity) 0.5; /* probably shouldn't happen */
1847  /* get nominal collation, too, for generating constants */
1848  nominal_element_collation = exprCollation(rightop);
1849 
1850  /* look through any binary-compatible relabeling of rightop */
1851  rightop = strip_array_coercion(rightop);
1852 
1853  /*
1854  * Detect whether the operator is the default equality or inequality
1855  * operator of the array element type.
1856  */
1857  typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1858  if (OidIsValid(typentry->eq_opr))
1859  {
1860  if (operator == typentry->eq_opr)
1861  isEquality = true;
1862  else if (get_negator(operator) == typentry->eq_opr)
1863  isInequality = true;
1864  }
1865 
1866  /*
1867  * If it is equality or inequality, we might be able to estimate this as a
1868  * form of array containment; for instance "const = ANY(column)" can be
1869  * treated as "ARRAY[const] <@ column". scalararraysel_containment tries
1870  * that, and returns the selectivity estimate if successful, or -1 if not.
1871  */
1872  if ((isEquality || isInequality) && !is_join_clause)
1873  {
1874  s1 = scalararraysel_containment(root, leftop, rightop,
1875  nominal_element_type,
1876  isEquality, useOr, varRelid);
1877  if (s1 >= 0.0)
1878  return s1;
1879  }
1880 
1881  /*
1882  * Look up the underlying operator's selectivity estimator. Punt if it
1883  * hasn't got one.
1884  */
1885  if (is_join_clause)
1886  oprsel = get_oprjoin(operator);
1887  else
1888  oprsel = get_oprrest(operator);
1889  if (!oprsel)
1890  return (Selectivity) 0.5;
1891  fmgr_info(oprsel, &oprselproc);
1892 
1893  /*
1894  * In the array-containment check above, we must only believe that an
1895  * operator is equality or inequality if it is the default btree equality
1896  * operator (or its negator) for the element type, since those are the
1897  * operators that array containment will use. But in what follows, we can
1898  * be a little laxer, and also believe that any operators using eqsel() or
1899  * neqsel() as selectivity estimator act like equality or inequality.
1900  */
1901  if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1902  isEquality = true;
1903  else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1904  isInequality = true;
1905 
1906  /*
1907  * We consider three cases:
1908  *
1909  * 1. rightop is an Array constant: deconstruct the array, apply the
1910  * operator's selectivity function for each array element, and merge the
1911  * results in the same way that clausesel.c does for AND/OR combinations.
1912  *
1913  * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1914  * function for each element of the ARRAY[] construct, and merge.
1915  *
1916  * 3. otherwise, make a guess ...
1917  */
1918  if (rightop && IsA(rightop, Const))
1919  {
1920  Datum arraydatum = ((Const *) rightop)->constvalue;
1921  bool arrayisnull = ((Const *) rightop)->constisnull;
1922  ArrayType *arrayval;
1923  int16 elmlen;
1924  bool elmbyval;
1925  char elmalign;
1926  int num_elems;
1927  Datum *elem_values;
1928  bool *elem_nulls;
1929  int i;
1930 
1931  if (arrayisnull) /* qual can't succeed if null array */
1932  return (Selectivity) 0.0;
1933  arrayval = DatumGetArrayTypeP(arraydatum);
1934  get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1935  &elmlen, &elmbyval, &elmalign);
1936  deconstruct_array(arrayval,
1937  ARR_ELEMTYPE(arrayval),
1938  elmlen, elmbyval, elmalign,
1939  &elem_values, &elem_nulls, &num_elems);
1940 
1941  /*
1942  * For generic operators, we assume the probability of success is
1943  * independent for each array element. But for "= ANY" or "<> ALL",
1944  * if the array elements are distinct (which'd typically be the case)
1945  * then the probabilities are disjoint, and we should just sum them.
1946  *
1947  * If we were being really tense we would try to confirm that the
1948  * elements are all distinct, but that would be expensive and it
1949  * doesn't seem to be worth the cycles; it would amount to penalizing
1950  * well-written queries in favor of poorly-written ones. However, we
1951  * do protect ourselves a little bit by checking whether the
1952  * disjointness assumption leads to an impossible (out of range)
1953  * probability; if so, we fall back to the normal calculation.
1954  */
1955  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1956 
1957  for (i = 0; i < num_elems; i++)
1958  {
1959  List *args;
1960  Selectivity s2;
1961 
1962  args = list_make2(leftop,
1963  makeConst(nominal_element_type,
1964  -1,
1965  nominal_element_collation,
1966  elmlen,
1967  elem_values[i],
1968  elem_nulls[i],
1969  elmbyval));
1970  if (is_join_clause)
1971  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1972  clause->inputcollid,
1973  PointerGetDatum(root),
1974  ObjectIdGetDatum(operator),
1975  PointerGetDatum(args),
1976  Int16GetDatum(jointype),
1977  PointerGetDatum(sjinfo)));
1978  else
1979  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1980  clause->inputcollid,
1981  PointerGetDatum(root),
1982  ObjectIdGetDatum(operator),
1983  PointerGetDatum(args),
1984  Int32GetDatum(varRelid)));
1985 
1986  if (useOr)
1987  {
1988  s1 = s1 + s2 - s1 * s2;
1989  if (isEquality)
1990  s1disjoint += s2;
1991  }
1992  else
1993  {
1994  s1 = s1 * s2;
1995  if (isInequality)
1996  s1disjoint += s2 - 1.0;
1997  }
1998  }
1999 
2000  /* accept disjoint-probability estimate if in range */
2001  if ((useOr ? isEquality : isInequality) &&
2002  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2003  s1 = s1disjoint;
2004  }
2005  else if (rightop && IsA(rightop, ArrayExpr) &&
2006  !((ArrayExpr *) rightop)->multidims)
2007  {
2008  ArrayExpr *arrayexpr = (ArrayExpr *) rightop;
2009  int16 elmlen;
2010  bool elmbyval;
2011  ListCell *l;
2012 
2013  get_typlenbyval(arrayexpr->element_typeid,
2014  &elmlen, &elmbyval);
2015 
2016  /*
2017  * We use the assumption of disjoint probabilities here too, although
2018  * the odds of equal array elements are rather higher if the elements
2019  * are not all constants (which they won't be, else constant folding
2020  * would have reduced the ArrayExpr to a Const). In this path it's
2021  * critical to have the sanity check on the s1disjoint estimate.
2022  */
2023  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
2024 
2025  foreach(l, arrayexpr->elements)
2026  {
2027  Node *elem = (Node *) lfirst(l);
2028  List *args;
2029  Selectivity s2;
2030 
2031  /*
2032  * Theoretically, if elem isn't of nominal_element_type we should
2033  * insert a RelabelType, but it seems unlikely that any operator
2034  * estimation function would really care ...
2035  */
2036  args = list_make2(leftop, elem);
2037  if (is_join_clause)
2038  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2039  clause->inputcollid,
2040  PointerGetDatum(root),
2041  ObjectIdGetDatum(operator),
2042  PointerGetDatum(args),
2043  Int16GetDatum(jointype),
2044  PointerGetDatum(sjinfo)));
2045  else
2046  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2047  clause->inputcollid,
2048  PointerGetDatum(root),
2049  ObjectIdGetDatum(operator),
2050  PointerGetDatum(args),
2051  Int32GetDatum(varRelid)));
2052 
2053  if (useOr)
2054  {
2055  s1 = s1 + s2 - s1 * s2;
2056  if (isEquality)
2057  s1disjoint += s2;
2058  }
2059  else
2060  {
2061  s1 = s1 * s2;
2062  if (isInequality)
2063  s1disjoint += s2 - 1.0;
2064  }
2065  }
2066 
2067  /* accept disjoint-probability estimate if in range */
2068  if ((useOr ? isEquality : isInequality) &&
2069  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2070  s1 = s1disjoint;
2071  }
2072  else
2073  {
2074  CaseTestExpr *dummyexpr;
2075  List *args;
2076  Selectivity s2;
2077  int i;
2078 
2079  /*
2080  * We need a dummy rightop to pass to the operator selectivity
2081  * routine. It can be pretty much anything that doesn't look like a
2082  * constant; CaseTestExpr is a convenient choice.
2083  */
2084  dummyexpr = makeNode(CaseTestExpr);
2085  dummyexpr->typeId = nominal_element_type;
2086  dummyexpr->typeMod = -1;
2087  dummyexpr->collation = clause->inputcollid;
2088  args = list_make2(leftop, dummyexpr);
2089  if (is_join_clause)
2090  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2091  clause->inputcollid,
2092  PointerGetDatum(root),
2093  ObjectIdGetDatum(operator),
2094  PointerGetDatum(args),
2095  Int16GetDatum(jointype),
2096  PointerGetDatum(sjinfo)));
2097  else
2098  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2099  clause->inputcollid,
2100  PointerGetDatum(root),
2101  ObjectIdGetDatum(operator),
2102  PointerGetDatum(args),
2103  Int32GetDatum(varRelid)));
2104  s1 = useOr ? 0.0 : 1.0;
2105 
2106  /*
2107  * Arbitrarily assume 10 elements in the eventual array value (see
2108  * also estimate_array_length). We don't risk an assumption of
2109  * disjoint probabilities here.
2110  */
2111  for (i = 0; i < 10; i++)
2112  {
2113  if (useOr)
2114  s1 = s1 + s2 - s1 * s2;
2115  else
2116  s1 = s1 * s2;
2117  }
2118  }
2119 
2120  /* result should be in range, but make sure... */
2121  CLAMP_PROBABILITY(s1);
2122 
2123  return s1;
2124 }
2125 
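The difference between the independent-probability merge and the disjoint-probability shortcut used above is easiest to see with concrete numbers. A standalone sketch (not part of selfuncs.c), assuming a hypothetical per-element selectivity of 0.01 for "x = elem" (hence 0.99 for "x <> elem") and a three-element array constant:

#include <stdio.h>

int
main(void)
{
	double		s_eq = 0.01;		/* hypothetical selectivity of "x = elem" */
	double		s_ne = 1.0 - s_eq;	/* selectivity of "x <> elem" */
	double		s1_or = 0.0,
				s1_or_disjoint = 0.0,
				s1_and = 1.0,
				s1_and_disjoint = 1.0;
	int			i;

	for (i = 0; i < 3; i++)
	{
		/* "= ANY": independent OR merge vs. disjoint sum */
		s1_or = s1_or + s_eq - s1_or * s_eq;
		s1_or_disjoint += s_eq;
		/* "<> ALL": independent AND merge vs. disjoint sum */
		s1_and = s1_and * s_ne;
		s1_and_disjoint += s_ne - 1.0;
	}
	printf("= ANY : %.4f (independent) vs %.4f (disjoint)\n",
		   s1_or, s1_or_disjoint);		/* 0.0297 vs 0.0300 */
	printf("<> ALL: %.4f (independent) vs %.4f (disjoint)\n",
		   s1_and, s1_and_disjoint);	/* 0.9703 vs 0.9700 */
	return 0;
}

With true equality the two estimates differ only in the cross terms, which is why the disjoint sum is accepted whenever it stays within [0, 1].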
2126 /*
2127  * Estimate number of elements in the array yielded by an expression.
2128  *
2129  * It's important that this agree with scalararraysel.
2130  */
2131 int
2132 estimate_array_length(Node *arrayexpr)
2133 {
2134  /* look through any binary-compatible relabeling of arrayexpr */
2135  arrayexpr = strip_array_coercion(arrayexpr);
2136 
2137  if (arrayexpr && IsA(arrayexpr, Const))
2138  {
2139  Datum arraydatum = ((Const *) arrayexpr)->constvalue;
2140  bool arrayisnull = ((Const *) arrayexpr)->constisnull;
2141  ArrayType *arrayval;
2142 
2143  if (arrayisnull)
2144  return 0;
2145  arrayval = DatumGetArrayTypeP(arraydatum);
2146  return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2147  }
2148  else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2149  !((ArrayExpr *) arrayexpr)->multidims)
2150  {
2151  return list_length(((ArrayExpr *) arrayexpr)->elements);
2152  }
2153  else
2154  {
2155  /* default guess --- see also scalararraysel */
2156  return 10;
2157  }
2158 }
2159 
2160 /*
2161  * rowcomparesel - Selectivity of RowCompareExpr Node.
2162  *
2163  * We estimate RowCompare selectivity by considering just the first (high
2164  * order) columns, which makes it equivalent to an ordinary OpExpr. While
2165  * this estimate could be refined by considering additional columns, it
2166  * seems unlikely that we could do a lot better without multi-column
2167  * statistics.
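 * For example, "(a, b) < (10, 20)" is estimated with the same selectivity
 * as "a < 10" would be.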
2168  */
2169 Selectivity
2170 rowcomparesel(PlannerInfo *root,
2171  RowCompareExpr *clause,
2172  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2173 {
2174  Selectivity s1;
2175  Oid opno = linitial_oid(clause->opnos);
2176  Oid inputcollid = linitial_oid(clause->inputcollids);
2177  List *opargs;
2178  bool is_join_clause;
2179 
2180  /* Build equivalent arg list for single operator */
2181  opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2182 
2183  /*
2184  * Decide if it's a join clause. This should match clausesel.c's
2185  * treat_as_join_clause(), except that we intentionally consider only the
2186  * leading columns and not the rest of the clause.
2187  */
2188  if (varRelid != 0)
2189  {
2190  /*
2191  * Caller is forcing restriction mode (eg, because we are examining an
2192  * inner indexscan qual).
2193  */
2194  is_join_clause = false;
2195  }
2196  else if (sjinfo == NULL)
2197  {
2198  /*
2199  * It must be a restriction clause, since it's being evaluated at a
2200  * scan node.
2201  */
2202  is_join_clause = false;
2203  }
2204  else
2205  {
2206  /*
2207  * Otherwise, it's a join if there's more than one relation used.
2208  */
2209  is_join_clause = (NumRelids((Node *) opargs) > 1);
2210  }
2211 
2212  if (is_join_clause)
2213  {
2214  /* Estimate selectivity for a join clause. */
2215  s1 = join_selectivity(root, opno,
2216  opargs,
2217  inputcollid,
2218  jointype,
2219  sjinfo);
2220  }
2221  else
2222  {
2223  /* Estimate selectivity for a restriction clause. */
2224  s1 = restriction_selectivity(root, opno,
2225  opargs,
2226  inputcollid,
2227  varRelid);
2228  }
2229 
2230  return s1;
2231 }
2232 
2233 /*
2234  * eqjoinsel - Join selectivity of "="
2235  */
2236 Datum
2237 eqjoinsel(PG_FUNCTION_ARGS)
2238 {
2239  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2240  Oid operator = PG_GETARG_OID(1);
2241  List *args = (List *) PG_GETARG_POINTER(2);
2242 
2243 #ifdef NOT_USED
2244  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2245 #endif
2246  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2247  Oid collation = PG_GET_COLLATION();
2248  double selec;
2249  double selec_inner;
2250  VariableStatData vardata1;
2251  VariableStatData vardata2;
2252  double nd1;
2253  double nd2;
2254  bool isdefault1;
2255  bool isdefault2;
2256  Oid opfuncoid;
2257  AttStatsSlot sslot1;
2258  AttStatsSlot sslot2;
2259  Form_pg_statistic stats1 = NULL;
2260  Form_pg_statistic stats2 = NULL;
2261  bool have_mcvs1 = false;
2262  bool have_mcvs2 = false;
2263  bool join_is_reversed;
2264  RelOptInfo *inner_rel;
2265 
2266  get_join_variables(root, args, sjinfo,
2267  &vardata1, &vardata2, &join_is_reversed);
2268 
2269  nd1 = get_variable_numdistinct(&vardata1, &isdefault1);
2270  nd2 = get_variable_numdistinct(&vardata2, &isdefault2);
2271 
2272  opfuncoid = get_opcode(operator);
2273 
2274  memset(&sslot1, 0, sizeof(sslot1));
2275  memset(&sslot2, 0, sizeof(sslot2));
2276 
2277  if (HeapTupleIsValid(vardata1.statsTuple))
2278  {
2279  /* note we allow use of nullfrac regardless of security check */
2280  stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
2281  if (statistic_proc_security_check(&vardata1, opfuncoid))
2282  have_mcvs1 = get_attstatsslot(&sslot1, vardata1.statsTuple,
2283  STATISTIC_KIND_MCV, InvalidOid,
2284  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2285  }
2286 
2287  if (HeapTupleIsValid(vardata2.statsTuple))
2288  {
2289  /* note we allow use of nullfrac regardless of security check */
2290  stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
2291  if (statistic_proc_security_check(&vardata2, opfuncoid))
2292  have_mcvs2 = get_attstatsslot(&sslot2, vardata2.statsTuple,
2293  STATISTIC_KIND_MCV, InvalidOid,
2294  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2295  }
2296 
2297  /* We need to compute the inner-join selectivity in all cases */
2298  selec_inner = eqjoinsel_inner(opfuncoid, collation,
2299  &vardata1, &vardata2,
2300  nd1, nd2,
2301  isdefault1, isdefault2,
2302  &sslot1, &sslot2,
2303  stats1, stats2,
2304  have_mcvs1, have_mcvs2);
2305 
2306  switch (sjinfo->jointype)
2307  {
2308  case JOIN_INNER:
2309  case JOIN_LEFT:
2310  case JOIN_FULL:
2311  selec = selec_inner;
2312  break;
2313  case JOIN_SEMI:
2314  case JOIN_ANTI:
2315 
2316  /*
2317  * Look up the join's inner relation. min_righthand is sufficient
2318  * information because neither SEMI nor ANTI joins permit any
2319  * reassociation into or out of their RHS, so the righthand will
2320  * always be exactly that set of rels.
2321  */
2322  inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2323 
2324  if (!join_is_reversed)
2325  selec = eqjoinsel_semi(opfuncoid, collation,
2326  &vardata1, &vardata2,
2327  nd1, nd2,
2328  isdefault1, isdefault2,
2329  &sslot1, &sslot2,
2330  stats1, stats2,
2331  have_mcvs1, have_mcvs2,
2332  inner_rel);
2333  else
2334  {
2335  Oid commop = get_commutator(operator);
2336  Oid commopfuncoid = OidIsValid(commop) ? get_opcode(commop) : InvalidOid;
2337 
2338  selec = eqjoinsel_semi(commopfuncoid, collation,
2339  &vardata2, &vardata1,
2340  nd2, nd1,
2341  isdefault2, isdefault1,
2342  &sslot2, &sslot1,
2343  stats2, stats1,
2344  have_mcvs2, have_mcvs1,
2345  inner_rel);
2346  }
2347 
2348  /*
2349  * We should never estimate the output of a semijoin to be more
2350  * rows than we estimate for an inner join with the same input
2351  * rels and join condition; it's obviously impossible for that to
2352  * happen. The former estimate is N1 * Ssemi while the latter is
2353  * N1 * N2 * Sinner, so we may clamp Ssemi <= N2 * Sinner. Doing
2354  * this is worthwhile because of the shakier estimation rules we
2355  * use in eqjoinsel_semi, particularly in cases where it has to
2356  * punt entirely.
2357  */
2358  selec = Min(selec, inner_rel->rows * selec_inner);
2359  break;
2360  default:
2361  /* other values not expected here */
2362  elog(ERROR, "unrecognized join type: %d",
2363  (int) sjinfo->jointype);
2364  selec = 0; /* keep compiler quiet */
2365  break;
2366  }
2367 
2368  free_attstatsslot(&sslot1);
2369  free_attstatsslot(&sslot2);
2370 
2371  ReleaseVariableStats(vardata1);
2372  ReleaseVariableStats(vardata2);
2373 
2374  CLAMP_PROBABILITY(selec);
2375 
2376  PG_RETURN_FLOAT8((float8) selec);
2377 }
2378 
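The SEMI/ANTI clamp above (Ssemi <= N2 * Sinner) is a one-line bound. A tiny standalone sketch (not part of selfuncs.c) with hypothetical numbers:

#include <stdio.h>

int
main(void)
{
	double		inner_rows = 1000.0;	/* hypothetical inner rel size (N2) */
	double		selec_inner = 0.0002;	/* hypothetical inner-join selectivity */
	double		selec_semi = 0.9;		/* hypothetical, over-optimistic semi estimate */

	/* clamp Ssemi <= N2 * Sinner, as eqjoinsel() does above */
	if (selec_semi > inner_rows * selec_inner)
		selec_semi = inner_rows * selec_inner;

	printf("clamped semi-join selectivity: %.3f\n", selec_semi);	/* 0.200 */
	return 0;
}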
2379 /*
2380  * eqjoinsel_inner --- eqjoinsel for normal inner join
2381  *
2382  * We also use this for LEFT/FULL outer joins; it's not presently clear
2383  * that it's worth trying to distinguish them here.
2384  */
2385 static double
2386 eqjoinsel_inner(Oid opfuncoid, Oid collation,
2387  VariableStatData *vardata1, VariableStatData *vardata2,
2388  double nd1, double nd2,
2389  bool isdefault1, bool isdefault2,
2390  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2391  Form_pg_statistic stats1, Form_pg_statistic stats2,
2392  bool have_mcvs1, bool have_mcvs2)
2393 {
2394  double selec;
2395 
2396  if (have_mcvs1 && have_mcvs2)
2397  {
2398  /*
2399  * We have most-common-value lists for both relations. Run through
2400  * the lists to see which MCVs actually join to each other with the
2401  * given operator. This allows us to determine the exact join
2402  * selectivity for the portion of the relations represented by the MCV
2403  * lists. We still have to estimate for the remaining population, but
2404  * in a skewed distribution this gives us a big leg up in accuracy.
2405  * For motivation see the analysis in Y. Ioannidis and S.
2406  * Christodoulakis, "On the propagation of errors in the size of join
2407  * results", Technical Report 1018, Computer Science Dept., University
2408  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2409  */
2410  LOCAL_FCINFO(fcinfo, 2);
2411  FmgrInfo eqproc;
2412  bool *hasmatch1;
2413  bool *hasmatch2;
2414  double nullfrac1 = stats1->stanullfrac;
2415  double nullfrac2 = stats2->stanullfrac;
2416  double matchprodfreq,
2417  matchfreq1,
2418  matchfreq2,
2419  unmatchfreq1,
2420  unmatchfreq2,
2421  otherfreq1,
2422  otherfreq2,
2423  totalsel1,
2424  totalsel2;
2425  int i,
2426  nmatches;
2427 
2428  fmgr_info(opfuncoid, &eqproc);
2429 
2430  /*
2431  * Save a few cycles by setting up the fcinfo struct just once. Using
2432  * FunctionCallInvoke directly also avoids failure if the eqproc
2433  * returns NULL, though really equality functions should never do
2434  * that.
2435  */
2436  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2437  NULL, NULL);
2438  fcinfo->args[0].isnull = false;
2439  fcinfo->args[1].isnull = false;
2440 
2441  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2442  hasmatch2 = (bool *) palloc0(sslot2->nvalues * sizeof(bool));
2443 
2444  /*
2445  * Note we assume that each MCV will match at most one member of the
2446  * other MCV list. If the operator isn't really equality, there could
2447  * be multiple matches --- but we don't look for them, both for speed
2448  * and because the math wouldn't add up...
2449  */
2450  matchprodfreq = 0.0;
2451  nmatches = 0;
2452  for (i = 0; i < sslot1->nvalues; i++)
2453  {
2454  int j;
2455 
2456  fcinfo->args[0].value = sslot1->values[i];
2457 
2458  for (j = 0; j < sslot2->nvalues; j++)
2459  {
2460  Datum fresult;
2461 
2462  if (hasmatch2[j])
2463  continue;
2464  fcinfo->args[1].value = sslot2->values[j];
2465  fcinfo->isnull = false;
2466  fresult = FunctionCallInvoke(fcinfo);
2467  if (!fcinfo->isnull && DatumGetBool(fresult))
2468  {
2469  hasmatch1[i] = hasmatch2[j] = true;
2470  matchprodfreq += sslot1->numbers[i] * sslot2->numbers[j];
2471  nmatches++;
2472  break;
2473  }
2474  }
2475  }
2476  CLAMP_PROBABILITY(matchprodfreq);
2477  /* Sum up frequencies of matched and unmatched MCVs */
2478  matchfreq1 = unmatchfreq1 = 0.0;
2479  for (i = 0; i < sslot1->nvalues; i++)
2480  {
2481  if (hasmatch1[i])
2482  matchfreq1 += sslot1->numbers[i];
2483  else
2484  unmatchfreq1 += sslot1->numbers[i];
2485  }
2486  CLAMP_PROBABILITY(matchfreq1);
2487  CLAMP_PROBABILITY(unmatchfreq1);
2488  matchfreq2 = unmatchfreq2 = 0.0;
2489  for (i = 0; i < sslot2->nvalues; i++)
2490  {
2491  if (hasmatch2[i])
2492  matchfreq2 += sslot2->numbers[i];
2493  else
2494  unmatchfreq2 += sslot2->numbers[i];
2495  }
2496  CLAMP_PROBABILITY(matchfreq2);
2497  CLAMP_PROBABILITY(unmatchfreq2);
2498  pfree(hasmatch1);
2499  pfree(hasmatch2);
2500 
2501  /*
2502  * Compute total frequency of non-null values that are not in the MCV
2503  * lists.
2504  */
2505  otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2506  otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2507  CLAMP_PROBABILITY(otherfreq1);
2508  CLAMP_PROBABILITY(otherfreq2);
2509 
2510  /*
2511  * We can estimate the total selectivity from the point of view of
2512  * relation 1 as: the known selectivity for matched MCVs, plus
2513  * unmatched MCVs that are assumed to match against random members of
2514  * relation 2's non-MCV population, plus non-MCV values that are
2515  * assumed to match against random members of relation 2's unmatched
2516  * MCVs plus non-MCV values.
2517  */
2518  totalsel1 = matchprodfreq;
2519  if (nd2 > sslot2->nvalues)
2520  totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - sslot2->nvalues);
2521  if (nd2 > nmatches)
2522  totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2523  (nd2 - nmatches);
2524  /* Same estimate from the point of view of relation 2. */
2525  totalsel2 = matchprodfreq;
2526  if (nd1 > sslot1->nvalues)
2527  totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - sslot1->nvalues);
2528  if (nd1 > nmatches)
2529  totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2530  (nd1 - nmatches);
2531 
2532  /*
2533  * Use the smaller of the two estimates. This can be justified in
2534  * essentially the same terms as given below for the no-stats case: to
2535  * a first approximation, we are estimating from the point of view of
2536  * the relation with smaller nd.
2537  */
2538  selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2539  }
2540  else
2541  {
2542  /*
2543  * We do not have MCV lists for both sides. Estimate the join
2544  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2545  * is plausible if we assume that the join operator is strict and the
2546  * non-null values are about equally distributed: a given non-null
2547  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2548  * of rel2, so total join rows are at most
2549  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2550  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2551  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2552  * with MIN() is an upper bound. Using the MIN() means we estimate
2553  * from the point of view of the relation with smaller nd (since the
2554  * larger nd is determining the MIN). It is reasonable to assume that
2555  * most tuples in this rel will have join partners, so the bound is
2556  * probably reasonably tight and should be taken as-is.
2557  *
2558  * XXX Can we be smarter if we have an MCV list for just one side? It
2559  * seems that if we assume equal distribution for the other side, we
2560  * end up with the same answer anyway.
2561  */
2562  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2563  double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2564 
2565  selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2566  if (nd1 > nd2)
2567  selec /= nd1;
2568  else
2569  selec /= nd2;
2570  }
2571 
2572  return selec;
2573 }
2574 
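The no-MCV branch of eqjoinsel_inner boils down to MIN(1/nd1, 1/nd2) scaled by the non-null fractions. A standalone sketch (not part of selfuncs.c) with hypothetical statistics:

#include <stdio.h>

int
main(void)
{
	/* hypothetical per-side statistics */
	double		nd1 = 50.0,			/* distinct values on side 1 */
				nd2 = 2000.0,		/* distinct values on side 2 */
				nullfrac1 = 0.1,
				nullfrac2 = 0.0;
	double		selec;

	/* selec = MIN(1/nd1, 1/nd2) * (1 - nullfrac1) * (1 - nullfrac2) */
	selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
	if (nd1 > nd2)
		selec /= nd1;
	else
		selec /= nd2;

	printf("inner-join selectivity: %.6f\n", selec);	/* 0.000450 */
	return 0;
}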
2575 /*
2576  * eqjoinsel_semi --- eqjoinsel for semi join
2577  *
2578  * (Also used for anti join, which we are supposed to estimate the same way.)
2579  * Caller has ensured that vardata1 is the LHS variable.
2580  * Unlike eqjoinsel_inner, we have to cope with opfuncoid being InvalidOid.
2581  */
2582 static double
2583 eqjoinsel_semi(Oid opfuncoid, Oid collation,
2584  VariableStatData *vardata1, VariableStatData *vardata2,
2585  double nd1, double nd2,
2586  bool isdefault1, bool isdefault2,
2587  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2588  Form_pg_statistic stats1, Form_pg_statistic stats2,
2589  bool have_mcvs1, bool have_mcvs2,
2590  RelOptInfo *inner_rel)
2591 {
2592  double selec;
2593 
2594  /*
2595  * We clamp nd2 to be not more than what we estimate the inner relation's
2596  * size to be. This is intuitively somewhat reasonable since obviously
2597  * there can't be more than that many distinct values coming from the
2598  * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
2599  * likewise) is that this is the only pathway by which restriction clauses
2600  * applied to the inner rel will affect the join result size estimate,
2601  * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2602  * only the outer rel's size. If we clamped nd1 we'd be double-counting
2603  * the selectivity of outer-rel restrictions.
2604  *
2605  * We can apply this clamping both with respect to the base relation from
2606  * which the join variable comes (if there is just one), and to the
2607  * immediate inner input relation of the current join.
2608  *
2609  * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2610  * great, maybe, but it didn't come out of nowhere either. This is most
2611  * helpful when the inner relation is empty and consequently has no stats.
2612  */
2613  if (vardata2->rel)
2614  {
2615  if (nd2 >= vardata2->rel->rows)
2616  {
2617  nd2 = vardata2->rel->rows;
2618  isdefault2 = false;
2619  }
2620  }
2621  if (nd2 >= inner_rel->rows)
2622  {
2623  nd2 = inner_rel->rows;
2624  isdefault2 = false;
2625  }
2626 
2627  if (have_mcvs1 && have_mcvs2 && OidIsValid(opfuncoid))
2628  {
2629  /*
2630  * We have most-common-value lists for both relations. Run through
2631  * the lists to see which MCVs actually join to each other with the
2632  * given operator. This allows us to determine the exact join
2633  * selectivity for the portion of the relations represented by the MCV
2634  * lists. We still have to estimate for the remaining population, but
2635  * in a skewed distribution this gives us a big leg up in accuracy.
2636  */
2637  LOCAL_FCINFO(fcinfo, 2);
2638  FmgrInfo eqproc;
2639  bool *hasmatch1;
2640  bool *hasmatch2;
2641  double nullfrac1 = stats1->stanullfrac;
2642  double matchfreq1,
2643  uncertainfrac,
2644  uncertain;
2645  int i,
2646  nmatches,
2647  clamped_nvalues2;
2648 
2649  /*
2650  * The clamping above could have resulted in nd2 being less than
2651  * sslot2->nvalues; in which case, we assume that precisely the nd2
2652  * most common values in the relation will appear in the join input,
2653  * and so compare to only the first nd2 members of the MCV list. Of
2654  * course this is frequently wrong, but it's the best bet we can make.
2655  */
2656  clamped_nvalues2 = Min(sslot2->nvalues, nd2);
2657 
2658  fmgr_info(opfuncoid, &eqproc);
2659 
2660  /*
2661  * Save a few cycles by setting up the fcinfo struct just once. Using
2662  * FunctionCallInvoke directly also avoids failure if the eqproc
2663  * returns NULL, though really equality functions should never do
2664  * that.
2665  */
2666  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2667  NULL, NULL);
2668  fcinfo->args[0].isnull = false;
2669  fcinfo->args[1].isnull = false;
2670 
2671  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2672  hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2673 
2674  /*
2675  * Note we assume that each MCV will match at most one member of the
2676  * other MCV list. If the operator isn't really equality, there could
2677  * be multiple matches --- but we don't look for them, both for speed
2678  * and because the math wouldn't add up...
2679  */
2680  nmatches = 0;
2681  for (i = 0; i < sslot1->nvalues; i++)
2682  {
2683  int j;
2684 
2685  fcinfo->args[0].value = sslot1->values[i];
2686 
2687  for (j = 0; j < clamped_nvalues2; j++)
2688  {
2689  Datum fresult;
2690 
2691  if (hasmatch2[j])
2692  continue;
2693  fcinfo->args[1].value = sslot2->values[j];
2694  fcinfo->isnull = false;
2695  fresult = FunctionCallInvoke(fcinfo);
2696  if (!fcinfo->isnull && DatumGetBool(fresult))
2697  {
2698  hasmatch1[i] = hasmatch2[j] = true;
2699  nmatches++;
2700  break;
2701  }
2702  }
2703  }
2704  /* Sum up frequencies of matched MCVs */
2705  matchfreq1 = 0.0;
2706  for (i = 0; i < sslot1->nvalues; i++)
2707  {
2708  if (hasmatch1[i])
2709  matchfreq1 += sslot1->numbers[i];
2710  }
2711  CLAMP_PROBABILITY(matchfreq1);
2712  pfree(hasmatch1);
2713  pfree(hasmatch2);
2714 
2715  /*
2716  * Now we need to estimate the fraction of relation 1 that has at
2717  * least one join partner. We know for certain that the matched MCVs
2718  * do, so that gives us a lower bound, but we're really in the dark
2719  * about everything else. Our crude approach is: if nd1 <= nd2 then
2720  * assume all non-null rel1 rows have join partners, else assume for
2721  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2722  * can discount the known-matched MCVs from the distinct-values counts
2723  * before doing the division.
2724  *
2725  * Crude as the above is, it's completely useless if we don't have
2726  * reliable ndistinct values for both sides. Hence, if either nd1 or
2727  * nd2 is default, punt and assume half of the uncertain rows have
2728  * join partners.
2729  */
2730  if (!isdefault1 && !isdefault2)
2731  {
2732  nd1 -= nmatches;
2733  nd2 -= nmatches;
2734  if (nd1 <= nd2 || nd2 < 0)
2735  uncertainfrac = 1.0;
2736  else
2737  uncertainfrac = nd2 / nd1;
2738  }
2739  else
2740  uncertainfrac = 0.5;
2741  uncertain = 1.0 - matchfreq1 - nullfrac1;
2742  CLAMP_PROBABILITY(uncertain);
2743  selec = matchfreq1 + uncertainfrac * uncertain;
2744  }
2745  else
2746  {
2747  /*
2748  * Without MCV lists for both sides, we can only use the heuristic
2749  * about nd1 vs nd2.
2750  */
2751  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2752 
2753  if (!isdefault1 && !isdefault2)
2754  {
2755  if (nd1 <= nd2 || nd2 < 0)
2756  selec = 1.0 - nullfrac1;
2757  else
2758  selec = (nd2 / nd1) * (1.0 - nullfrac1);
2759  }
2760  else
2761  selec = 0.5 * (1.0 - nullfrac1);
2762  }
2763 
2764  return selec;
2765 }
2766 
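The no-MCV branch of eqjoinsel_semi applies just the nd1-vs-nd2 heuristic described above, after clamping nd2 to the inner relation's size. A standalone sketch (not part of selfuncs.c) with hypothetical inputs:

#include <stdio.h>

int
main(void)
{
	double		nd1 = 10000.0;			/* hypothetical LHS ndistinct */
	double		nd2 = 5000.0;			/* hypothetical RHS ndistinct */
	double		inner_rows = 2000.0;	/* hypothetical inner rel row estimate */
	double		nullfrac1 = 0.05;		/* hypothetical LHS stanullfrac */
	double		selec;

	/* clamp nd2 to the inner relation's estimated size, as above */
	if (nd2 >= inner_rows)
		nd2 = inner_rows;

	/* fraction of non-null LHS rows expected to find a join partner */
	if (nd1 <= nd2)
		selec = 1.0 - nullfrac1;
	else
		selec = (nd2 / nd1) * (1.0 - nullfrac1);

	printf("semi-join selectivity: %.3f\n", selec);		/* 0.190 */
	return 0;
}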
2767 /*
2768  * neqjoinsel - Join selectivity of "!="
2769  */
2770 Datum
2771 neqjoinsel(PG_FUNCTION_ARGS)
2772 {
2773  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2774  Oid operator = PG_GETARG_OID(1);
2775  List *args = (List *) PG_GETARG_POINTER(2);
2776  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2777  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2778  float8 result;
2779 
2780  if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
2781  {
2782  /*
2783  * For semi-joins, if there is more than one distinct value in the RHS
2784  * relation then every non-null LHS row must find a row to join since
2785  * it can only be equal to one of them. We'll assume that there is
2786  * always more than one distinct RHS value for the sake of stability,
2787  * though in theory we could have special cases for empty RHS
2788  * (selectivity = 0) and single-distinct-value RHS (selectivity =
2789  * fraction of LHS that has the same value as the single RHS value).
2790  *
2791  * For anti-joins, if we use the same assumption that there is more
2792  * than one distinct key in the RHS relation, then every non-null LHS
2793  * row must be suppressed by the anti-join.
2794  *
2795  * So either way, the selectivity estimate should be 1 - nullfrac.
2796  */
2797  VariableStatData leftvar;
2798  VariableStatData rightvar;
2799  bool reversed;
2800  HeapTuple statsTuple;
2801  double nullfrac;
2802 
2803  get_join_variables(root, args, sjinfo, &leftvar, &rightvar, &reversed);
2804  statsTuple = reversed ? rightvar.statsTuple : leftvar.statsTuple;
2805  if (HeapTupleIsValid(statsTuple))
2806  nullfrac = ((Form_pg_statistic) GETSTRUCT(statsTuple))->stanullfrac;
2807  else
2808  nullfrac = 0.0;
2809  ReleaseVariableStats(leftvar);
2810  ReleaseVariableStats(rightvar);
2811 
2812  result = 1.0 - nullfrac;
2813  }
2814  else
2815  {
2816  /*
2817  * We want 1 - eqjoinsel() where the equality operator is the one
2818  * associated with this != operator, that is, its negator.
2819  */
2820  Oid eqop = get_negator(operator);
2821 
2822  if (eqop)
2823  {
2824  result = DatumGetFloat8(DirectFunctionCall5(eqjoinsel,
2825  PointerGetDatum(root),
2826  ObjectIdGetDatum(eqop),
2827  PointerGetDatum(args),
2828  Int16GetDatum(jointype),
2829  PointerGetDatum(sjinfo)));
2830  }
2831  else
2832  {
2833  /* Use default selectivity (should we raise an error instead?) */
2834  result = DEFAULT_EQ_SEL;
2835  }
2836  result = 1.0 - result;
2837  }
2838 
2839  PG_RETURN_FLOAT8(result);
2840 }
2841 
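For ordinary joins the result above is just one minus the matching eqjoinsel estimate, while for SEMI/ANTI it is one minus the LHS null fraction. A small standalone sketch (not part of selfuncs.c) with hypothetical inputs:

#include <stdio.h>

int
main(void)
{
	double		eq_selec = 0.001;	/* hypothetical eqjoinsel() result */
	double		nullfrac = 0.02;	/* hypothetical LHS stanullfrac */

	printf("inner join <>     : %.3f\n", 1.0 - eq_selec);	/* 0.999 */
	printf("semi/anti join <> : %.3f\n", 1.0 - nullfrac);	/* 0.980 */
	return 0;
}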
2842 /*
2843  * scalarltjoinsel - Join selectivity of "<" for scalars
2844  */
2845 Datum
2846 scalarltjoinsel(PG_FUNCTION_ARGS)
2847 {
2848  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2849 }
2850 
2851 /*
2852  * scalarlejoinsel - Join selectivity of "<=" for scalars
2853  */
2854 Datum
2855 scalarlejoinsel(PG_FUNCTION_ARGS)
2856 {
2857  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2858 }
2859 
2860 /*
2861  * scalargtjoinsel - Join selectivity of ">" for scalars
2862  */
2863 Datum
2864 scalargtjoinsel(PG_FUNCTION_ARGS)
2865 {
2866  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2867 }
2868 
2869 /*
2870  * scalargejoinsel - Join selectivity of ">=" for scalars
2871  */
2872 Datum
2873 scalargejoinsel(PG_FUNCTION_ARGS)
2874 {
2875  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2876 }
2877 
2878 
2879 /*
2880  * mergejoinscansel - Scan selectivity of merge join.
2881  *
2882  * A merge join will stop as soon as it exhausts either input stream.
2883  * Therefore, if we can estimate the ranges of both input variables,
2884  * we can estimate how much of the input will actually be read. This
2885  * can have a considerable impact on the cost when using indexscans.
2886  *
2887  * Also, we can estimate how much of each input has to be read before the
2888  * first join pair is found, which will affect the join's startup time.
2889  *
2890  * clause should be a clause already known to be mergejoinable. opfamily,
2891  * strategy, and nulls_first specify the sort ordering being used.
2892  *
2893  * The outputs are:
2894  * *leftstart is set to the fraction of the left-hand variable expected
2895  * to be scanned before the first join pair is found (0 to 1).
2896  * *leftend is set to the fraction of the left-hand variable expected
2897  * to be scanned before the join terminates (0 to 1).
2898  * *rightstart, *rightend similarly for the right-hand variable.
2899  */
2900 void
2901 mergejoinscansel(PlannerInfo *root, Node *clause,
2902  Oid opfamily, int strategy, bool nulls_first,
2903  Selectivity *leftstart, Selectivity *leftend,
2904  Selectivity *rightstart, Selectivity *rightend)
2905 {
2906  Node *left,
2907  *right;
2908  VariableStatData leftvar,
2909  rightvar;
2910  int op_strategy;
2911  Oid op_lefttype;
2912  Oid op_righttype;
2913  Oid opno,
2914  collation,
2915  lsortop,
2916  rsortop,
2917  lstatop,
2918  rstatop,
2919  ltop,
2920  leop,
2921  revltop,
2922  revleop;
2923  bool isgt;
2924  Datum leftmin,
2925  leftmax,
2926  rightmin,
2927  rightmax;
2928  double selec;
2929 
2930  /* Set default results if we can't figure anything out. */
2931  /* XXX should default "start" fraction be a bit more than 0? */
2932  *leftstart = *rightstart = 0.0;
2933  *leftend = *rightend = 1.0;
2934 
2935  /* Deconstruct the merge clause */
2936  if (!is_opclause(clause))
2937  return; /* shouldn't happen */
2938  opno = ((OpExpr *) clause)->opno;
2939  collation = ((OpExpr *) clause)->inputcollid;
2940  left = get_leftop((Expr *) clause);
2941  right = get_rightop((Expr *) clause);
2942  if (!right)
2943  return; /* shouldn't happen */
2944 
2945  /* Look for stats for the inputs */
2946  examine_variable(root, left, 0, &leftvar);
2947  examine_variable(root, right, 0, &rightvar);
2948 
2949  /* Extract the operator's declared left/right datatypes */
2950  get_op_opfamily_properties(opno, opfamily, false,
2951  &op_strategy,
2952  &op_lefttype,
2953  &op_righttype);
2954  Assert(op_strategy == BTEqualStrategyNumber);
2955 
2956  /*
2957  * Look up the various operators we need. If we don't find them all, it
2958  * probably means the opfamily is broken, but we just fail silently.
2959  *
2960  * Note: we expect that pg_statistic histograms will be sorted by the '<'
2961  * operator, regardless of which sort direction we are considering.
2962  */
2963  switch (strategy)
2964  {
2965  case BTLessStrategyNumber:
2966  isgt = false;
2967  if (op_lefttype == op_righttype)
2968  {
2969  /* easy case */
2970  ltop = get_opfamily_member(opfamily,
2971  op_lefttype, op_righttype,
2973  leop = get_opfamily_member(opfamily,
2974  op_lefttype, op_righttype,
2976  lsortop = ltop;
2977  rsortop = ltop;
2978  lstatop = lsortop;
2979  rstatop = rsortop;
2980  revltop = ltop;
2981  revleop = leop;
2982  }
2983  else
2984  {
2985  ltop = get_opfamily_member(opfamily,
2986  op_lefttype, op_righttype,
2988  leop = get_opfamily_member(opfamily,
2989  op_lefttype, op_righttype,
2991  lsortop = get_opfamily_member(opfamily,
2992  op_lefttype, op_lefttype,
2994  rsortop = get_opfamily_member(opfamily,
2995  op_righttype, op_righttype,
2997  lstatop = lsortop;
2998  rstatop = rsortop;
2999  revltop = get_opfamily_member(opfamily,
3000  op_righttype, op_lefttype,
3002  revleop = get_opfamily_member(opfamily,
3003  op_righttype, op_lefttype,
3005  }
3006  break;
3008  /* descending-order case */
3009  isgt = true;
3010  if (op_lefttype == op_righttype)
3011  {
3012  /* easy case */
3013  ltop = get_opfamily_member(opfamily,
3014  op_lefttype, op_righttype,
3016  leop = get_opfamily_member(opfamily,
3017  op_lefttype, op_righttype,
3019  lsortop = ltop;
3020  rsortop = ltop;
3021  lstatop = get_opfamily_member(opfamily,
3022  op_lefttype, op_lefttype,
3024  rstatop = lstatop;
3025  revltop = ltop;
3026  revleop = leop;
3027  }
3028  else
3029  {
3030  ltop = get_opfamily_member(opfamily,
3031  op_lefttype, op_righttype,
3033  leop = get_opfamily_member(opfamily,
3034  op_lefttype, op_righttype,
3036  lsortop = get_opfamily_member(opfamily,
3037  op_lefttype, op_lefttype,
3039  rsortop = get_opfamily_member(opfamily,
3040  op_righttype, op_righttype,
3042  lstatop = get_opfamily_member(opfamily,
3043  op_lefttype, op_lefttype,
3045  rstatop = get_opfamily_member(opfamily,
3046  op_righttype, op_righttype,
3048  revltop = get_opfamily_member(opfamily,
3049  op_righttype, op_lefttype,
3051  revleop = get_opfamily_member(opfamily,
3052  op_righttype, op_lefttype,
3054  }
3055  break;
3056  default:
3057  goto fail; /* shouldn't get here */
3058  }
3059 
3060  if (!OidIsValid(lsortop) ||
3061  !OidIsValid(rsortop) ||
3062  !OidIsValid(lstatop) ||
3063  !OidIsValid(rstatop) ||
3064  !OidIsValid(ltop) ||
3065  !OidIsValid(leop) ||
3066  !OidIsValid(revltop) ||
3067  !OidIsValid(revleop))
3068  goto fail; /* insufficient info in catalogs */
3069 
3070  /* Try to get ranges of both inputs */
3071  if (!isgt)
3072  {
3073  if (!get_variable_range(root, &leftvar, lstatop, collation,
3074  &leftmin, &leftmax))
3075  goto fail; /* no range available from stats */
3076  if (!get_variable_range(root, &rightvar, rstatop, collation,
3077  &rightmin, &rightmax))
3078  goto fail; /* no range available from stats */
3079  }
3080  else
3081  {
3082  /* need to swap the max and min */
3083  if (!get_variable_range(root, &leftvar, lstatop, collation,
3084  &leftmax, &leftmin))
3085  goto fail; /* no range available from stats */
3086  if (!get_variable_range(root, &rightvar, rstatop, collation,
3087  &rightmax, &rightmin))
3088  goto fail; /* no range available from stats */
3089  }
3090 
3091  /*
3092  * Now, the fraction of the left variable that will be scanned is the
3093  * fraction that's <= the right-side maximum value. But only believe
3094  * non-default estimates, else stick with our 1.0.
3095  */
3096  selec = scalarineqsel(root, leop, isgt, true, collation, &leftvar,
3097  rightmax, op_righttype);
3098  if (selec != DEFAULT_INEQ_SEL)
3099  *leftend = selec;
3100 
3101  /* And similarly for the right variable. */
3102  selec = scalarineqsel(root, revleop, isgt, true, collation, &rightvar,
3103  leftmax, op_lefttype);
3104  if (selec != DEFAULT_INEQ_SEL)
3105  *rightend = selec;
3106 
3107  /*
3108  * Only one of the two "end" fractions can really be less than 1.0;
3109  * believe the smaller estimate and reset the other one to exactly 1.0. If
3110  * we get exactly equal estimates (as can easily happen with self-joins),
3111  * believe neither.
3112  */
3113  if (*leftend > *rightend)
3114  *leftend = 1.0;
3115  else if (*leftend < *rightend)
3116  *rightend = 1.0;
3117  else
3118  *leftend = *rightend = 1.0;
3119 
3120  /*
3121  * Also, the fraction of the left variable that will be scanned before the
3122  * first join pair is found is the fraction that's < the right-side
3123  * minimum value. But only believe non-default estimates, else stick with
3124  * our own default.
3125  */
3126  selec = scalarineqsel(root, ltop, isgt, false, collation, &leftvar,
3127  rightmin, op_righttype);
3128  if (selec != DEFAULT_INEQ_SEL)
3129  *leftstart = selec;
3130 
3131  /* And similarly for the right variable. */
3132  selec = scalarineqsel(root, revltop, isgt, false, collation, &rightvar,
3133  leftmin, op_lefttype);
3134  if (selec != DEFAULT_INEQ_SEL)
3135  *rightstart = selec;
3136 
3137  /*
3138  * Only one of the two "start" fractions can really be more than zero;
3139  * believe the larger estimate and reset the other one to exactly 0.0. If
3140  * we get exactly equal estimates (as can easily happen with self-joins),
3141  * believe neither.
3142  */
3143  if (*leftstart < *rightstart)
3144  *leftstart = 0.0;
3145  else if (*leftstart > *rightstart)
3146  *rightstart = 0.0;
3147  else
3148  *leftstart = *rightstart = 0.0;
3149 
3150  /*
3151  * If the sort order is nulls-first, we're going to have to skip over any
3152  * nulls too. These would not have been counted by scalarineqsel, and we
3153  * can safely add in this fraction regardless of whether we believe
3154  * scalarineqsel's results or not. But be sure to clamp the sum to 1.0!
3155  */
3156  if (nulls_first)
3157  {
3158  Form_pg_statistic stats;
3159 
3160  if (HeapTupleIsValid(leftvar.statsTuple))
3161  {
3162  stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3163  *leftstart += stats->stanullfrac;
3164  CLAMP_PROBABILITY(*leftstart);
3165  *leftend += stats->stanullfrac;
3166  CLAMP_PROBABILITY(*leftend);
3167  }
3168  if (HeapTupleIsValid(rightvar.statsTuple))
3169  {
3170  stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3171  *rightstart += stats->stanullfrac;
3172  CLAMP_PROBABILITY(*rightstart);
3173  *rightend += stats->stanullfrac;
3174  CLAMP_PROBABILITY(*rightend);
3175  }
3176  }
3177 
3178  /* Disbelieve start >= end, just in case that can happen */
3179  if (*leftstart >= *leftend)
3180  {
3181  *leftstart = 0.0;
3182  *leftend = 1.0;
3183  }
3184  if (*rightstart >= *rightend)
3185  {
3186  *rightstart = 0.0;
3187  *rightend = 1.0;
3188  }
3189 
3190 fail:
3191  ReleaseVariableStats(leftvar);
3192  ReleaseVariableStats(rightvar);
3193 }
3194 
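Under a uniform-distribution assumption, the start/end fractions above can be checked by hand. A standalone sketch (not part of selfuncs.c), assuming hypothetical value ranges of [0, 100] on the left and [40, 60] on the right for an ascending merge:

#include <stdio.h>

/* fraction of a uniform [lo, hi] range that is <= bound; a crude stand-in
 * for the histogram-based scalarineqsel used by the real code */
static double
frac_le(double lo, double hi, double bound)
{
	if (bound <= lo)
		return 0.0;
	if (bound >= hi)
		return 1.0;
	return (bound - lo) / (hi - lo);
}

int
main(void)
{
	double		leftend = frac_le(0, 100, 60);		/* left rows <= right max */
	double		rightend = frac_le(40, 60, 100);	/* right rows <= left max */
	double		leftstart = frac_le(0, 100, 40);	/* left rows < right min */
	double		rightstart = frac_le(40, 60, 0);	/* right rows < left min */

	/* only one "end" can really be < 1.0; keep the smaller, as above */
	if (leftend > rightend)
		leftend = 1.0;
	else if (leftend < rightend)
		rightend = 1.0;
	else
		leftend = rightend = 1.0;

	/* only one "start" can really be > 0.0; keep the larger */
	if (leftstart < rightstart)
		leftstart = 0.0;
	else if (leftstart > rightstart)
		rightstart = 0.0;
	else
		leftstart = rightstart = 0.0;

	printf("left  scans [%.2f, %.2f] of its input\n", leftstart, leftend);
	printf("right scans [%.2f, %.2f] of its input\n", rightstart, rightend);
	/* expected: left [0.40, 0.60], right [0.00, 1.00] */
	return 0;
}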
3195 
3196 /*
3197  * matchingsel -- generic matching-operator selectivity support
3198  *
3199  * Use these for any operators that (a) are on data types for which we collect
3200  * standard statistics, and (b) have behavior for which the default estimate
3201  * (twice DEFAULT_EQ_SEL) is sane. Typically that is good for match-like
3202  * operators.
3203  */
3204 
3205 Datum
3206 matchingsel(PG_FUNCTION_ARGS)
3207 {
3208  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
3209  Oid operator = PG_GETARG_OID(1);
3210  List *args = (List *) PG_GETARG_POINTER(2);
3211  int varRelid = PG_GETARG_INT32(3);
3212  Oid collation = PG_GET_COLLATION();
3213  double selec;
3214 
3215  /* Use generic restriction selectivity logic. */
3216  selec = generic_restriction_selectivity(root, operator, collation,
3217  args, varRelid,
3218  DEFAULT_MATCHING_SEL);
3219 
3220  PG_RETURN_FLOAT8((float8) selec);
3221 }
3222 
3223 Datum
3224 matchingjoinsel(PG_FUNCTION_ARGS)
3225 {
3226  /* Just punt, for the moment. */
3227  PG_RETURN_FLOAT8(DEFAULT_MATCHING_SEL);
3228 }
3229 
3230 
3231 /*
3232  * Helper routine for estimate_num_groups: add an item to a list of
3233  * GroupVarInfos, but only if it's not known equal to any of the existing
3234  * entries.
3235  */
3236 typedef struct
3237 {
3238  Node *var; /* might be an expression, not just a Var */
3239  RelOptInfo *rel; /* relation it belongs to */
3240  double ndistinct; /* # distinct values */
3241 } GroupVarInfo;
3242 
3243 static List *
3244 add_unique_group_var(PlannerInfo *root, List *varinfos,
3245  Node *var, VariableStatData *vardata)
3246 {
3247  GroupVarInfo *varinfo;
3248  double ndistinct;
3249  bool isdefault;
3250  ListCell *lc;
3251 
3252  ndistinct = get_variable_numdistinct(vardata, &isdefault);
3253 
3254  foreach(lc, varinfos)
3255  {
3256  varinfo = (GroupVarInfo *) lfirst(lc);
3257 
3258  /* Drop exact duplicates */
3259  if (equal(var, varinfo->var))
3260  return varinfos;
3261 
3262  /*
3263  * Drop known-equal vars, but only if they belong to different
3264  * relations (see comments for estimate_num_groups)
3265  */
3266  if (vardata->rel != varinfo->rel &&
3267  exprs_known_equal(root, var, varinfo->var))
3268  {
3269  if (varinfo->ndistinct <= ndistinct)
3270  {
3271  /* Keep older item, forget new one */
3272  return varinfos;
3273  }
3274  else
3275  {
3276  /* Delete the older item */
3277  varinfos = foreach_delete_current(varinfos, lc);
3278  }
3279  }
3280  }
3281 
3282  varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3283 
3284  varinfo->var = var;
3285  varinfo->rel = vardata->rel;
3286  varinfo->ndistinct = ndistinct;
3287  varinfos = lappend(varinfos, varinfo);
3288  return varinfos;
3289 }
3290 
3291 /*
3292  * estimate_num_groups - Estimate number of groups in a grouped query
3293  *
3294  * Given a query having a GROUP BY clause, estimate how many groups there
3295  * will be --- ie, the number of distinct combinations of the GROUP BY
3296  * expressions.
3297  *
3298  * This routine is also used to estimate the number of rows emitted by
3299  * a DISTINCT filtering step; that is an isomorphic problem. (Note:
3300  * actually, we only use it for DISTINCT when there's no grouping or
3301  * aggregation ahead of the DISTINCT.)
3302  *
3303  * Inputs:
3304  * root - the query
3305  * groupExprs - list of expressions being grouped by
3306  * input_rows - number of rows estimated to arrive at the group/unique
3307  * filter step
3308  * pgset - NULL, or a List** pointing to a grouping set to filter the
3309  * groupExprs against
3310  *
3311  * Given the lack of any cross-correlation statistics in the system, it's
3312  * impossible to do anything really trustworthy with GROUP BY conditions
3313  * involving multiple Vars. We should however avoid assuming the worst
3314  * case (all possible cross-product terms actually appear as groups) since
3315  * very often the grouped-by Vars are highly correlated. Our current approach
3316  * is as follows:
3317  * 1. Expressions yielding boolean are assumed to contribute two groups,
3318  * independently of their content, and are ignored in the subsequent
3319  * steps. This is mainly because tests like "col IS NULL" break the
3320  * heuristic used in step 2 especially badly.
3321  * 2. Reduce the given expressions to a list of unique Vars used. For
3322  * example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3323  * It is clearly correct not to count the same Var more than once.
3324  * It is also reasonable to treat f(x) the same as x: f() cannot
3325  * increase the number of distinct values (unless it is volatile,
3326  * which we consider unlikely for grouping), but it probably won't
3327  * reduce the number of distinct values much either.
3328  * As a special case, if a GROUP BY expression can be matched to an
3329  * expressional index for which we have statistics, then we treat the
3330  * whole expression as though it were just a Var.
3331  * 3. If the list contains Vars of different relations that are known equal
3332  * due to equivalence classes, then drop all but one of the Vars from each
3333  * known-equal set, keeping the one with smallest estimated # of values
3334  * (since the extra values of the others can't appear in joined rows).
3335  * Note the reason we only consider Vars of different relations is that
3336  * if we considered ones of the same rel, we'd be double-counting the
3337  * restriction selectivity of the equality in the next step.
3338  * 4. For Vars within a single source rel, we multiply together the numbers
3339  * of values, clamp to the number of rows in the rel (divided by 10 if
3340  * more than one Var), and then multiply by a factor based on the
3341  * selectivity of the restriction clauses for that rel. When there's
3342  * more than one Var, the initial product is probably too high (it's the
3343  * worst case) but clamping to a fraction of the rel's rows seems to be a
3344  * helpful heuristic for not letting the estimate get out of hand. (The
3345  * factor of 10 is derived from pre-Postgres-7.4 practice.) The factor
3346  * we multiply by to adjust for the restriction selectivity assumes that
3347  * the restriction clauses are independent of the grouping, which may not
3348  * be a valid assumption, but it's hard to do better.
3349  * 5. If there are Vars from multiple rels, we repeat step 4 for each such
3350  * rel, and multiply the results together.
3351  * Note that rels not containing grouped Vars are ignored completely, as are
3352  * join clauses. Such rels cannot increase the number of groups, and we
3353  * assume such clauses do not reduce the number either (somewhat bogus,
3354  * but we don't have the info to do better).
3355  */
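A standalone sketch of step 4's clamping rule (not part of selfuncs.c), using hypothetical numbers: a rel of 1,000,000 tuples grouped by two Vars with ndistinct estimates of 1,000 and 500:

#include <stdio.h>

int
main(void)
{
	double		tuples = 1000000.0;		/* hypothetical rel->tuples */
	double		nd_a = 1000.0,
				nd_b = 500.0;			/* hypothetical per-Var ndistinct */
	double		reldistinct = nd_a * nd_b;	/* 500,000: worst-case product */
	double		relmaxndistinct = (nd_a > nd_b) ? nd_a : nd_b;
	int			relvarcount = 2;
	double		clamp = tuples;

	if (relvarcount > 1)
	{
		clamp *= 0.1;					/* 100,000 */
		if (clamp < relmaxndistinct)
			clamp = relmaxndistinct;	/* never below the largest ndistinct */
		if (clamp > tuples)
			clamp = tuples;
	}
	if (reldistinct > clamp)
		reldistinct = clamp;

	printf("estimated groups before restriction adjustment: %g\n",
		   reldistinct);				/* 100000 */
	return 0;
}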
3356 double
3357 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3358  List **pgset)
3359 {
3360  List *varinfos = NIL;
3361  double srf_multiplier = 1.0;
3362  double numdistinct;
3363  ListCell *l;
3364  int i;
3365 
3366  /*
3367  * We don't ever want to return an estimate of zero groups, as that tends
3368  * to lead to division-by-zero and other unpleasantness. The input_rows
3369  * estimate is usually already at least 1, but clamp it just in case it
3370  * isn't.
3371  */
3372  input_rows = clamp_row_est(input_rows);
3373 
3374  /*
3375  * If no grouping columns, there's exactly one group. (This can't happen
3376  * for normal cases with GROUP BY or DISTINCT, but it is possible for
3377  * corner cases with set operations.)
3378  */
3379  if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3380  return 1.0;
3381 
3382  /*
3383  * Count groups derived from boolean grouping expressions. For other
3384  * expressions, find the unique Vars used, treating an expression as a Var
3385  * if we can find stats for it. For each one, record the statistical
3386  * estimate of number of distinct values (total in its table, without
3387  * regard for filtering).
3388  */
3389  numdistinct = 1.0;
3390 
3391  i = 0;
3392  foreach(l, groupExprs)
3393  {
3394  Node *groupexpr = (Node *) lfirst(l);
3395  double this_srf_multiplier;
3396  VariableStatData vardata;
3397  List *varshere;
3398  ListCell *l2;
3399 
3400  /* is expression in this grouping set? */
3401  if (pgset && !list_member_int(*pgset, i++))
3402  continue;
3403 
3404  /*
3405  * Set-returning functions in grouping columns are a bit problematic.
3406  * The code below will effectively ignore their SRF nature and come up
3407  * with a numdistinct estimate as though they were scalar functions.
3408  * We compensate by scaling up the end result by the largest SRF
3409  * rowcount estimate. (This will be an overestimate if the SRF
3410  * produces multiple copies of any output value, but it seems best to
3411  * assume the SRF's outputs are distinct. In any case, it's probably
3412  * pointless to worry too much about this without much better
3413  * estimates for SRF output rowcounts than we have today.)
3414  */
3415  this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
3416  if (srf_multiplier < this_srf_multiplier)
3417  srf_multiplier = this_srf_multiplier;
3418 
3419  /* Short-circuit for expressions returning boolean */
3420  if (exprType(groupexpr) == BOOLOID)
3421  {
3422  numdistinct *= 2.0;
3423  continue;
3424  }
3425 
3426  /*
3427  * If examine_variable is able to deduce anything about the GROUP BY
3428  * expression, treat it as a single variable even if it's really more
3429  * complicated.
3430  */
3431  examine_variable(root, groupexpr, 0, &vardata);
3432  if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3433  {
3434  varinfos = add_unique_group_var(root, varinfos,
3435  groupexpr, &vardata);
3436  ReleaseVariableStats(vardata);
3437  continue;
3438  }
3439  ReleaseVariableStats(vardata);
3440 
3441  /*
3442  * Else pull out the component Vars. Handle PlaceHolderVars by
3443  * recursing into their arguments (effectively assuming that the
3444  * PlaceHolderVar doesn't change the number of groups, which boils
3445  * down to ignoring the possible addition of nulls to the result set).
3446  */
3447  varshere = pull_var_clause(groupexpr,
3448  PVC_RECURSE_AGGREGATES |
3449  PVC_RECURSE_WINDOWFUNCS |
3450  PVC_RECURSE_PLACEHOLDERS);
3451 
3452  /*
3453  * If we find any variable-free GROUP BY item, then either it is a
3454  * constant (and we can ignore it) or it contains a volatile function;
3455  * in the latter case we punt and assume that each input row will
3456  * yield a distinct group.
3457  */
3458  if (varshere == NIL)
3459  {
3460  if (contain_volatile_functions(groupexpr))
3461  return input_rows;
3462  continue;
3463  }
3464 
3465  /*
3466  * Else add variables to varinfos list
3467  */
3468  foreach(l2, varshere)
3469  {
3470  Node *var = (Node *) lfirst(l2);
3471 
3472  examine_variable(root, var, 0, &vardata);
3473  varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3474  ReleaseVariableStats(vardata);
3475  }
3476  }
3477 
3478  /*
3479  * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3480  * list.
3481  */
3482  if (varinfos == NIL)
3483  {
3484  /* Apply SRF multiplier as we would do in the long path */
3485  numdistinct *= srf_multiplier;
3486  /* Round off */
3487  numdistinct = ceil(numdistinct);
3488  /* Guard against out-of-range answers */
3489  if (numdistinct > input_rows)
3490  numdistinct = input_rows;
3491  if (numdistinct < 1.0)
3492  numdistinct = 1.0;
3493  return numdistinct;
3494  }
3495 
3496  /*
3497  * Group Vars by relation and estimate total numdistinct.
3498  *
3499  * For each iteration of the outer loop, we process the frontmost Var in
3500  * varinfos, plus all other Vars in the same relation. We remove these
3501  * Vars from the newvarinfos list for the next iteration. This is the
3502  * easiest way to group Vars of the same rel together.
3503  */
3504  do
3505  {
3506  GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3507  RelOptInfo *rel = varinfo1->rel;
3508  double reldistinct = 1;
3509  double relmaxndistinct = reldistinct;
3510  int relvarcount = 0;
3511  List *newvarinfos = NIL;
3512  List *relvarinfos = NIL;
3513 
3514  /*
3515  * Split the list of varinfos in two - one for the current rel, one
3516  * for remaining Vars on other rels.
3517  */
3518  relvarinfos = lappend(relvarinfos, varinfo1);
3519  for_each_cell(l, varinfos, list_second_cell(varinfos))
3520  {
3521  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3522 
3523  if (varinfo2->rel == varinfo1->rel)
3524  {
3525  /* varinfos on current rel */
3526  relvarinfos = lappend(relvarinfos, varinfo2);
3527  }
3528  else
3529  {
3530  /* not time to process varinfo2 yet */
3531  newvarinfos = lappend(newvarinfos, varinfo2);
3532  }
3533  }
3534 
3535  /*
3536  * Get the numdistinct estimate for the Vars of this rel. We
3537  * iteratively search for multivariate n-distinct with maximum number
3538  * of vars; assuming that each var group is independent of the others,
3539  * we multiply them together. Any remaining relvarinfos after no more
3540  * multivariate matches are found are assumed independent too, so
3541  * their individual ndistinct estimates are multiplied also.
3542  *
3543  * While iterating, count how many separate numdistinct values we
3544  * apply. We apply a fudge factor below, but only if we multiplied
3545  * more than one such value.
3546  */
3547  while (relvarinfos)
3548  {
3549  double mvndistinct;
3550 
3551  if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3552  &mvndistinct))
3553  {
3554  reldistinct *= mvndistinct;
3555  if (relmaxndistinct < mvndistinct)
3556  relmaxndistinct = mvndistinct;
3557  relvarcount++;
3558  }
3559  else
3560  {
3561  foreach(l, relvarinfos)
3562  {
3563  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3564 
3565  reldistinct *= varinfo2->ndistinct;
3566  if (relmaxndistinct < varinfo2->ndistinct)
3567  relmaxndistinct = varinfo2->ndistinct;
3568  relvarcount++;
3569  }
3570 
3571  /* we're done with this relation */
3572  relvarinfos = NIL;
3573  }
3574  }
3575 
3576  /*
3577  * Sanity check --- don't divide by zero if empty relation.
3578  */
3579  Assert(IS_SIMPLE_REL(rel));
3580  if (rel->tuples > 0)
3581  {
3582  /*
3583  * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3584  * fudge factor is because the Vars are probably correlated but we
3585  * don't know by how much. We should never clamp to less than the
3586  * largest ndistinct value for any of the Vars, though, since
3587  * there will surely be at least that many groups.
3588  */
3589  double clamp = rel->tuples;
3590 
3591  if (relvarcount > 1)
3592  {
3593  clamp *= 0.1;
3594  if (clamp < relmaxndistinct)
3595  {
3596  clamp = relmaxndistinct;
3597  /* for sanity in case some ndistinct is too large: */
3598  if (clamp > rel->tuples)
3599  clamp = rel->tuples;
3600  }
3601  }
3602  if (reldistinct > clamp)
3603  reldistinct = clamp;
3604 
3605  /*
3606  * Update the estimate based on the restriction selectivity,
3607  * guarding against division by zero when reldistinct is zero.
3608  * Also skip this if we know that we are returning all rows.
3609  */
3610  if (reldistinct > 0 && rel->rows < rel->tuples)
3611  {
3612  /*
3613  * Given a table containing N rows with n distinct values in a
3614  * uniform distribution, if we select p rows at random then
3615  * the expected number of distinct values selected is
3616  *
3617  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3618  *
3619  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3620  *
3621  * See "Approximating block accesses in database
3622  * organizations", S. B. Yao, Communications of the ACM,
3623  * Volume 20 Issue 4, April 1977 Pages 260-261.
3624  *
3625  * Alternatively, re-arranging the terms from the factorials,
3626  * this may be written as
3627  *
3628  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3629  *
3630  * This form of the formula is more efficient to compute in
3631  * the common case where p is larger than N/n. Additionally,
3632  * as pointed out by Dell'Era, if i << N for all terms in the
3633  * product, it can be approximated by
3634  *
3635  * n * (1 - ((N-p)/N)^(N/n))
3636  *
3637  * See "Expected distinct values when selecting from a bag
3638  * without replacement", Alberto Dell'Era,
3639  * http://www.adellera.it/investigations/distinct_balls/.
3640  *
3641  * The condition i << N is equivalent to n >> 1, so this is a
3642  * good approximation when the number of distinct values in
3643  * the table is large. It turns out that this formula also
3644  * works well even when n is small.
3645  */
3646  reldistinct *=
3647  (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3648  rel->tuples / reldistinct));
3649  }
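/*
 * Worked example (editor's illustration, not part of the original source):
 * with rel->tuples = 1,000,000, reldistinct = 1,000 and rel->rows = 10,000,
 * the factor above is 1 - 0.99^1000, roughly 1 - 4.3e-5, so reldistinct
 * stays at about 999.96: selecting 1% of a table that has only 1,000
 * distinct values is still expected to hit nearly all of them.
 */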
3650  reldistinct = clamp_row_est(reldistinct);
3651 
3652  /*
3653  * Update estimate of total distinct groups.
3654  */
3655  numdistinct *= reldistinct;
3656  }
3657 
3658  varinfos = newvarinfos;
3659  } while (varinfos != NIL);
3660 
3661  /* Now we can account for the effects of any SRFs */
3662  numdistinct *= srf_multiplier;
3663 
3664  /* Round off */
3665  numdistinct = ceil(numdistinct);
3666 
3667  /* Guard against out-of-range answers */
3668  if (numdistinct > input_rows)
3669  numdistinct = input_rows;
3670  if (numdistinct < 1.0)
3671  numdistinct = 1.0;
3672 
3673  return numdistinct;
3674 }
3675 
3676 /*
3677  * Estimate hash bucket statistics when the specified expression is used
3678  * as a hash key for the given number of buckets.
3679  *
3680  * This attempts to determine two values:
3681  *
3682  * 1. The frequency of the most common value of the expression (returns
3683  * zero into *mcv_freq if we can't get that).
3684  *
3685  * 2. The "bucketsize fraction", ie, average number of entries in a bucket
3686  * divided by total tuples in relation.
3687  *
3688  * XXX This is really pretty bogus since we're effectively assuming that the
3689  * distribution of hash keys will be the same after applying restriction
3690  * clauses as it was in the underlying relation. However, we are not nearly
3691  * smart enough to figure out how the restrict clauses might change the
3692  * distribution, so this will have to do for now.
3693  *
3694  * We are passed the number of buckets the executor will use for the given
3695  * input relation. If the data were perfectly distributed, with the same
3696  * number of tuples going into each available bucket, then the bucketsize
3697  * fraction would be 1/nbuckets. But this happy state of affairs will occur
3698  * only if (a) there are at least nbuckets distinct data values, and (b)
3699  * we have a not-too-skewed data distribution. Otherwise the buckets will
3700  * be nonuniformly occupied. If the other relation in the join has a key
3701  * distribution similar to this one's, then the most-loaded buckets are
3702  * exactly those that will be probed most often. Therefore, the "average"
3703  * bucket size for costing purposes should really be taken as something close
3704  * to the "worst case" bucket size. We try to estimate this by adjusting the
3705  * fraction if there are too few distinct data values, and then scaling up
3706  * by the ratio of the most common value's frequency to the average frequency.
3707  *
3708  * If no statistics are available, use a default estimate of 0.1. This will
3709  * discourage use of a hash rather strongly if the inner relation is large,
3710  * which is what we want. We do not want to hash unless we know that the
3711  * inner rel is well-dispersed (or the alternatives seem much worse).
3712  *
3713  * The caller should also check that the mcv_freq is not so large that the
3714  * most common value would by itself require an impractically large bucket.
3715  * In a hash join, the executor can split buckets if they get too big, but
3716  * obviously that doesn't help for a bucket that contains many duplicates of
3717  * the same value.
3718  */
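/*
 * Illustrative numbers (editor's sketch, assuming no restriction-clause
 * filtering): with nbuckets = 1024, ndistinct = 50, stanullfrac = 0 and
 * mcv_freq = 0.10, the initial estimate is 1/ndistinct = 0.02 (since
 * ndistinct < nbuckets), avgfreq is (1 - 0)/50 = 0.02, and the skew
 * adjustment multiplies by 0.10/0.02 = 5, so *bucketsize_frac = 0.10.
 */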
3719 void
3720 estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets,
3721  Selectivity *mcv_freq,
3722  Selectivity *bucketsize_frac)
3723 {
3724  VariableStatData vardata;
3725  double estfract,
3726  ndistinct,
3727  stanullfrac,
3728  avgfreq;
3729  bool isdefault;
3730  AttStatsSlot sslot;
3731 
3732  examine_variable(root, hashkey, 0, &vardata);
3733 
3734  /* Look up the frequency of the most common value, if available */
3735  *mcv_freq = 0.0;
3736 
3737  if (HeapTupleIsValid(vardata.statsTuple))
3738  {
3739  if (get_attstatsslot(&sslot, vardata.statsTuple,
3740  STATISTIC_KIND_MCV, InvalidOid,
3741  ATTSTATSSLOT_NUMBERS))
3742  {
3743  /*
3744  * The first MCV stat is for the most common value.
3745  */
3746  if (sslot.nnumbers > 0)
3747  *mcv_freq = sslot.numbers[0];
3748  free_attstatsslot(&sslot);
3749  }
3750  }
3751 
3752  /* Get number of distinct values */
3753  ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3754 
3755  /*
3756  * If ndistinct isn't real, punt. We normally return 0.1, but if the
3757  * mcv_freq is known to be even higher than that, use it instead.
3758  */
3759  if (isdefault)
3760  {
3761  *bucketsize_frac = (Selectivity) Max(0.1, *mcv_freq);
3762  ReleaseVariableStats(vardata);
3763  return;
3764  }
3765 
3766  /* Get fraction that are null */
3767  if (HeapTupleIsValid(vardata.statsTuple))
3768  {
3769  Form_pg_statistic stats;
3770 
3771  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3772  stanullfrac = stats->stanullfrac;
3773  }
3774  else
3775  stanullfrac = 0.0;
3776 
3777  /* Compute avg freq of all distinct data values in raw relation */
3778  avgfreq = (1.0 - stanullfrac) / ndistinct;
3779 
3780  /*
3781  * Adjust ndistinct to account for restriction clauses. Observe we are
3782  * assuming that the data distribution is affected uniformly by the
3783  * restriction clauses!
3784  *
3785  * XXX Possibly better way, but much more expensive: multiply by
3786  * selectivity of rel's restriction clauses that mention the target Var.
3787  */
3788  if (vardata.rel && vardata.rel->tuples > 0)
3789  {
3790  ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3791  ndistinct = clamp_row_est(ndistinct);
3792  }
3793 
3794  /*
3795  * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3796  * number of buckets is less than the expected number of distinct values;
3797  * otherwise it is 1/ndistinct.
3798  */
3799  if (ndistinct > nbuckets)
3800  estfract = 1.0 / nbuckets;
3801  else
3802  estfract = 1.0 / ndistinct;
3803 
3804  /*
3805  * Adjust estimated bucketsize upward to account for skewed distribution.
3806  */
3807  if (avgfreq > 0.0 && *mcv_freq > avgfreq)
3808  estfract *= *mcv_freq / avgfreq;
3809 
3810  /*
3811  * Clamp bucketsize to sane range (the above adjustment could easily
3812  * produce an out-of-range result). We set the lower bound a little above
3813  * zero, since zero isn't a very sane result.
3814  */
3815  if (estfract < 1.0e-6)
3816  estfract = 1.0e-6;
3817  else if (estfract > 1.0)
3818  estfract = 1.0;
3819 
3820  *bucketsize_frac = (Selectivity) estfract;
3821 
3822  ReleaseVariableStats(vardata);
3823 }
3824 
3825 /*
3826  * estimate_hashagg_tablesize
3827  * estimate the number of bytes that a hash aggregate hashtable will
3828  * require based on the agg_costs, path width and number of groups.
3829  *
3830  * We return the result as "double" to forestall any possible overflow
3831  * problem in the multiplication by dNumGroups.
3832  *
3833  * XXX this may be over-estimating the size now that hashagg knows to omit
3834  * unneeded columns from the hashtable. Also for mixed-mode grouping sets,
3835  * grouping columns not in the hashed set are counted here even though hashagg
3836  * won't store them. Is this a problem?
3837  */
3838 double
3839 estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs,
3840  double dNumGroups)
3841 {
3842  Size hashentrysize = hash_agg_entry_size(agg_costs->numAggs,
3843  path->pathtarget->width,
3844  agg_costs->transitionSpace);
3845 
3846  /*
3847  * Note that this disregards the effect of fill-factor and growth policy
3848  * of the hash table. That's probably ok, given that the default
3849  * fill-factor is relatively high. It'd be hard to meaningfully factor in
3850  * "double-in-size" growth policies here.
3851  */
3852  return hashentrysize * dNumGroups;
3853 }
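/*
 * Rough usage example (editor's illustration; the 80-byte entry size is an
 * assumed figure, not a value computed by hash_agg_entry_size): with
 * hashentrysize around 80 bytes and dNumGroups = 100,000, the estimate is
 * 80 * 100,000 = 8,000,000 bytes, i.e. roughly 8 MB of hashtable space.
 */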
3854 
3855 
3856 /*-------------------------------------------------------------------------
3857  *
3858  * Support routines
3859  *
3860  *-------------------------------------------------------------------------
3861  */
3862 
3863 /*
3864  * Find applicable ndistinct statistics for the given list of VarInfos (which
3865  * must all belong to the given rel), and update *ndistinct to the estimate of
3866  * the MVNDistinctItem that best matches. If a match is found, *varinfos is
3867  * updated to remove the list of matched varinfos.
3868  *
3869  * Varinfos that aren't for simple Vars are ignored.
3870  *
3871  * Return true if we're able to find a match, false otherwise.
3872  */
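/*
 * Illustration (editor's note): given extended statistics created with
 * something like "CREATE STATISTICS s (ndistinct) ON a, b FROM t", a
 * GROUP BY a, b can use the stored ndistinct for the {a, b} combination
 * instead of multiplying the per-column estimates, which would
 * overestimate the number of groups when a and b are correlated.
 */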
3873 static bool
3874 estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
3875  List **varinfos, double *ndistinct)
3876 {
3877  ListCell *lc;
3878  Bitmapset *attnums = NULL;
3879  int nmatches;
3880  Oid statOid = InvalidOid;
3881  MVNDistinct *stats;
3882  Bitmapset *matched = NULL;
3883 
3884  /* bail out immediately if the table has no extended statistics */
3885  if (!rel->statlist)
3886  return false;
3887 
3888  /* Determine the attnums we're looking for */
3889  foreach(lc, *varinfos)
3890  {
3891  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3892  AttrNumber attnum;
3893 
3894  Assert(varinfo->rel == rel);
3895 
3896  if (!IsA(varinfo->var, Var))
3897  continue;
3898 
3899  attnum = ((Var *) varinfo->var)->varattno;
3900 
3901  if (!AttrNumberIsForUserDefinedAttr(attnum))
3902  continue;
3903 
3904  attnums = bms_add_member(attnums, attnum);
3905  }
3906 
3907  /* look for the ndistinct statistics matching the most vars */
3908  nmatches = 1; /* we require at least two matches */
3909  foreach(lc, rel->statlist)
3910  {
3911  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3912  Bitmapset *shared;
3913  int nshared;
3914 
3915  /* skip statistics of other kinds */
3916  if (info->kind != STATS_EXT_NDISTINCT)
3917  continue;
3918 
3919  /* compute attnums shared by the vars and the statistics object */
3920  shared = bms_intersect(info->keys, attnums);
3921  nshared = bms_num_members(shared);
3922 
3923  /*
3924  * Does this statistics object match more columns than the currently
3925  * best object? If so, use this one instead.
3926  *
3927  * XXX This should break ties using name of the object, or something
3928  * like that, to make the outcome stable.
3929  */
3930  if (nshared > nmatches)
3931  {
3932  statOid = info->statOid;
3933  nmatches = nshared;
3934  matched = shared;
3935  }
3936  }
3937 
3938  /* No match? */
3939  if (statOid == InvalidOid)
3940  return false;
3941  Assert(nmatches > 1 && matched != NULL);
3942 
3943  stats = statext_ndistinct_load(statOid);
3944 
3945  /*
3946  * If we have a match, search it for the specific item that matches (there
3947  * must be one), and construct the output values.
3948  */
3949  if (stats)
3950  {
3951  int i;
3952  List *newlist = NIL;
3953  MVNDistinctItem *item = NULL;
3954 
3955  /* Find the specific item that exactly matches the combination */
3956  for (i = 0; i < stats->nitems; i++)
3957  {
3958  MVNDistinctItem *tmpitem = &stats->items[i];
3959 
3960  if (bms_subset_compare(tmpitem->attrs, matched) == BMS_EQUAL)
3961  {
3962  item = tmpitem;
3963  break;
3964  }
3965  }
3966 
3967  /* make sure we found an item */
3968  if (!item)
3969  elog(ERROR, "corrupt MVNDistinct entry");
3970 
3971  /* Form the output varinfo list, keeping only unmatched ones */
3972  foreach(lc, *varinfos)
3973  {
3974  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3975  AttrNumber attnum;
3976 
3977  if (!IsA(varinfo->var, Var))
3978  {
3979  newlist = lappend(newlist, varinfo);
3980  continue;
3981  }
3982 
3983  attnum = ((Var *) varinfo->var)->varattno;
3984 
3985  if (!AttrNumberIsForUserDefinedAttr(attnum))
3986  continue;
3987 
3988  if (!bms_is_member(attnum, matched))
3989  newlist = lappend(newlist, varinfo);
3990  }
3991 
3992  *varinfos = newlist;
3993  *ndistinct = item->ndistinct;
3994  return true;
3995  }
3996 
3997  return false;
3998 }
3999 
4000 /*
4001  * convert_to_scalar
4002  * Convert non-NULL values of the indicated types to the comparison
4003  * scale needed by scalarineqsel().
4004  * Returns "true" if successful.
4005  *
4006  * XXX this routine is a hack: ideally we should look up the conversion
4007  * subroutines in pg_type.
4008  *
4009  * All numeric datatypes are simply converted to their equivalent
4010  * "double" values. (NUMERIC values that are outside the range of "double"
4011  * are clamped to +/- HUGE_VAL.)
4012  *
4013  * String datatypes are converted by convert_string_to_scalar(),
4014  * which is explained below. The reason why this routine deals with
4015  * three values at a time, not just one, is that we need it for strings.
4016  *
4017  * The bytea datatype is just enough different from strings that it has
4018  * to be treated separately.
4019  *
4020  * The several datatypes representing absolute times are all converted
4021  * to Timestamp, which is actually an int64, and then we promote that to
4022  * a double. Note this will give correct results even for the "special"
4023  * values of Timestamp, since those are chosen to compare correctly;
4024  * see timestamp_cmp.
4025  *
4026  * The several datatypes representing relative times (intervals) are all
4027  * converted to measurements expressed in seconds.
4028  */
4029 static bool
4030 convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue,
4031  Datum lobound, Datum hibound, Oid boundstypid,
4032  double *scaledlobound, double *scaledhibound)
4033 {
4034  bool failure = false;
4035 
4036  /*
4037  * Both the valuetypid and the boundstypid should exactly match the
4038  * declared input type(s) of the operator we are invoked for. However,
4039  * extensions might try to use scalarineqsel as estimator for operators
4040  * with input type(s) we don't handle here; in such cases, we want to
4041  * return false, not fail. In any case, we mustn't assume that valuetypid
4042  * and boundstypid are identical.
4043  *
4044  * XXX The histogram we are interpolating between points of could belong
4045  * to a column that's only binary-compatible with the declared type. In
4046  * essence we are assuming that the semantics of binary-compatible types
4047  * are enough alike that we can use a histogram generated with one type's
4048  * operators to estimate selectivity for the other's. This is outright
4049  * wrong in some cases --- in particular signed versus unsigned
4050  * interpretation could trip us up. But it's useful enough in the
4051  * majority of cases that we do it anyway. Should think about more
4052  * rigorous ways to do it.
4053  */
4054  switch (valuetypid)
4055  {
4056  /*
4057  * Built-in numeric types
4058  */
4059  case BOOLOID:
4060  case INT2OID:
4061  case INT4OID:
4062  case INT8OID:
4063  case FLOAT4OID:
4064  case FLOAT8OID:
4065  case NUMERICOID:
4066  case OIDOID:
4067  case REGPROCOID:
4068  case REGPROCEDUREOID:
4069  case REGOPEROID:
4070  case REGOPERATOROID:
4071  case REGCLASSOID:
4072  case REGTYPEOID:
4073  case REGCONFIGOID:
4074  case REGDICTIONARYOID:
4075  case REGROLEOID:
4076  case REGNAMESPACEOID:
4077  *scaledvalue = convert_numeric_to_scalar(value, valuetypid,
4078  &failure);
4079  *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
4080  &failure);
4081  *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid,
4082  &failure);
4083  return !failure;
4084 
4085  /*
4086  * Built-in string types
4087  */
4088  case CHAROID:
4089  case BPCHAROID:
4090  case VARCHAROID:
4091  case TEXTOID:
4092  case NAMEOID:
4093  {
4094  char *valstr = convert_string_datum(value, valuetypid,
4095  collid, &failure);
4096  char *lostr = convert_string_datum(lobound, boundstypid,
4097  collid, &failure);
4098  char *histr = convert_string_datum(hibound, boundstypid,
4099  collid, &failure);
4100 
4101  /*
4102  * Bail out if any of the values is not of string type. We
4103  * might leak converted strings for the other value(s), but
4104  * that's not worth troubling over.
4105  */
4106  if (failure)
4107  return false;
4108 
4109  convert_string_to_scalar(valstr, scaledvalue,
4110  lostr, scaledlobound,
4111  histr, scaledhibound);
4112  pfree(valstr);
4113  pfree(lostr);
4114  pfree(histr);
4115  return true;
4116  }
4117 
4118  /*
4119  * Built-in bytea type
4120  */
4121  case BYTEAOID:
4122  {
4123  /* We only support bytea vs bytea comparison */
4124  if (boundstypid != BYTEAOID)
4125  return false;
4126  convert_bytea_to_scalar(value, scaledvalue,
4127  lobound, scaledlobound,
4128  hibound, scaledhibound);
4129  return true;
4130  }
4131 
4132  /*
4133  * Built-in time types
4134  */
4135  case TIMESTAMPOID:
4136  case TIMESTAMPTZOID:
4137  case DATEOID:
4138  case INTERVALOID:
4139  case TIMEOID:
4140  case TIMETZOID:
4141  *scaledvalue = convert_timevalue_to_scalar(value, valuetypid,
4142  &failure);
4143  *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid,
4144  &failure);
4145  *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid,
4146  &failure);
4147  return !failure;
4148 
4149  /*
4150  * Built-in network types
4151  */
4152  case INETOID:
4153  case CIDROID:
4154  case MACADDROID:
4155  case MACADDR8OID:
4156  *scaledvalue = convert_network_to_scalar(value, valuetypid,
4157  &failure);
4158  *scaledlobound = convert_network_to_scalar(lobound, boundstypid,
4159  &failure);
4160  *scaledhibound = convert_network_to_scalar(hibound, boundstypid,
4161  &failure);
4162  return !failure;
4163  }
4164  /* Don't know how to convert */
4165  *scaledvalue = *scaledlobound = *scaledhibound = 0;
4166  return false;
4167 }
4168 
4169 /*
4170  * Do convert_to_scalar()'s work for any numeric data type.
4171  *
4172  * On failure (e.g., unsupported typid), set *failure to true;
4173  * otherwise, that variable is not changed.
4174  */
4175 static double
4176 convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
4177 {
4178  switch (typid)
4179  {
4180  case BOOLOID:
4181  return (double) DatumGetBool(value);
4182  case INT2OID:
4183  return (double) DatumGetInt16(value);
4184  case INT4OID:
4185  return (double) DatumGetInt32(value);
4186  case INT8OID:
4187  return (double) DatumGetInt64(value);
4188  case FLOAT4OID:
4189  return (double) DatumGetFloat4(value);
4190  case FLOAT8OID:
4191  return (double) DatumGetFloat8(value);
4192  case NUMERICOID:
4193  /* Note: out-of-range values will be clamped to +-HUGE_VAL */
4194  return (double)
4195  DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
4196  value));
4197  case OIDOID:
4198  case REGPROCOID:
4199  case REGPROCEDUREOID:
4200  case REGOPEROID:
4201  case REGOPERATOROID:
4202  case REGCLASSOID:
4203  case REGTYPEOID:
4204  case REGCONFIGOID:
4205  case REGDICTIONARYOID:
4206  case REGROLEOID:
4207  case REGNAMESPACEOID:
4208  /* we can treat OIDs as integers... */
4209  return (double) DatumGetObjectId(value);
4210  }
4211 
4212  *failure = true;
4213  return 0;
4214 }
4215 
4216 /*
4217  * Do convert_to_scalar()'s work for any character-string data type.
4218  *
4219  * String datatypes are converted to a scale that ranges from 0 to 1,
4220  * where we visualize the bytes of the string as fractional digits.
4221  *
4222  * We do not want the base to be 256, however, since that tends to
4223  * generate inflated selectivity estimates; few databases will have
4224  * occurrences of all 256 possible byte values at each position.
4225  * Instead, use the smallest and largest byte values seen in the bounds
4226  * as the estimated range for each byte, after some fudging to deal with
4227  * the fact that we probably aren't going to see the full range that way.
4228  *
4229  * An additional refinement is that we discard any common prefix of the
4230  * three strings before computing the scaled values. This allows us to
4231  * "zoom in" when we encounter a narrow data range. An example is a phone
4232  * number database where all the values begin with the same area code.
4233  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
4234  * so this is more likely to happen than you might think.)
4235  */
4236 static void
4237 convert_string_to_scalar(char *value,
4238  double *scaledvalue,
4239  char *lobound,
4240  double *scaledlobound,
4241  char *hibound,
4242  double *scaledhibound)
4243 {
4244  int rangelo,
4245  rangehi;
4246  char *sptr;
4247 
4248  rangelo = rangehi = (unsigned char) hibound[0];
4249  for (sptr = lobound; *sptr; sptr++)
4250  {
4251  if (rangelo > (unsigned char) *sptr)
4252  rangelo = (unsigned char) *sptr;
4253  if (rangehi < (unsigned char) *sptr)
4254  rangehi = (unsigned char) *sptr;
4255  }
4256  for (sptr = hibound; *sptr; sptr++)
4257  {
4258  if (rangelo > (unsigned char) *sptr)
4259  rangelo = (unsigned char) *sptr;
4260  if (rangehi < (unsigned char) *sptr)
4261  rangehi = (unsigned char) *sptr;
4262  }
4263  /* If range includes any upper-case ASCII chars, make it include all */
4264  if (rangelo <= 'Z' && rangehi >= 'A')
4265  {
4266  if (rangelo > 'A')
4267  rangelo = 'A';
4268  if (rangehi < 'Z')
4269  rangehi = 'Z';
4270  }
4271  /* Ditto lower-case */
4272  if (rangelo <= 'z' && rangehi >= 'a')
4273  {
4274  if (rangelo > 'a')
4275  rangelo = 'a';
4276  if (rangehi < 'z')
4277  rangehi = 'z';
4278  }
4279  /* Ditto digits */
4280  if (rangelo <= '9' && rangehi >= '0')
4281  {
4282  if (rangelo > '0')
4283  rangelo = '0';
4284  if (rangehi < '9')
4285  rangehi = '9';
4286  }
4287 
4288  /*
4289  * If the range includes fewer than 10 chars, assume we have not got enough
4290  * data, and make it include the regular ASCII set.
4291  */
4292  if (rangehi - rangelo < 9)
4293  {
4294  rangelo = ' ';
4295  rangehi = 127;
4296  }
4297 
4298  /*
4299  * Now strip any common prefix of the three strings.
4300  */
4301  while (*lobound)
4302  {
4303  if (*lobound != *hibound || *lobound != *value)
4304  break;
4305  lobound++, hibound++, value++;
4306  }
4307 
4308  /*
4309  * Now we can do the conversions.
4310  */
4311  *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4312  *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4313  *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4314 }
4315 
4316 static double
4317 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
4318 {
4319  int slen = strlen(value);
4320  double num,
4321  denom,
4322  base;
4323 
4324  if (slen <= 0)
4325  return 0.0; /* empty string has scalar value 0 */
4326 
4327  /*
4328  * There seems little point in considering more than a dozen bytes from
4329  * the string. Since base is at least 10, that will give us nominal
4330  * resolution of at least 12 decimal digits, which is surely far more
4331  * precision than this estimation technique has got anyway (especially in
4332  * non-C locales). Also, even with the maximum possible base of 256, this
4333  * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4334  * overflow on any known machine.
4335  */
4336  if (slen > 12)
4337  slen = 12;
4338 
4339  /* Convert initial characters to fraction */
4340  base = rangehi - rangelo + 1;
4341  num = 0.0;
4342  denom = base;
4343  while (slen-- > 0)
4344  {
4345  int ch = (unsigned char) *value++;
4346 
4347  if (ch < rangelo)
4348  ch = rangelo - 1;
4349  else if (ch > rangehi)
4350  ch = rangehi + 1;
4351  num += ((double) (ch - rangelo)) / denom;
4352  denom *= base;
4353  }
4354 
4355  return num;
4356 }
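/*
 * Worked example (editor's illustration): with rangelo = 'a' and
 * rangehi = 'z' the base is 26, and the string "cat" maps to
 * 2/26 + 0/26^2 + 19/26^3 ~= 0.0769 + 0 + 0.0011 = 0.0780;
 * each byte acts as one base-26 "digit" after subtracting rangelo.
 */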
4357 
4358 /*
4359  * Convert a string-type Datum into a palloc'd, null-terminated string.
4360  *
4361  * On failure (e.g., unsupported typid), set *failure to true;
4362  * otherwise, that variable is not changed. (We'll return NULL on failure.)
4363  *
4364  * When using a non-C locale, we must pass the string through strxfrm()
4365  * before continuing, so as to generate correct locale-specific results.
4366  */
4367 static char *
4368 convert_string_datum(Datum value, Oid typid, Oid collid, bool *failure)
4369 {
4370  char *val;
4371 
4372  switch (typid)
4373  {
4374  case CHAROID:
4375  val = (char *) palloc(2);
4376  val[0] = DatumGetChar(value);
4377  val[1] = '\0';
4378  break;
4379  case BPCHAROID:
4380  case VARCHAROID:
4381  case TEXTOID:
4382  val = TextDatumGetCString(value);
4383  break;
4384  case NAMEOID:
4385  {
4386  NameData *nm = (NameData *) DatumGetPointer(value);
4387 
4388  val = pstrdup(NameStr(*nm));
4389  break;
4390  }
4391  default:
4392  *failure = true;
4393  return NULL;
4394  }
4395 
4396  if (!lc_collate_is_c(collid))
4397  {
4398  char *xfrmstr;
4399  size_t xfrmlen;
4400  size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4401 
4402  /*
4403  * XXX: We could guess at a suitable output buffer size and only call
4404  * strxfrm twice if our guess is too small.
4405  *
4406  * XXX: strxfrm doesn't support UTF-8 encoding on Win32; it can return
4407  * bogus data or set an error. This is not really a problem unless it
4408  * crashes since it will only give an estimation error and nothing
4409  * fatal.
4410  */
4411  xfrmlen = strxfrm(NULL, val, 0);
4412 #ifdef WIN32
4413 
4414  /*
4415  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4416  * of trying to allocate this much memory (and fail), just return the
4417  * original string unmodified as if we were in the C locale.
4418  */
4419  if (xfrmlen == INT_MAX)
4420  return val;
4421 #endif
4422  xfrmstr = (char *) palloc(xfrmlen + 1);
4423  xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4424 
4425  /*
4426  * Some systems (e.g., glibc) can return a smaller value from the
4427  * second call than the first; thus the Assert must be <= not ==.
4428  */
4429  Assert(xfrmlen2 <= xfrmlen);
4430  pfree(val);
4431  val = xfrmstr;
4432  }
4433 
4434  return val;
4435 }
4436 
4437 /*
4438  * Do convert_to_scalar()'s work for any bytea data type.
4439  *
4440  * Very similar to convert_string_to_scalar except we can't assume
4441  * null-termination and therefore pass explicit lengths around.
4442  *
4443  * Also, assumptions about likely "normal" ranges of characters have been
4444  * removed - a data range of 0..255 is always used, for now. (Perhaps
4445  * someday we will add information about actual byte data range to
4446  * pg_statistic.)
4447  */
4448 static void
4449 convert_bytea_to_scalar(Datum value,
4450  double *scaledvalue,
4451  Datum lobound,
4452  double *scaledlobound,
4453  Datum hibound,
4454  double *scaledhibound)
4455 {
4456  bytea *valuep = DatumGetByteaPP(value);
4457  bytea *loboundp = DatumGetByteaPP(lobound);
4458  bytea *hiboundp = DatumGetByteaPP(hibound);
4459  int rangelo,
4460  rangehi,
4461  valuelen = VARSIZE_ANY_EXHDR(valuep),
4462  loboundlen = VARSIZE_ANY_EXHDR(loboundp),
4463  hiboundlen = VARSIZE_ANY_EXHDR(hiboundp),
4464  i,
4465  minlen;
4466  unsigned char *valstr = (unsigned char *) VARDATA_ANY(valuep);
4467  unsigned char *lostr = (unsigned char *) VARDATA_ANY(loboundp);
4468  unsigned char *histr = (unsigned char *) VARDATA_ANY(hiboundp);
4469 
4470  /*
4471  * Assume bytea data is uniformly distributed across all byte values.
4472  */
4473  rangelo = 0;
4474  rangehi = 255;
4475 
4476  /*
4477  * Now strip any common prefix of the three strings.
4478  */
4479  minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4480  for (i = 0; i < minlen; i++)
4481  {
4482  if (*lostr != *histr || *lostr != *valstr)
4483  break;
4484  lostr++, histr++, valstr++;
4485  loboundlen--, hiboundlen--, valuelen--;
4486  }
4487 
4488  /*
4489  * Now we can do the conversions.
4490  */
4491  *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4492  *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4493  *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4494 }
4495 
4496 static double
4497 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4498  int rangelo, int rangehi)
4499 {
4500  double num,
4501  denom,
4502  base;
4503 
4504  if (valuelen <= 0)
4505  return 0.0; /* empty string has scalar value 0 */
4506 
4507  /*
4508  * Since base is 256, need not consider more than about 10 chars (even
4509  * this many seems like overkill)
4510  */
4511  if (valuelen > 10)
4512  valuelen = 10;
4513 
4514  /* Convert initial characters to fraction */
4515  base = rangehi - rangelo + 1;
4516  num = 0.0;
4517  denom = base;
4518  while (valuelen-- > 0)
4519  {
4520  int ch = *value++;
4521 
4522  if (ch < rangelo)
4523  ch = rangelo - 1;
4524  else if (ch > rangehi)
4525  ch = rangehi + 1;
4526  num += ((double) (ch - rangelo)) / denom;
4527  denom *= base;
4528  }
4529 
4530  return num;
4531 }
4532 
4533 /*
4534  * Do convert_to_scalar()'s work for any timevalue data type.
4535  *
4536  * On failure (e.g., unsupported typid), set *failure to true;
4537  * otherwise, that variable is not changed.
4538  */
4539 static double
4540 convert_timevalue_to_scalar(Datum value, Oid typid, bool *failure)
4541 {
4542  switch (typid)
4543  {
4544  case TIMESTAMPOID:
4545  return DatumGetTimestamp(value);
4546  case TIMESTAMPTZOID:
4547  return DatumGetTimestampTz(value);
4548  case DATEOID:
4549  return date2timestamp_no_overflow(DatumGetDateADT(value));
4550  case INTERVALOID:
4551  {
4552  Interval *interval = DatumGetIntervalP(value);
4553 
4554  /*
4555  * Convert the month part of Interval to days using assumed
4556  * average month length of 365.25/12.0 days. Not too
4557  * accurate, but plenty good enough for our purposes.
4558  */
4559  return interval->time + interval->day * (double) USECS_PER_DAY +
4560  interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4561  }
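/*
 * Example (editor's illustration): an interval of 1 month 2 days 03:00:00
 * becomes 3 * 3,600,000,000 + 2 * 86,400,000,000 +
 * 1 * (365.25/12) * 86,400,000,000 = 2,813,400,000,000 microseconds.
 */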
4562  case TIMEOID:
4563  return DatumGetTimeADT(value);
4564  case TIMETZOID:
4565  {
4566  TimeTzADT *timetz = DatumGetTimeTzADTP(value);
4567 
4568  /* use GMT-equivalent time */
4569  return (double) (timetz->time + (timetz->zone * 1000000.0));
4570  }
4571  }
4572 
4573  *failure = true;
4574  return 0;
4575 }
4576 
4577 
4578 /*
4579  * get_restriction_variable
4580  * Examine the args of a restriction clause to see if it's of the
4581  * form (variable op pseudoconstant) or (pseudoconstant op variable),
4582  * where "variable" could be either a Var or an expression in vars of a
4583  * single relation. If so, extract information about the variable,
4584  * and also indicate which side it was on and the other argument.
4585  *
4586  * Inputs:
4587  * root: the planner info
4588  * args: clause argument list
4589  * varRelid: see specs for restriction selectivity functions
4590  *
4591  * Outputs: (these are valid only if true is returned)
4592  * *vardata: gets information about variable (see examine_variable)
4593  * *other: gets other clause argument, aggressively reduced to a constant
4594  * *varonleft: set true if variable is on the left, false if on the right
4595  *
4596  * Returns true if a variable is identified, otherwise false.
4597  *
4598  * Note: if there are Vars on both sides of the clause, we must fail, because
4599  * callers are expecting that the other side will act like a pseudoconstant.
4600  */
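/*
 * Example (editor's illustration): for the clause "t.x < 42", estimated with
 * varRelid set to t's relid, *vardata describes t.x, *other is the Const 42
 * and *varonleft is true; for the equivalent "42 > t.x" the same outputs are
 * produced except that *varonleft is false.
 */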
4601 bool
4602 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4603  VariableStatData *vardata, Node **other,
4604  bool *varonleft)
4605 {
4606  Node *left,
4607  *right;
4608  VariableStatData rdata;
4609 
4610  /* Fail if not a binary opclause (probably shouldn't happen) */
4611  if (list_length(args) != 2)
4612  return false;
4613 
4614  left = (Node *) linitial(args);
4615  right = (Node *) lsecond(args);
4616 
4617  /*
4618  * Examine both sides. Note that when varRelid is nonzero, Vars of other
4619  * relations will be treated as pseudoconstants.
4620  */
4621  examine_variable(root, left, varRelid, vardata);
4622  examine_variable(root, right, varRelid, &rdata);
4623 
4624  /*
4625  * If one side is a variable and the other not, we win.
4626  */
4627  if (vardata->rel && rdata.rel == NULL)
4628  {
4629  *varonleft = true;
4630  *other = estimate_expression_value(root, rdata.var);
4631  /* Assume we need no ReleaseVariableStats(rdata) here */
4632  return true;
4633  }
4634 
4635  if (vardata->rel == NULL && rdata.rel)
4636  {
4637  *varonleft = false;
4638  *other = estimate_expression_value(root, vardata->var);
4639  /* Assume we need no ReleaseVariableStats(*vardata) here */
4640  *vardata = rdata;
4641  return true;
4642  }
4643 
4644  /* Oops, clause has wrong structure (probably var op var) */
4645  ReleaseVariableStats(*vardata);
4646  ReleaseVariableStats(rdata);
4647 
4648  return false;
4649 }
4650 
4651 /*
4652  * get_join_variables
4653  * Apply examine_variable() to each side of a join clause.
4654  * Also, attempt to identify whether the join clause has the same
4655  * or reversed sense compared to the SpecialJoinInfo.
4656  *
4657  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4658  * or "reversed" if it is "rhs_var OP lhs_var". In complicated cases
4659  * where we can't tell for sure, we default to assuming it's normal.
4660  */
4661 void
4662 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4663  VariableStatData *vardata1, VariableStatData *vardata2,
4664  bool *join_is_reversed)
4665 {
4666  Node *left,
4667  *right;
4668 
4669  if (list_length(args) != 2)
4670  elog(ERROR, "join operator should take two arguments");
4671 
4672  left = (Node *) linitial(args);
4673  right = (Node *) lsecond(args);
4674 
4675  examine_variable(root, left, 0, vardata1);
4676  examine_variable(root, right, 0, vardata2);
4677 
4678  if (vardata1->rel &&
4679  bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4680  *join_is_reversed = true; /* var1 is on RHS */
4681  else if (vardata2->rel &&
4682  bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4683  *join_is_reversed = true; /* var2 is on LHS */
4684  else
4685  *join_is_reversed = false;
4686 }
4687 
4688 /*
4689  * examine_variable
4690  * Try to look up statistical data about an expression.
4691  * Fill in a VariableStatData struct to describe the expression.
4692  *
4693  * Inputs:
4694  * root: the planner info
4695  * node: the expression tree to examine
4696  * varRelid: see specs for restriction selectivity functions
4697  *
4698  * Outputs: *vardata is filled as follows:
4699  * var: the input expression (with any binary relabeling stripped, if
4700  * it is or contains a variable; but otherwise the type is preserved)
4701  * rel: RelOptInfo for relation containing variable; NULL if expression
4702  * contains no Vars (NOTE this could point to a RelOptInfo of a
4703  * subquery, not one in the current query).
4704  * statsTuple: the pg_statistic entry for the variable, if one exists;
4705  * otherwise NULL.
4706  * freefunc: pointer to a function to release statsTuple with.
4707  * vartype: exposed type of the expression; this should always match
4708  * the declared input type of the operator we are estimating for.
4709  * atttype, atttypmod: actual type/typmod of the "var" expression. This is
4710  * commonly the same as the exposed type of the variable argument,
4711  * but can be different in binary-compatible-type cases.
4712  * isunique: true if we were able to match the var to a unique index or a
4713  * single-column DISTINCT clause, implying its values are unique for
4714  * this query. (Caution: this should be trusted for statistical
4715  * purposes only, since we do not check indimmediate nor verify that
4716  * the exact same definition of equality applies.)
4717  * acl_ok: true if current user has permission to read the column(s)
4718  * underlying the pg_statistic entry. This is consulted by
4719  * statistic_proc_security_check().
4720  *
4721  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4722  */
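/*
 * Example (editor's illustration): for the expression lower(t.name), when t
 * has an index on lower(name), the code below can match the expression to
 * that index column and use the index's pg_statistic entry; a plain Var such
 * as t.name instead takes the fast path through examine_simple_variable().
 */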
4723 void
4724 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4725  VariableStatData *vardata)
4726 {
4727  Node *basenode;
4728  Relids varnos;
4729  RelOptInfo *onerel;
4730 
4731  /* Make sure we don't return dangling pointers in vardata */
4732  MemSet(vardata, 0, sizeof(VariableStatData));
4733 
4734  /* Save the exposed type of the expression */
4735  vardata->vartype = exprType(node);
4736 
4737  /* Look inside any binary-compatible relabeling */
4738 
4739  if (IsA(node, RelabelType))
4740  basenode = (Node *) ((RelabelType *) node)->arg;
4741  else
4742  basenode = node;
4743 
4744  /* Fast path for a simple Var */
4745 
4746  if (IsA(basenode, Var) &&
4747  (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4748  {
4749  Var *var = (Var *) basenode;
4750 
4751  /* Set up result fields other than the stats tuple */
4752  vardata->var = basenode; /* return Var without relabeling */
4753  vardata->rel = find_base_rel(root, var->varno);
4754  vardata->atttype = var->vartype;
4755  vardata->atttypmod = var->vartypmod;
4756  vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4757 
4758  /* Try to locate some stats */
4759  examine_simple_variable(root, var, vardata);
4760 
4761  return;
4762  }
4763 
4764  /*
4765  * Okay, it's a more complicated expression. Determine variable
4766  * membership. Note that when varRelid isn't zero, only vars of that
4767  * relation are considered "real" vars.
4768  */
4769  varnos = pull_varnos(basenode);
4770 
4771  onerel = NULL;
4772 
4773  switch (bms_membership(varnos))
4774  {
4775  case BMS_EMPTY_SET:
4776  /* No Vars at all ... must be pseudo-constant clause */
4777  break;
4778  case BMS_SINGLETON:
4779  if (varRelid == 0 || bms_is_member(varRelid, varnos))
4780  {
4781  onerel = find_base_rel(root,
4782  (varRelid ? varRelid : bms_singleton_member(varnos)));
4783  vardata->rel = onerel;
4784  node = basenode; /* strip any relabeling */
4785  }
4786  /* else treat it as a constant */
4787  break;
4788  case BMS_MULTIPLE:
4789  if (varRelid == 0)
4790  {
4791  /* treat it as a variable of a join relation */
4792  vardata->rel = find_join_rel(root, varnos);
4793  node = basenode; /* strip any relabeling */
4794  }
4795  else if (bms_is_member(varRelid, varnos))
4796  {
4797  /* ignore the vars belonging to other relations */
4798  vardata->rel = find_base_rel(root, varRelid);
4799  node = basenode; /* strip any relabeling */
4800  /* note: no point in expressional-index search here */
4801  }
4802  /* else treat it as a constant */
4803  break;
4804  }
4805 
4806  bms_free(varnos);
4807 
4808  vardata->var = node;
4809  vardata->atttype = exprType(node);
4810  vardata->atttypmod = exprTypmod(node);
4811 
4812  if (onerel)
4813  {
4814  /*
4815  * We have an expression in vars of a single relation. Try to match
4816  * it to expressional index columns, in hopes of finding some
4817  * statistics.
4818  *
4819  * Note that we consider all index columns including INCLUDE columns,
4820  * since there could be stats for such columns. But the test for
4821  * uniqueness needs to be warier.
4822  *
4823  * XXX it's conceivable that there are multiple matches with different
4824  * index opfamilies; if so, we need to pick one that matches the
4825  * operator we are estimating for. FIXME later.
4826  */
4827  ListCell *ilist;
4828 
4829  foreach(ilist, onerel->indexlist)
4830  {
4831  IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4832  ListCell *indexpr_item;
4833  int pos;
4834 
4835  indexpr_item = list_head(index->indexprs);
4836  if (indexpr_item == NULL)
4837  continue; /* no expressions here... */
4838 
4839  for (pos = 0; pos < index->ncolumns; pos++)
4840  {
4841  if (index->indexkeys[pos] == 0)
4842  {
4843  Node *indexkey;
4844 
4845  if (indexpr_item == NULL)
4846  elog(ERROR, "too few entries in indexprs list");
4847  indexkey = (Node *) lfirst(indexpr_item);
4848  if (indexkey && IsA(indexkey, RelabelType))
4849  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4850  if (equal(node, indexkey))
4851  {
4852  /*
4853  * Found a match ... is it a unique index? Tests here
4854  * should match has_unique_index().
4855  */
4856  if (index->unique &&
4857  index->nkeycolumns == 1 &&
4858  pos == 0 &&
4859  (index->indpred == NIL || index->predOK))
4860  vardata->isunique = true;
4861 
4862  /*
4863  * Has it got stats? We only consider stats for
4864  * non-partial indexes, since partial indexes probably
4865  * don't reflect whole-relation statistics; the above
4866  * check for uniqueness is the only info we take from
4867  * a partial index.
4868  *
4869  * An index stats hook, however, must make its own
4870  * decisions about what to do with partial indexes.
4871  */
4872  if (get_index_stats_hook &&
4873  (*get_index_stats_hook) (root, index->indexoid,
4874  pos + 1, vardata))
4875  {
4876  /*
4877  * The hook took control of acquiring a stats
4878  * tuple. If it did supply a tuple, it'd better
4879  * have supplied a freefunc.
4880  */
4881  if (HeapTupleIsValid(vardata->statsTuple) &&
4882  !vardata->freefunc)
4883  elog(ERROR, "no function provided to release variable stats with");
4884  }
4885  else if (index->indpred == NIL)
4886  {
4887  vardata->statsTuple =
4888  SearchSysCache3(STATRELATTINH,
4889  ObjectIdGetDatum(index->indexoid),
4890  Int16GetDatum(pos + 1),
4891  BoolGetDatum(false));
4892  vardata->freefunc = ReleaseSysCache;
4893 
4894  if (HeapTupleIsValid(vardata->statsTuple))
4895  {
4896  /* Get index's table for permission check */
4897  RangeTblEntry *rte;
4898  Oid userid;
4899 
4900  rte = planner_rt_fetch(index->rel->relid, root);
4901  Assert(rte->rtekind == RTE_RELATION);
4902 
4903  /*
4904  * Use checkAsUser if it's set, in case we're
4905  * accessing the table via a view.
4906  */
4907  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
4908 
4909  /*
4910  * For simplicity, we insist on the whole
4911  * table being selectable, rather than trying
4912  * to identify which column(s) the index
4913  * depends on. Also require all rows to be
4914  * selectable --- there must be no
4915  * securityQuals from security barrier views
4916  * or RLS policies.
4917  */
4918  vardata->acl_ok =
4919  rte->securityQuals == NIL &&
4920  (pg_class_aclcheck(rte->relid, userid,
4921  ACL_SELECT) == ACLCHECK_OK);
4922 
4923  /*
4924  * If the user doesn't have permissions to
4925  * access an inheritance child relation, check
4926  * the permissions of the table actually
4927  * mentioned in the query, since most likely
4928  * the user does have that permission. Note
4929  * that whole-table select privilege on the
4930  * parent doesn't quite guarantee that the
4931  * user could read all columns of the child.
4932  * But in practice it's unlikely that any
4933  * interesting security violation could result
4934  * from allowing access to the expression
4935  * index's stats, so we allow it anyway. See
4936  * similar code in examine_simple_variable()
4937  * for additional comments.
4938  */
4939  if (!vardata->acl_ok &&
4940  root->append_rel_array != NULL)
4941  {
4942  AppendRelInfo *appinfo;
4943  Index varno = index->rel->relid;
4944 
4945  appinfo = root->append_rel_array[varno];
4946  while (appinfo &&
4947  planner_rt_fetch(appinfo->parent_relid,
4948  root)->rtekind == RTE_RELATION)
4949  {
4950  varno = appinfo->parent_relid;
4951  appinfo = root->append_rel_array[varno];
4952  }
4953  if (varno != index->rel->relid)
4954  {
4955  /* Repeat access check on this rel */
4956  rte = planner_rt_fetch(varno, root);
4957  Assert(rte->rtekind == RTE_RELATION);
4958 
4959  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
4960 
4961  vardata->acl_ok =
4962  rte->securityQuals == NIL &&
4963  (pg_class_aclcheck(rte->relid,
4964  userid,
4965  ACL_SELECT) == ACLCHECK_OK);
4966  }
4967  }
4968  }
4969  else
4970  {
4971  /* suppress leakproofness checks later */
4972  vardata->acl_ok = true;
4973  }
4974  }
4975  if (vardata->statsTuple)
4976  break;
4977  }
4978  indexpr_item = lnext(index->indexprs, indexpr_item);
4979  }
4980  }
4981  if (vardata->statsTuple)
4982  break;
4983  }
4984  }
4985 }
4986 
4987 /*
4988  * examine_simple_variable
4989  * Handle a simple Var for examine_variable
4990  *
4991  * This is split out as a subroutine so that we can recurse to deal with
4992  * Vars referencing subqueries.
4993  *
4994  * We already filled in all the fields of *vardata except for the stats tuple.
4995  */
4996 static void
4997 examine_simple_variable(PlannerInfo *root, Var *var,
4998  VariableStatData *vardata)
4999 {
5000  RangeTblEntry *rte = root->simple_rte_array[var->varno];
5001 
5002  Assert(IsA(rte, RangeTblEntry));
5003 
5004  if (get_relation_stats_hook &&
5005  (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
5006  {
5007  /*
5008  * The hook took control of acquiring a stats tuple. If it did supply
5009  * a tuple, it'd better have supplied a freefunc.
5010  */
5011  if (HeapTupleIsValid(vardata->statsTuple) &&
5012  !vardata->freefunc)
5013  elog(ERROR, "no function provided to release variable stats with");
5014  }
5015  else if (rte->rtekind == RTE_RELATION)
5016  {
5017  /*
5018  * Plain table or parent of an inheritance appendrel, so look up the
5019  * column in pg_statistic
5020  */
5021  vardata->statsTuple = SearchSysCache3(STATRELATTINH,
5022  ObjectIdGetDatum(rte->relid),
5023  Int16GetDatum(var->varattno),
5024  BoolGetDatum(rte->inh));
5025  vardata->freefunc = ReleaseSysCache;
5026 
5027  if (HeapTupleIsValid(vardata->statsTuple))
5028  {
5029  Oid userid;
5030 
5031  /*
5032  * Check if user has permission to read this column. We require
5033  * all rows to be accessible, so there must be no securityQuals
5034  * from security barrier views or RLS policies. Use checkAsUser
5035  * if it's set, in case we're accessing the table via a view.
5036  */
5037  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5038 
5039  vardata->acl_ok =
5040  rte->securityQuals == NIL &&
5041  ((pg_class_aclcheck(rte->relid, userid,
5042  ACL_SELECT) == ACLCHECK_OK) ||
5043  (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
5044  ACL_SELECT) == ACLCHECK_OK));
5045 
5046  /*
5047  * If the user doesn't have permissions to access an inheritance
5048  * child relation or specifically this attribute, check the
5049  * permissions of the table/column actually mentioned in the
5050  * query, since most likely the user does have that permission
5051  * (else the query will fail at runtime), and if the user can read
5052  * the column there then he can get the values of the child table
5053  * too. To do that, we must find out which of the root parent's
5054  * attributes the child relation's attribute corresponds to.
5055  */
5056  if (!vardata->acl_ok && var->varattno > 0 &&
5057  root->append_rel_array != NULL)
5058  {
5059  AppendRelInfo *appinfo;
5060  Index varno = var->varno;
5061  int varattno = var->varattno;
5062  bool found = false;
5063 
5064  appinfo = root->append_rel_array[varno];
5065 
5066  /*
5067  * Partitions are mapped to their immediate parent, not the
5068  * root parent, so must be ready to walk up multiple
5069  * AppendRelInfos. But stop if we hit a parent that is not
5070  * RTE_RELATION --- that's a flattened UNION ALL subquery, not
5071  * an inheritance parent.
5072  */
5073  while (appinfo &&
5074  planner_rt_fetch(appinfo->parent_relid,
5075  root)->rtekind == RTE_RELATION)
5076  {
5077  int parent_varattno;
5078 
5079  found = false;
5080  if (varattno <= 0 || varattno > appinfo->num_child_cols)
5081  break; /* safety check */
5082  parent_varattno = appinfo->parent_colnos[varattno - 1];
5083  if (parent_varattno == 0)
5084  break; /* Var is local to child */
5085 
5086  varno = appinfo->parent_relid;
5087  varattno = parent_varattno;
5088  found = true;
5089 
5090  /* If the parent is itself a child, continue up. */
5091  appinfo = root->append_rel_array[varno];
5092  }
5093 
5094  /*
5095  * In rare cases, the Var may be local to the child table, in
5096  * which case, we've got to live with having no access to this
5097  * column's stats.
5098  */
5099  if (!found)
5100  return;
5101 
5102  /* Repeat the access check on this parent rel & column */
5103  rte = planner_rt_fetch(varno, root);
5104  Assert(rte->rtekind == RTE_RELATION);
5105 
5106  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5107 
5108  vardata->acl_ok =
5109  rte->securityQuals == NIL &&
5110  ((pg_class_aclcheck(rte->relid, userid,
5111  ACL_SELECT) == ACLCHECK_OK) ||
5112  (pg_attribute_aclcheck(rte->relid, varattno, userid,
5113  ACL_SELECT) == ACLCHECK_OK));
5114  }
5115  }
5116  else
5117  {
5118  /* suppress any possible leakproofness checks later */
5119  vardata->acl_ok = true;
5120  }
5121  }
5122  else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
5123  {
5124  /*
5125  * Plain subquery (not one that was converted to an appendrel).
5126  */
5127  Query *subquery = rte->subquery;
5128  RelOptInfo *rel;
5129  TargetEntry *ste;
5130 
5131  /*
5132  * Punt if it's a whole-row var rather than a plain column reference.
5133  */
5134  if (var->varattno == InvalidAttrNumber)
5135  return;
5136 
5137  /*
5138  * Punt if subquery uses set operations or GROUP BY, as these will
5139  * mash underlying columns' stats beyond recognition. (Set ops are
5140  * particularly nasty; if we forged ahead, we would return stats
5141  * relevant to only the leftmost subselect...) DISTINCT is also
5142  * problematic, but we check that later because there is a possibility
5143  * of learning something even with it.
5144  */
5145  if (subquery->setOperations ||
5146  subquery->groupClause)
5147  return;
5148 
5149  /*
5150  * OK, fetch RelOptInfo for subquery. Note that we don't change the
5151  * rel returned in vardata, since caller expects it to be a rel of the
5152  * caller's query level. Because we might already be recursing, we
5153  * can't use that rel pointer either, but have to look up the Var's
5154  * rel afresh.
5155  */
5156  rel = find_base_rel(root, var->varno);
5157 
5158  /* If the subquery hasn't been planned yet, we have to punt */
5159  if (rel->subroot == NULL)
5160  return;
5161  Assert(IsA(rel->subroot, PlannerInfo));
5162 
5163  /*
5164  * Switch our attention to the subquery as mangled by the planner. It
5165  * was okay to look at the pre-planning version for the tests above,
5166  * but now we need a Var that will refer to the subroot's live
5167  * RelOptInfos. For instance, if any subquery pullup happened during
5168  * planning, Vars in the targetlist might have gotten replaced, and we
5169  * need to see the replacement expressions.
5170  */
5171  subquery = rel->subroot->parse;
5172  Assert(IsA(subquery, Query));
5173 
5174  /* Get the subquery output expression referenced by the upper Var */
5175  ste = get_tle_by_resno(subquery->targetList, var->varattno);
5176  if (ste == NULL || ste->resjunk)
5177  elog(ERROR, "subquery %s does not have attribute %d",
5178  rte->eref->aliasname, var->varattno);
5179  var = (Var *) ste->expr;
5180 
5181  /*
5182  * If subquery uses DISTINCT, we can't make use of any stats for the
5183  * variable ... but, if it's the only DISTINCT column, we are entitled
5184  * to consider it unique. We do the test this way so that it works
5185  * for cases involving DISTINCT ON.
5186  */
5187  if (subquery->distinctClause)
5188  {
5189  if (list_length(subquery->distinctClause) == 1 &&
5190  targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
5191  vardata->isunique = true;
5192  /* cannot go further */
5193  return;
5194  }
5195 
5196  /*
5197  * If the sub-query originated from a view with the security_barrier
5198  * attribute, we must not look at the variable's statistics, though it
5199  * seems all right to notice the existence of a DISTINCT clause. So
5200  * stop here.
5201  *
5202  * This is probably a harsher restriction than necessary; it's
5203  * certainly OK for the selectivity estimator (which is a C function,
5204  * and therefore omnipotent anyway) to look at the statistics. But
5205  * many selectivity estimators will happily *invoke the operator
5206  * function* to try to work out a good estimate - and that's not OK.
5207  * So for now, don't dig down for stats.
5208  */
5209  if (rte->security_barrier)
5210  return;
5211 
5212  /* Can only handle a simple Var of subquery's query level */
5213  if (var && IsA(var, Var) &&
5214  var->varlevelsup == 0)
5215  {
5216  /*
5217  * OK, recurse into the subquery. Note that the original setting
5218  * of vardata->isunique (which will surely be false) is left
5219  * unchanged in this situation. That's what we want, since even
5220  * if the underlying column is unique, the subquery may have
5221  * joined to other tables in a way that creates duplicates.
5222  */
5223  examine_simple_variable(rel->subroot, var, vardata);
5224  }
5225  }
5226  else
5227  {
5228  /*
5229  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
5230  * won't see RTE_JOIN here because join alias Vars have already been
5231  * flattened.) There's not much we can do with function outputs, but
5232  * maybe someday try to be smarter about VALUES and/or CTEs.
5233  */
5234  }
5235 }
5236 
5237 /*
5238  * Check whether it is permitted to call func_oid passing some of the
5239  * pg_statistic data in vardata. We allow this either if the user has SELECT
5240  * privileges on the table or column underlying the pg_statistic data or if
5241  * the function is marked leak-proof.
5242  */
5243 bool
5244 statistic_proc_security_check(VariableStatData *vardata, Oid func_oid)
5245 {
5246  if (vardata->acl_ok)
5247  return true;
5248 
5249  if (!OidIsValid(func_oid))
5250  return false;
5251 
5252  if (get_func_leakproof(func_oid))
5253  return true;
5254 
5255  ereport(DEBUG2,
5256  (errmsg_internal("not using statistics because function \"%s\" is not leak-proof",
5257  get_func_name(func_oid))));
5258  return false;
5259 }
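/*
 * Usage sketch (hypothetical function, not taken from this file): the
 * typical pattern for estimators that want to run the comparison operator
 * over pg_statistic MCV entries is to look up the operator's function and
 * proceed only if statistic_proc_security_check allows it.  The headers
 * already included by this file provide everything referenced below.
 */
static double
example_mcv_match_fraction(VariableStatData *vardata, Oid operator,
                           Datum constval)
{
    Oid         opfuncoid = get_opcode(operator);
    AttStatsSlot sslot;
    double      matchfrac = 0.0;

    /* Bail out if there are no stats or we may not apply the operator */
    if (!HeapTupleIsValid(vardata->statsTuple) ||
        !statistic_proc_security_check(vardata, opfuncoid))
        return 0.0;

    if (get_attstatsslot(&sslot, vardata->statsTuple,
                         STATISTIC_KIND_MCV, InvalidOid,
                         ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    {
        FmgrInfo    opproc;

        fmgr_info(opfuncoid, &opproc);
        /* Sum the frequencies of the MCVs that satisfy the operator */
        for (int i = 0; i < sslot.nvalues; i++)
        {
            if (DatumGetBool(FunctionCall2(&opproc,
                                           sslot.values[i], constval)))
                matchfrac += sslot.numbers[i];
        }
        free_attstatsslot(&sslot);
    }
    return matchfrac;
}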
5260 
5261 /*
5262  * get_variable_numdistinct
5263  * Estimate the number of distinct values of a variable.
5264  *
5265  * vardata: results of examine_variable
5266  * *isdefault: set to true if the result is a default rather than based on
5267  * anything meaningful.
5268  *
5269  * NB: be careful to produce a positive integral result, since callers may
5270  * compare the result to exact integer counts, or might divide by it.
5271  */
5272 double
5273 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
5274 {
5275  double stadistinct;
5276  double stanullfrac = 0.0;
5277  double ntuples;
5278 
5279  *isdefault = false;
5280 
5281  /*
5282  * Determine the stadistinct value to use. There are cases where we can
5283  * get an estimate even without a pg_statistic entry, or can get a better
5284  * value than is in pg_statistic. Grab stanullfrac too if we can find it
5285  * (otherwise, assume no nulls, for lack of any better idea).
5286  */
5287  if (HeapTupleIsValid(vardata->statsTuple))
5288  {
5289  /* Use the pg_statistic entry */
5290  Form_pg_statistic stats;
5291 
5292  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
5293  stadistinct = stats->stadistinct;
5294  stanullfrac = stats->stanullfrac;
5295  }
5296  else if (vardata->vartype == BOOLOID)
5297  {
5298  /*
5299  * Special-case boolean columns: presumably, two distinct values.
5300  *
5301  * Are there any other datatypes we should wire in special estimates
5302  * for?
5303  */
5304  stadistinct = 2.0;
5305  }
5306  else if (vardata->rel && vardata->rel->rtekind == RTE_VALUES)
5307  {
5308  /*
5309  * If the Var represents a column of a VALUES RTE, assume it's unique.
5310  * This could of course be very wrong, but it should tend to be true
5311  * in well-written queries. We could consider examining the VALUES'
5312  * contents to get some real statistics; but that only works if the
5313  * entries are all constants, and it would be pretty expensive anyway.
5314  */
5315  stadistinct = -1.0; /* unique (and all non null) */
5316  }
5317  else
5318  {
5319  /*
5320  * We don't keep statistics for system columns, but in some cases we
5321  * can infer distinctness anyway.
5322  */
5323  if (vardata->var && IsA(vardata->var, Var))
5324  {
5325  switch (((Var *) vardata->var)->varattno)
5326  {
5327  case SelfItemPointerAttributeNumber:
5328  stadistinct = -1.0; /* unique (and all non null) */
5329  break;
5330  case TableOidAttributeNumber:
5331  stadistinct = 1.0; /* only 1 value */
5332  break;
5333  default:
5334  stadistinct = 0.0; /* means "unknown" */
5335  break;
5336  }
5337  }
5338  else
5339  stadistinct = 0.0; /* means "unknown" */
5340 
5341  /*
5342  * XXX consider using estimate_num_groups on expressions?
5343  */
5344  }
5345 
5346  /*
5347  * If there is a unique index or DISTINCT clause for the variable, assume
5348  * it is unique no matter what pg_statistic says; the statistics could be
5349  * out of date, or we might have found a partial unique index that proves
5350  * the var is unique for this query. However, we'd better still believe
5351  * the null-fraction statistic.
5352  */
5353  if (vardata->isunique)
5354  stadistinct = -1.0 * (1.0 - stanullfrac);
5355 
5356  /*
5357  * If we had an absolute estimate, use that.
5358  */
5359  if (stadistinct > 0.0)
5360  return clamp_row_est(stadistinct);
5361 
5362  /*
5363  * Otherwise we need to get the relation size; punt if not available.
5364  */
5365  if (vardata->rel == NULL)
5366  {
5367  *isdefault = true;
5368  return DEFAULT_NUM_DISTINCT;
5369  }
5370  ntuples = vardata->rel->tuples;
5371  if (ntuples <= 0.0)
5372  {
5373  *isdefault = true;
5374  return DEFAULT_NUM_DISTINCT;
5375  }
5376 
5377  /*
5378  * If we had a relative estimate, use that.
5379  */
5380  if (stadistinct < 0.0)
5381  return clamp_row_est(-stadistinct * ntuples);
5382 
5383  /*
5384  * With no data, estimate ndistinct = ntuples if the table is small, else
5385  * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5386  * that the behavior isn't discontinuous.
5387  */
5388  if (ntuples < DEFAULT_NUM_DISTINCT)
5389  return clamp_row_est(ntuples);
5390 
5391  *isdefault = true;
5392  return DEFAULT_NUM_DISTINCT;
5393 }
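/*
 * Worked example (illustrative numbers): suppose the variable's relation
 * has ntuples = 100000 and DEFAULT_NUM_DISTINCT is 200.  Then
 *   stadistinct = 250.0  ->  250     (absolute estimate, clamped)
 *   stadistinct = -1.0   ->  100000  (declared unique: one value per row)
 *   stadistinct = -0.2   ->  20000   (relative estimate: 20% of the rows)
 *   stadistinct = 0.0    ->  200 with *isdefault = true ("unknown", and
 *                            the table is too big to assume all-distinct)
 * A 50-row table with stadistinct = 0.0 would instead yield 50, since
 * ntuples < DEFAULT_NUM_DISTINCT.
 */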
5394 
5395 /*
5396  * get_variable_range
5397  * Estimate the minimum and maximum value of the specified variable.
5398  * If successful, store values in *min and *max, and return true.
5399  * If no data available, return false.
5400  *
5401  * sortop is the "<" comparison operator to use. This should generally
5402  * be "<" not ">", as only the former is likely to be found in pg_statistic.
5403  * The collation must be specified too.
5404  */
5405 static bool
5406 get_variable_range(PlannerInfo *root, VariableStatData *vardata,
5407  Oid sortop, Oid collation,
5408  Datum *min, Datum *max)
5409 {
5410  Datum tmin = 0;
5411  Datum tmax = 0;
5412  bool have_data = false;
5413  int16 typLen;
5414  bool typByVal;
5415  Oid opfuncoid;
5416  FmgrInfo opproc;
5417  AttStatsSlot sslot;
5418 
5419  /*
5420  * XXX It's very tempting to try to use the actual column min and max, if
5421  * we can get them relatively-cheaply with an index probe. However, since
5422  * this function is called many times during join planning, that could
5423  * have unpleasant effects on planning speed. Need more investigation
5424  * before enabling this.
5425  */
5426 #ifdef NOT_USED
5427  if (get_actual_variable_range(root, vardata, sortop, collation, min, max))
5428  return true;
5429 #endif
5430 
5431  if (!HeapTupleIsValid(vardata->statsTuple))
5432  {
5433  /* no stats available, so default result */
5434  return false;
5435  }
5436 
5437  /*
5438  * If we can't apply the sortop to the stats data, just fail. In
5439  * principle, if there's a histogram and no MCVs, we could return the
5440  * histogram endpoints without ever applying the sortop ... but it's
5441  * probably not worth trying, because whatever the caller wants to do with
5442  * the endpoints would likely fail the security check too.
5443  */
5444  if (!statistic_proc_security_check(vardata,
5445  (opfuncoid = get_opcode(sortop))))
5446  return false;
5447 
5448  opproc.fn_oid = InvalidOid; /* mark this as not looked up yet */
5449 
5450  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5451 
5452  /*
5453  * If there is a histogram with the ordering we want, grab the first and
5454  * last values.
5455  */
5456  if (get_attstatsslot(&sslot, vardata->statsTuple,
5457  STATISTIC_KIND_HISTOGRAM, sortop,
5458  ATTSTATSSLOT_VALUES))
5459  {
5460  if (sslot.stacoll == collation && sslot.nvalues > 0)
5461  {
5462  tmin = datumCopy(sslot.values[0], typByVal, typLen);
5463  tmax = datumCopy(sslot.values[sslot.nvalues - 1], typByVal, typLen);
5464  have_data = true;
5465  }
5466  free_attstatsslot(&sslot);
5467  }
5468 
5469  /*
5470  * Otherwise, if there is a histogram with some other ordering, scan it
5471  * and get the min and max values according to the ordering we want. This
5472  * of course may not find values that are really extremal according to our
5473  * ordering, but it beats ignoring available data.
5474  */
5475  if (!have_data &&
5476  get_attstatsslot(&sslot, vardata->statsTuple,
5477  STATISTIC_KIND_HISTOGRAM, InvalidOid,
5478  ATTSTATSSLOT_VALUES))
5479  {
5480  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5481  collation, typLen, typByVal,
5482  &tmin, &tmax, &have_data);
5483  free_attstatsslot(&sslot);
5484  }
5485 
5486  /*
5487  * If we have most-common-values info, look for extreme MCVs. This is
5488  * needed even if we also have a histogram, since the histogram excludes
5489  * the MCVs.
5490  */
5491  if (get_attstatsslot(&sslot, vardata->statsTuple,
5492  STATISTIC_KIND_MCV, InvalidOid,
5493  ATTSTATSSLOT_VALUES))
5494  {
5495  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5496  collation, typLen, typByVal,
5497  &tmin, &tmax, &have_data);
5498  free_attstatsslot(&sslot);
5499  }
5500 
5501  *min = tmin;
5502  *max = tmax;
5503  return have_data;
5504 }
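/*
 * Usage sketch (hypothetical caller, not taken from this file): given an
 * already-examined variable, the type's "<" operator and a collation,
 * fetch the stats-derived extremes and fall back to a default assumption
 * when no statistics are available.
 */
static void
example_use_variable_range(PlannerInfo *root, VariableStatData *vardata,
                           Oid lt_opr, Oid collation)
{
    Datum       minval,
                maxval;

    if (get_variable_range(root, vardata, lt_opr, collation,
                           &minval, &maxval))
    {
        /* minval/maxval now hold copies of the histogram/MCV extremes */
    }
    else
    {
        /* no usable statistics; assume a default range instead */
    }
}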
5505 
5506 /*
5507  * get_stats_slot_range: scan sslot for min/max values
5508  *
5509  * Subroutine for get_variable_range: update min/max/have_data according
5510  * to what we find in the statistics array.
5511  */
5512 static void
5513 get_stats_slot_range(AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc,
5514  Oid collation, int16 typLen, bool typByVal,
5515  Datum *min, Datum *max, bool *p_have_data)
5516 {
5517  Datum tmin = *min;
5518  Datum tmax = *max;
5519  bool have_data = *p_have_data;
5520  bool found_tmin = false;
5521  bool found_tmax = false;
5522 
5523  /* Look up the comparison function, if we didn't already do so */
5524  if (opproc->fn_oid != opfuncoid)
5525  fmgr_info(opfuncoid, opproc);
5526 
5527  /* Scan all the slot's values */
5528  for (int i = 0; i < sslot->nvalues; i++)
5529  {
5530  if (!have_data)
5531  {
5532  tmin = tmax = sslot->values[i];
5533  found_tmin = found_tmax = true;
5534  *p_have_data = have_data = true;
5535  continue;
5536  }
5537  if (DatumGetBool(FunctionCall2Coll(opproc,
5538  collation,
5539  sslot->values[i], tmin)))
5540  {
5541  tmin = sslot->values[i];
5542  found_tmin = true;
5543  }
5544  if (DatumGetBool(FunctionCall2Coll(opproc,
5545  collation,
5546  tmax, sslot->values[i])))
5547  {
5548  tmax = sslot->values[i];
5549  found_tmax = true;
5550  }
5551  }
5552 
5553  /*
5554  * Copy the slot's values, if we found new extreme values.
5555  */
5556  if (found_tmin)
5557  *min = datumCopy(tmin, typByVal, typLen);
5558  if (found_tmax)
5559  *max = datumCopy(tmax, typByVal, typLen);
5560 }
5561 
5562 
5563 /*
5564  * get_actual_variable_range
5565  * Attempt to identify the current *actual* minimum and/or maximum
5566  * of the specified variable, by looking for a suitable btree index
5567  * and fetching its low and/or high values.
5568  * If successful, store values in *min and *max, and return true.
5569  * (Either pointer can be NULL if that endpoint isn't needed.)
5570  * If no data available, return false.
5571  *
5572  * sortop is the "<" comparison operator to use.
5573  * collation is the required collation.
5574  */
5575 static bool
5576 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
5577  Oid sortop, Oid collation,
5578  Datum *min, Datum *max)
5579 {
5580  bool have_data = false;
5581  RelOptInfo *rel = vardata->rel;
5582  RangeTblEntry *rte;
5583  ListCell *lc;
5584 
5585  /* No hope if no relation or it doesn't have indexes */
5586  if (rel == NULL || rel->indexlist == NIL)
5587  return false;
5588  /* If it has indexes it must be a plain relation */
5589  rte = root->simple_rte_array[rel->relid];
5590  Assert(rte->rtekind == RTE_RELATION);
5591 
5592  /* Search through the indexes to see if any match our problem */
5593  foreach(lc, rel->indexlist)
5594  {
5595  IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5596  ScanDirection indexscandir;
5597 
5598  /* Ignore non-btree indexes */
5599  if (index->relam != BTREE_AM_OID)
5600  continue;
5601 
5602  /*
5603  * Ignore partial indexes --- we only want stats that cover the entire
5604  * relation.
5605  */
5606  if (index->indpred != NIL)
5607  continue;
5608 
5609  /*
5610  * The index list might include hypothetical indexes inserted by a
5611  * get_relation_info hook --- don't try to access them.
5612  */
5613  if (index->hypothetical)
5614  continue;
5615 
5616  /*
5617  * The first index column must match the desired variable, sortop, and
5618  * collation --- but we can use a descending-order index.
5619  */
5620  if (collation != index->indexcollations[0])
5621  continue; /* test first 'cause it's cheapest */
5622  if (!match_index_to_operand(vardata->var, 0, index))
5623  continue;
5624  switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
5625  {
5626  case BTLessStrategyNumber:
5627  if (index->reverse_sort[0])
5628  indexscandir = BackwardScanDirection;
5629  else
5630  indexscandir = ForwardScanDirection;
5631  break;
5632  case BTGreaterStrategyNumber:
5633  if (index->reverse_sort[0])
5634  indexscandir = ForwardScanDirection;
5635  else
5636  indexscandir = BackwardScanDirection;
5637  break;
5638  default:
5639  /* index doesn't match the sortop */
5640  continue;
5641  }
5642 
5643  /*
5644  * Found a suitable index to extract data from. Set up some data that
5645  * can be used by both invocations of get_actual_variable_endpoint.
5646  */
5647  {
5648  MemoryContext tmpcontext;
5649  MemoryContext oldcontext;
5650  Relation heapRel;
5651  Relation indexRel;
5652  TupleTableSlot *slot;
5653  int16 typLen;
5654  bool typByVal;
5655  ScanKeyData scankeys[1];
5656 
5657  /* Make sure any cruft gets recycled when we're done */
5658  tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
5659  "get_actual_variable_range workspace",
5660  ALLOCSET_DEFAULT_SIZES);
5661  oldcontext = MemoryContextSwitchTo(tmpcontext);
5662 
5663  /*
5664  * Open the table and index so we can read from them. We should
5665  * already have some type of lock on each.
5666  */
5667  heapRel = table_open(rte->relid, NoLock);
5668  indexRel = index_open(index->indexoid, NoLock);
5669 
5670  /* build some stuff needed for indexscan execution */
5671  slot = table_slot_create(heapRel, NULL);
5672  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5673 
5674  /* set up an IS NOT NULL scan key so that we ignore nulls */
5675  ScanKeyEntryInitialize(&scankeys[0],
5676  SK_ISNULL | SK_SEARCHNOTNULL,
5677  1, /* index col to scan */
5678  InvalidStrategy, /* no strategy */
5679  InvalidOid, /* no strategy subtype */
5680  InvalidOid, /* no collation */
5681  InvalidOid, /* no reg proc for this */
5682  (Datum) 0); /* constant */
5683 
5684  /* If min is requested ... */
5685  if (min)
5686  {
5687  have_data = get_actual_variable_endpoint(heapRel,
5688  indexRel,
5689  indexscandir,
5690  scankeys,
5691  typLen,
5692  typByVal,
5693  slot,
5694  oldcontext,
5695  min);
5696  }
5697  else
5698  {
5699  /* If min not requested, assume index is nonempty */
5700  have_data = true;
5701  }
5702 
5703  /* If max is requested, and we didn't find the index is empty */
5704  if (max && have_data)
5705  {
5706  /* scan in the opposite direction; all else is the same */
5707  have_data = get_actual_variable_endpoint(heapRel,
5708  indexRel,
5709  -indexscandir,
5710  scankeys,
5711  typLen,
5712  typByVal,
5713  slot,
5714  oldcontext,
5715  max);
5716  }
5717 
5718  /* Clean everything up */
5719  ExecDropSingleTupleTableSlot(slot);
5720 
5721  index_close(indexRel, NoLock);
5722  table_close(heapRel, NoLock);
5723 
5724  MemoryContextSwitchTo(oldcontext);
5725  MemoryContextDelete(tmpcontext);
5726 
5727  /* And we're done */
5728  break;
5729  }
5730  }
5731 
5732  return have_data;
5733 }
5734 
5735 /*
5736  * Get one endpoint datum (min or max depending on indexscandir) from the
5737  * specified index. Return true if successful, false if index is empty.
5738  * On success, endpoint value is stored to *endpointDatum (and copied into
5739  * outercontext).
5740  *
5741  * scankeys is a 1-element scankey array set up to reject nulls.
5742  * typLen/typByVal describe the datatype of the index's first column.
5743  * tableslot is a slot suitable to hold table tuples, in case we need
5744  * to probe the heap.
5745  * (We could compute these values locally, but that would mean computing them
5746  * twice when get_actual_variable_range needs both the min and the max.)
5747  */
5748 static bool
5749 get_actual_variable_endpoint(Relation heapRel,
5750  Relation indexRel,
5751  ScanDirection indexscandir,
5752  ScanKey scankeys,
5753  int16 typLen,
5754  bool typByVal,
5755  TupleTableSlot *tableslot,
5756  MemoryContext outercontext,
5757  Datum *endpointDatum)
5758 {
5759  bool have_data = false;
5760  SnapshotData SnapshotNonVacuumable;
5761  IndexScanDesc index_scan;
5762  Buffer vmbuffer = InvalidBuffer;
5763  ItemPointer tid;
5764  Datum values[INDEX_MAX_KEYS];
5765  bool isnull[INDEX_MAX_KEYS];
5766  MemoryContext oldcontext;
5767 
5768  /*
5769  * We use the index-only-scan machinery for this. With mostly-static
5770  * tables that's a win because it avoids a heap visit. It's also a win
5771  * for dynamic data, but the reason is less obvious; read on for details.
5772  *
5773  * In principle, we should scan the index with our current active
5774  * snapshot, which is the best approximation we've got to what the query
5775  * will see when executed. But that won't be exact if a new snap is taken
5776  * before running the query, and it can be very expensive if a lot of
5777  * recently-dead or uncommitted rows exist at the beginning or end of the
5778  * index (because we'll laboriously fetch each one and reject it).
5779  * Instead, we use SnapshotNonVacuumable. That will accept recently-dead
5780  * and uncommitted rows as well as normal visible rows. On the other
5781  * hand, it will reject known-dead rows, and thus not give a bogus answer
5782  * when the extreme value has been deleted (unless the deletion was quite
5783  * recent); that case motivates not using SnapshotAny here.
5784  *
5785  * A crucial point here is that SnapshotNonVacuumable, with
5786  * RecentGlobalXmin as horizon, yields the inverse of the condition that
5787  * the indexscan will use to decide that index entries are killable (see
5788  * heap_hot_search_buffer()). Therefore, if the snapshot rejects a tuple
5789  * (or more precisely, all tuples of a HOT chain) and we have to continue
5790  * scanning past it, we know that the indexscan will mark that index entry
5791  * killed. That means that the next get_actual_variable_endpoint() call
5792  * will not have to re-consider that index entry. In this way we avoid
5793  * repetitive work when this function is used a lot during planning.
5794  *
5795  * But using SnapshotNonVacuumable creates a hazard of its own. In a
5796  * recently-created index, some index entries may point at "broken" HOT
5797  * chains in which not all the tuple versions contain data matching the
5798  * index entry. The live tuple version(s) certainly do match the index,
5799  * but SnapshotNonVacuumable can accept recently-dead tuple versions that
5800  * don't match. Hence, if we took data from the selected heap tuple, we
5801  * might get a bogus answer that's not close to the index extremal value,
5802  * or could even be NULL. We avoid this hazard because we take the data
5803  * from the index entry not the heap.
5804  */
5805  InitNonVacuumableSnapshot(SnapshotNonVacuumable, RecentGlobalXmin);
5806 
5807  index_scan = index_beginscan(heapRel, indexRel,
5808  &SnapshotNonVacuumable,
5809  1, 0);
5810  /* Set it up for index-only scan */
5811  index_scan->xs_want_itup = true;
5812  index_rescan(index_scan, scankeys, 1, NULL, 0);
5813 
5814  /* Fetch first/next tuple in specified direction */
5815  while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
5816  {
5817  if (!VM_ALL_VISIBLE(heapRel,
5818  ItemPointerGetBlockNumber(tid),
5819  &vmbuffer))
5820  {
5821  /* Rats, we have to visit the heap to check visibility */
5822  if (!index_fetch_heap(index_scan, tableslot))
5823  continue; /* no visible tuple, try next index entry */
5824 
5825  /* We don't actually need the heap tuple for anything */
5826  ExecClearTuple(tableslot);
5827 
5828  /*
5829  * We don't care whether there's more than one visible tuple in
5830  * the HOT chain; if any are visible, that's good enough.
5831  */
5832  }
5833 
5834  /*
5835  * We expect that btree will return data in IndexTuple not HeapTuple
5836  * format. It's not lossy either.
5837  */
5838  if (!index_scan->xs_itup)
5839  elog(ERROR, "no data returned for index-only scan");
5840  if (index_scan->xs_recheck)
5841  elog(ERROR, "unexpected recheck indication from btree");
5842 
5843  /* OK to deconstruct the index tuple */
5844  index_deform_tuple(index_scan->xs_itup,
5845  index_scan->xs_itupdesc,
5846  values, isnull);
5847 
5848  /* Shouldn't have got a null, but be careful */
5849  if (isnull[0])
5850  elog(ERROR, "found unexpected null value in index \"%s\"",
5851  RelationGetRelationName(indexRel));
5852 
5853  /* Copy the index column value out to caller's context */
5854  oldcontext = MemoryContextSwitchTo(outercontext);
5855  *endpointDatum = datumCopy(values[0], typByVal, typLen);
5856  MemoryContextSwitchTo(oldcontext);
5857  have_data = true;
5858  break;
5859  }
5860 
5861  if (vmbuffer != InvalidBuffer)
5862  ReleaseBuffer(vmbuffer);
5863  index_endscan(index_scan);
5864 
5865  return have_data;
5866 }
5867 
5868 /*
5869  * find_join_input_rel
5870  * Look up the input relation for a join.
5871  *
5872  * We assume that the input relation's RelOptInfo must have been constructed
5873  * already.
5874  */
5875 static RelOptInfo *
5876 find_join_input_rel(PlannerInfo *root, Relids relids)
5877 {
5878  RelOptInfo *rel = NULL;
5879 
5880  switch (bms_membership(relids))
5881  {
5882  case BMS_EMPTY_SET:
5883  /* should not happen */
5884  break;
5885  case BMS_SINGLETON:
5886  rel = find_base_rel(root, bms_singleton_member(relids));
5887  break;
5888  case BMS_MULTIPLE:
5889  rel = find_join_rel(root, relids);
5890  break;
5891  }
5892 
5893  if (rel == NULL)
5894  elog(ERROR, "could not find RelOptInfo for given relids");
5895 
5896  return rel;
5897 }
5898 
5899 
5900 /*-------------------------------------------------------------------------
5901  *
5902  * Index cost estimation functions
5903  *
5904  *-------------------------------------------------------------------------
5905  */
5906 
5907 /*
5908  * Extract the actual indexquals (as RestrictInfos) from an IndexClause list
5909  */
5910 List *
5911 get_quals_from_indexclauses(List *indexclauses)
5912 {
5913  List *result = NIL;
5914  ListCell *lc;
5915 
5916  foreach(lc, indexclauses)
5917  {
5918  IndexClause *iclause = lfirst_node(IndexClause, lc);
5919  ListCell *lc2;
5920 
5921  foreach(lc2, iclause->indexquals)
5922  {
5923  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
5924 
5925  result = lappend(result, rinfo);
5926  }
5927  }
5928  return result;
5929 }
5930 
5931 /*
5932  * Compute the total evaluation cost of the comparison operands in a list
5933  * of index qual expressions. Since we know these will be evaluated just
5934  * once per scan, there's no need to distinguish startup from per-row cost.
5935  *
5936  * This can be used either on the result of get_quals_from_indexclauses(),
5937  * or directly on an indexorderbys list. In both cases, we expect that the
5938  * index key expression is on the left side of binary clauses.
5939  */
5940 Cost
5941 index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
5942 {
5943  Cost qual_arg_cost = 0;
5944  ListCell *lc;
5945 
5946  foreach(lc, indexquals)
5947  {
5948  Expr *clause = (Expr *) lfirst(lc);
5949  Node *other_operand;
5950  QualCost index_qual_cost;
5951 
5952  /*
5953  * Index quals will have RestrictInfos, indexorderbys won't. Look
5954  * through RestrictInfo if present.
5955  */
5956  if (IsA(clause, RestrictInfo))
5957  clause = ((RestrictInfo *) clause)->clause;
5958 
5959  if (IsA(clause, OpExpr))
5960  {
5961  OpExpr *op = (OpExpr *) clause;
5962 
5963  other_operand = (Node *) lsecond(op->args);
5964  }
5965  else if (IsA(clause, RowCompareExpr))
5966  {
5967  RowCompareExpr *rc = (RowCompareExpr *) clause;
5968 
5969  other_operand = (Node *) rc->rargs;
5970  }
5971  else if (IsA(clause, ScalarArrayOpExpr))
5972  {
5973  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
5974 
5975  other_operand = (Node *) lsecond(saop->args);
5976  }
5977  else if (IsA(clause, NullTest))
5978  {
5979  other_operand = NULL;
5980  }
5981  else
5982  {
5983  elog(ERROR, "unsupported indexqual type: %d",
5984  (int) nodeTag(clause));
5985  other_operand = NULL; /* keep compiler quiet */
5986  }
5987 
5988  cost_qual_eval_node(&index_qual_cost, other_operand, root);
5989  qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
5990  }
5991  return qual_arg_cost;
5992 }
5993 
5994 void
5995 genericcostestimate(PlannerInfo *root,
5996  IndexPath *path,
5997  double loop_count,
5998  GenericCosts *costs)
5999 {
6000  IndexOptInfo *index = path->indexinfo;
6001  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
6002  List *indexOrderBys = path->indexorderbys;
6003  Cost indexStartupCost;
6004  Cost indexTotalCost;
6005  Selectivity indexSelectivity;
6006  double indexCorrelation;
6007  double numIndexPages;
6008  double numIndexTuples;
6009  double spc_random_page_cost;
6010  double num_sa_scans;
6011  double num_outer_scans;
6012  double num_scans;
6013  double qual_op_cost;
6014  double qual_arg_cost;
6015  List *selectivityQuals;
6016  ListCell *l;
6017 
6018  /*
6019  * If the index is partial, AND the index predicate with the explicitly
6020  * given indexquals to produce a more accurate idea of the index
6021  * selectivity.
6022  */
6023  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
6024 
6025  /*
6026  * Check for ScalarArrayOpExpr index quals, and estimate the number of
6027  * index scans that will be performed.
6028  */
6029  num_sa_scans = 1;
6030  foreach(l, indexQuals)
6031  {
6032  RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6033 
6034  if (IsA(rinfo->clause, ScalarArrayOpExpr))
6035  {
6036  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6037  int alength = estimate_array_length(lsecond(saop->args));
6038 
6039  if (alength > 1)
6040  num_sa_scans *= alength;
6041  }
6042  }
6043 
6044  /* Estimate the fraction of main-table tuples that will be visited */
6045  indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6046  index->rel->relid,
6047  JOIN_INNER,
6048  NULL);
6049 
6050  /*
6051  * If caller didn't give us an estimate, estimate the number of index
6052  * tuples that will be visited. We do it in this rather peculiar-looking
6053  * way in order to get the right answer for partial indexes.
6054  */
6055  numIndexTuples = costs->numIndexTuples;
6056  if (numIndexTuples <= 0.0)
6057  {
6058  numIndexTuples = indexSelectivity * index->rel->tuples;
6059 
6060  /*
6061  * The above calculation counts all the tuples visited across all
6062  * scans induced by ScalarArrayOpExpr nodes. We want to consider the
6063  * average per-indexscan number, so adjust. This is a handy place to
6064  * round to integer, too. (If caller supplied tuple estimate, it's
6065  * responsible for handling these considerations.)
6066  */
6067  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6068  }
6069 
6070  /*
6071  * We can bound the number of tuples by the index size in any case. Also,
6072  * always estimate at least one tuple is touched, even when
6073  * indexSelectivity estimate is tiny.
6074  */
6075  if (numIndexTuples > index->tuples)
6076  numIndexTuples = index->tuples;
6077  if (numIndexTuples < 1.0)
6078  numIndexTuples = 1.0;
6079 
6080  /*
6081  * Estimate the number of index pages that will be retrieved.
6082  *
6083  * We use the simplistic method of taking a pro-rata fraction of the total
6084  * number of index pages. In effect, this counts only leaf pages and not
6085  * any overhead such as index metapage or upper tree levels.
6086  *
6087  * In practice access to upper index levels is often nearly free because
6088  * those tend to stay in cache under load; moreover, the cost involved is
6089  * highly dependent on index type. We therefore ignore such costs here
6090  * and leave it to the caller to add a suitable charge if needed.
6091  */
6092  if (index->pages > 1 && index->tuples > 1)
6093  numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6094  else
6095  numIndexPages = 1.0;
6096 
6097  /* fetch estimated page cost for tablespace containing index */
6098  get_tablespace_page_costs(index->reltablespace,
6099  &spc_random_page_cost,
6100  NULL);
6101 
6102  /*
6103  * Now compute the disk access costs.
6104  *
6105  * The above calculations are all per-index-scan. However, if we are in a
6106  * nestloop inner scan, we can expect the scan to be repeated (with
6107  * different search keys) for each row of the outer relation. Likewise,
6108  * ScalarArrayOpExpr quals result in multiple index scans. This creates
6109  * the potential for cache effects to reduce the number of disk page
6110  * fetches needed. We want to estimate the average per-scan I/O cost in
6111  * the presence of caching.
6112  *
6113  * We use the Mackert-Lohman formula (see costsize.c for details) to
6114  * estimate the total number of page fetches that occur. While this
6115  * wasn't what it was designed for, it seems a reasonable model anyway.
6116  * Note that we are counting pages not tuples anymore, so we take N = T =
6117  * index size, as if there were one "tuple" per page.
6118  */
6119  num_outer_scans = loop_count;
6120  num_scans = num_sa_scans * num_outer_scans;
6121 
6122  if (num_scans > 1)
6123  {
6124  double pages_fetched;
6125 
6126  /* total page fetches ignoring cache effects */
6127  pages_fetched = numIndexPages * num_scans;
6128 
6129  /* use Mackert and Lohman formula to adjust for cache effects */
6130  pages_fetched = index_pages_fetched(pages_fetched,
6131  index->pages,
6132  (double) index->pages,
6133  root);
6134 
6135  /*
6136  * Now compute the total disk access cost, and then report a pro-rated
6137  * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
6138  * since that's internal to the indexscan.)
6139  */
6140  indexTotalCost = (pages_fetched * spc_random_page_cost)
6141  / num_outer_scans;
6142  }
6143  else
6144  {
6145  /*
6146  * For a single index scan, we just charge spc_random_page_cost per
6147  * page touched.
6148  */
6149  indexTotalCost = numIndexPages * spc_random_page_cost;
6150  }
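 /*
  * Worked example (illustrative numbers): with numIndexPages = 10,
  * num_sa_scans = 4 and loop_count = 25, num_scans = 100 and the raw
  * fetch count is 1000 pages.  If the index has 100 pages and fits in
  * effective_cache_size, the Mackert-Lohman formula collapses that to
  * roughly 100 fetches; the resulting cost is then divided by the 25
  * outer scans, but not by the 4 ScalarArrayOpExpr scans, which are
  * charged to each individual indexscan.
  */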
6151 
6152  /*
6153  * CPU cost: any complex expressions in the indexquals will need to be
6154  * evaluated once at the start of the scan to reduce them to runtime keys
6155  * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
6156  * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6157  * indexqual operator. Because we have numIndexTuples as a per-scan
6158  * number, we have to multiply by num_sa_scans to get the correct result
6159  * for ScalarArrayOpExpr cases. Similarly add in costs for any index
6160  * ORDER BY expressions.
6161  *
6162  * Note: this neglects the possible costs of rechecking lossy operators.
6163  * Detecting that that might be needed seems more expensive than it's
6164  * worth, though, considering all the other inaccuracies here ...
6165  */
6166  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals) +
6167  index_other_operands_eval_cost(root, indexOrderBys);
6168  qual_op_cost = cpu_operator_cost *
6169  (list_length(indexQuals) + list_length(indexOrderBys));
6170 
6171  indexStartupCost = qual_arg_cost;
6172  indexTotalCost += qual_arg_cost;
6173  indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6174 
6175  /*
6176  * Generic assumption about index correlation: there isn't any.
6177  */
6178  indexCorrelation = 0.0;
6179 
6180  /*
6181  * Return everything to caller.
6182  */
6183  costs->indexStartupCost = indexStartupCost;
6184  costs->indexTotalCost = indexTotalCost;
6185  costs->indexSelectivity = indexSelectivity;
6186  costs->indexCorrelation = indexCorrelation;
6187  costs->numIndexPages = numIndexPages;
6188  costs->numIndexTuples = numIndexTuples;
6189  costs->spc_random_page_cost