selfuncs.c
1 /*-------------------------------------------------------------------------
2  *
3  * selfuncs.c
4  * Selectivity functions and index cost estimation functions for
5  * standard operators and index access methods.
6  *
7  * Selectivity routines are registered in the pg_operator catalog
8  * in the "oprrest" and "oprjoin" attributes.
9  *
10  * Index cost functions are located via the index AM's API struct,
11  * which is obtained from the handler function registered in pg_am.
12  *
13  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
14  * Portions Copyright (c) 1994, Regents of the University of California
15  *
16  *
17  * IDENTIFICATION
18  * src/backend/utils/adt/selfuncs.c
19  *
20  *-------------------------------------------------------------------------
21  */
22 
23 /*----------
24  * Operator selectivity estimation functions are called to estimate the
25  * selectivity of WHERE clauses whose top-level operator is their operator.
26  * We divide the problem into two cases:
27  * Restriction clause estimation: the clause involves vars of just
28  * one relation.
29  * Join clause estimation: the clause involves vars of multiple rels.
30  * Join selectivity estimation is far more difficult and usually less accurate
31  * than restriction estimation.
32  *
33  * When dealing with the inner scan of a nestloop join, we consider the
34  * join's joinclauses as restriction clauses for the inner relation, and
35  * treat vars of the outer relation as parameters (a/k/a constants of unknown
36  * values). So, restriction estimators need to be able to accept an argument
37  * telling which relation is to be treated as the variable.
38  *
39  * The call convention for a restriction estimator (oprrest function) is
40  *
41  * Selectivity oprrest (PlannerInfo *root,
42  * Oid operator,
43  * List *args,
44  * int varRelid);
45  *
46  * root: general information about the query (rtable and RelOptInfo lists
47  * are particularly important for the estimator).
48  * operator: OID of the specific operator in question.
49  * args: argument list from the operator clause.
50  * varRelid: if not zero, the relid (rtable index) of the relation to
51  * be treated as the variable relation. May be zero if the args list
52  * is known to contain vars of only one relation.
53  *
54  * This is represented at the SQL level (in pg_proc) as
55  *
56  * float8 oprrest (internal, oid, internal, int4);
57  *
58  * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59  * of the relation that are expected to produce a TRUE result for the
60  * given operator.
61  *
62  * The call convention for a join estimator (oprjoin function) is similar
63  * except that varRelid is not needed, and instead join information is
64  * supplied:
65  *
66  * Selectivity oprjoin (PlannerInfo *root,
67  * Oid operator,
68  * List *args,
69  * JoinType jointype,
70  * SpecialJoinInfo *sjinfo);
71  *
72  * float8 oprjoin (internal, oid, internal, int2, internal);
73  *
74  * (Before Postgres 8.4, join estimators had only the first four of these
75  * parameters. That signature is still allowed, but deprecated.) The
76  * relationship between jointype and sjinfo is explained in the comments for
77  * clause_selectivity() --- the short version is that jointype is usually
78  * best ignored in favor of examining sjinfo.
79  *
80  * Join selectivity for regular inner and outer joins is defined as the
81  * fraction (0 to 1) of the cross product of the relations that is expected
82  * to produce a TRUE result for the given operator. For both semi and anti
83  * joins, however, the selectivity is defined as the fraction of the left-hand
84  * side relation's rows that are expected to have a match (ie, at least one
85  * row with a TRUE result) in the right-hand side.
86  *
87  * For both oprrest and oprjoin functions, the operator's input collation OID
88  * (if any) is passed using the standard fmgr mechanism, so that the estimator
89  * function can fetch it with PG_GET_COLLATION(). Note, however, that all
90  * statistics in pg_statistic are currently built using the relevant column's
91  * collation.
92  *----------
93  */
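/*----------
 * For illustration only, a minimal sketch of a custom restriction estimator
 * written to the convention above.  The function name (my_oprrest) and its
 * flat 1% fallback guess are hypothetical, not part of PostgreSQL; a real
 * estimator would go on to examine the column statistics the way the
 * functions in this file do (see eqsel_internal and scalarineqsel_wrapper
 * below for the same argument-unpacking pattern):
 *
 *	Datum
 *	my_oprrest(PG_FUNCTION_ARGS)
 *	{
 *		PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
 *		Oid         operator = PG_GETARG_OID(1);
 *		List       *args = (List *) PG_GETARG_POINTER(2);
 *		int         varRelid = PG_GETARG_INT32(3);
 *		VariableStatData vardata;
 *		Node       *other;
 *		bool        varonleft;
 *		double      selec = 0.01;
 *
 *		if (get_restriction_variable(root, args, varRelid,
 *									 &vardata, &other, &varonleft))
 *			ReleaseVariableStats(vardata);
 *
 *		PG_RETURN_FLOAT8((float8) selec);
 *	}
 *
 * Such a function is attached to an operator through CREATE OPERATOR's
 * RESTRICT (and, for join estimators, JOIN) options, which is what fills
 * the pg_operator oprrest/oprjoin attributes mentioned above.
 *----------
 */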
94 
95 #include "postgres.h"
96 
97 #include <ctype.h>
98 #include <math.h>
99 
100 #include "access/brin.h"
101 #include "access/brin_page.h"
102 #include "access/gin.h"
103 #include "access/table.h"
104 #include "access/tableam.h"
105 #include "access/visibilitymap.h"
106 #include "catalog/pg_am.h"
107 #include "catalog/pg_collation.h"
108 #include "catalog/pg_operator.h"
109 #include "catalog/pg_statistic.h"
110 #include "catalog/pg_statistic_ext.h"
111 #include "executor/nodeAgg.h"
112 #include "miscadmin.h"
113 #include "nodes/makefuncs.h"
114 #include "nodes/nodeFuncs.h"
115 #include "optimizer/clauses.h"
116 #include "optimizer/cost.h"
117 #include "optimizer/optimizer.h"
118 #include "optimizer/pathnode.h"
119 #include "optimizer/paths.h"
120 #include "optimizer/plancat.h"
121 #include "parser/parse_clause.h"
122 #include "parser/parsetree.h"
123 #include "statistics/statistics.h"
124 #include "storage/bufmgr.h"
125 #include "utils/acl.h"
126 #include "utils/builtins.h"
127 #include "utils/date.h"
128 #include "utils/datum.h"
129 #include "utils/fmgroids.h"
130 #include "utils/index_selfuncs.h"
131 #include "utils/lsyscache.h"
132 #include "utils/memutils.h"
133 #include "utils/pg_locale.h"
134 #include "utils/rel.h"
135 #include "utils/selfuncs.h"
136 #include "utils/snapmgr.h"
137 #include "utils/spccache.h"
138 #include "utils/syscache.h"
139 #include "utils/timestamp.h"
140 #include "utils/typcache.h"
141 
142 
143 /* Hooks for plugins to get control when we ask for stats */
144 get_relation_stats_hook_type get_relation_stats_hook = NULL;
145 get_index_stats_hook_type get_index_stats_hook = NULL;
146 
147 static double eqsel_internal(PG_FUNCTION_ARGS, bool negate);
148 static double eqjoinsel_inner(Oid opfuncoid, Oid collation,
149  VariableStatData *vardata1, VariableStatData *vardata2,
150  double nd1, double nd2,
151  bool isdefault1, bool isdefault2,
152  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
153  Form_pg_statistic stats1, Form_pg_statistic stats2,
154  bool have_mcvs1, bool have_mcvs2);
155 static double eqjoinsel_semi(Oid opfuncoid, Oid collation,
156  VariableStatData *vardata1, VariableStatData *vardata2,
157  double nd1, double nd2,
158  bool isdefault1, bool isdefault2,
159  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
160  Form_pg_statistic stats1, Form_pg_statistic stats2,
161  bool have_mcvs1, bool have_mcvs2,
162  RelOptInfo *inner_rel);
163 static bool estimate_multivariate_ndistinct(PlannerInfo *root,
164  RelOptInfo *rel, List **varinfos, double *ndistinct);
165 static bool convert_to_scalar(Datum value, Oid valuetypid, Oid collid,
166  double *scaledvalue,
167  Datum lobound, Datum hibound, Oid boundstypid,
168  double *scaledlobound, double *scaledhibound);
169 static double convert_numeric_to_scalar(Datum value, Oid typid, bool *failure);
170 static void convert_string_to_scalar(char *value,
171  double *scaledvalue,
172  char *lobound,
173  double *scaledlobound,
174  char *hibound,
175  double *scaledhibound);
176 static void convert_bytea_to_scalar(Datum value,
177  double *scaledvalue,
178  Datum lobound,
179  double *scaledlobound,
180  Datum hibound,
181  double *scaledhibound);
182 static double convert_one_string_to_scalar(char *value,
183  int rangelo, int rangehi);
184 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
185  int rangelo, int rangehi);
186 static char *convert_string_datum(Datum value, Oid typid, Oid collid,
187  bool *failure);
188 static double convert_timevalue_to_scalar(Datum value, Oid typid,
189  bool *failure);
190 static void examine_simple_variable(PlannerInfo *root, Var *var,
191  VariableStatData *vardata);
192 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
193  Oid sortop, Oid collation,
194  Datum *min, Datum *max);
195 static void get_stats_slot_range(AttStatsSlot *sslot,
196  Oid opfuncoid, FmgrInfo *opproc,
197  Oid collation, int16 typLen, bool typByVal,
198  Datum *min, Datum *max, bool *p_have_data);
199 static bool get_actual_variable_range(PlannerInfo *root,
200  VariableStatData *vardata,
201  Oid sortop, Oid collation,
202  Datum *min, Datum *max);
203 static bool get_actual_variable_endpoint(Relation heapRel,
204  Relation indexRel,
205  ScanDirection indexscandir,
206  ScanKey scankeys,
207  int16 typLen,
208  bool typByVal,
209  TupleTableSlot *tableslot,
210  MemoryContext outercontext,
211  Datum *endpointDatum);
212 static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
213 
214 
215 /*
216  * eqsel - Selectivity of "=" for any data types.
217  *
218  * Note: this routine is also used to estimate selectivity for some
219  * operators that are not "=" but have comparable selectivity behavior,
220  * such as "~=" (geometric approximate-match). Even for "=", we must
221  * keep in mind that the left and right datatypes may differ.
222  */
223 Datum
224 eqsel(PG_FUNCTION_ARGS)
225 {
226  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
227 }
228 
229 /*
230  * Common code for eqsel() and neqsel()
231  */
232 static double
233 eqsel_internal(PG_FUNCTION_ARGS, bool negate)
234 {
235  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
236  Oid operator = PG_GETARG_OID(1);
237  List *args = (List *) PG_GETARG_POINTER(2);
238  int varRelid = PG_GETARG_INT32(3);
239  Oid collation = PG_GET_COLLATION();
240  VariableStatData vardata;
241  Node *other;
242  bool varonleft;
243  double selec;
244 
245  /*
246  * When asked about <>, we do the estimation using the corresponding =
247  * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
248  */
249  if (negate)
250  {
251  operator = get_negator(operator);
252  if (!OidIsValid(operator))
253  {
254  /* Use default selectivity (should we raise an error instead?) */
255  return 1.0 - DEFAULT_EQ_SEL;
256  }
257  }
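 /*
  * For example, with hypothetical numbers: if the underlying "=" estimate
  * comes out as 0.02 and the column's stanullfrac is 0.1, the "<>" estimate
  * becomes 1.0 - 0.02 - 0.1 = 0.88 (the subtraction is applied by
  * var_eq_const/var_eq_non_const when negate is true).
  */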
258 
259  /*
260  * If expression is not variable = something or something = variable, then
261  * punt and return a default estimate.
262  */
263  if (!get_restriction_variable(root, args, varRelid,
264  &vardata, &other, &varonleft))
265  return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;
266 
267  /*
268  * We can do a lot better if the something is a constant. (Note: the
269  * Const might result from estimation rather than being a simple constant
270  * in the query.)
271  */
272  if (IsA(other, Const))
273  selec = var_eq_const(&vardata, operator, collation,
274  ((Const *) other)->constvalue,
275  ((Const *) other)->constisnull,
276  varonleft, negate);
277  else
278  selec = var_eq_non_const(&vardata, operator, collation, other,
279  varonleft, negate);
280 
281  ReleaseVariableStats(vardata);
282 
283  return selec;
284 }
285 
286 /*
287  * var_eq_const --- eqsel for var = const case
288  *
289  * This is exported so that some other estimation functions can use it.
290  */
291 double
292 var_eq_const(VariableStatData *vardata, Oid operator, Oid collation,
293  Datum constval, bool constisnull,
294  bool varonleft, bool negate)
295 {
296  double selec;
297  double nullfrac = 0.0;
298  bool isdefault;
299  Oid opfuncoid;
300 
301  /*
302  * If the constant is NULL, assume operator is strict and return zero, ie,
303  * operator will never return TRUE. (It's zero even for a negator op.)
304  */
305  if (constisnull)
306  return 0.0;
307 
308  /*
309  * Grab the nullfrac for use below. Note we allow use of nullfrac
310  * regardless of security check.
311  */
312  if (HeapTupleIsValid(vardata->statsTuple))
313  {
314  Form_pg_statistic stats;
315 
316  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
317  nullfrac = stats->stanullfrac;
318  }
319 
320  /*
321  * If we matched the var to a unique index or DISTINCT clause, assume
322  * there is exactly one match regardless of anything else. (This is
323  * slightly bogus, since the index or clause's equality operator might be
324  * different from ours, but it's much more likely to be right than
325  * ignoring the information.)
326  */
327  if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
328  {
329  selec = 1.0 / vardata->rel->tuples;
330  }
331  else if (HeapTupleIsValid(vardata->statsTuple) &&
332  statistic_proc_security_check(vardata,
333  (opfuncoid = get_opcode(operator))))
334  {
335  AttStatsSlot sslot;
336  bool match = false;
337  int i;
338 
339  /*
340  * Is the constant "=" to any of the column's most common values?
341  * (Although the given operator may not really be "=", we will assume
342  * that seeing whether it returns TRUE is an appropriate test. If you
343  * don't like this, maybe you shouldn't be using eqsel for your
344  * operator...)
345  */
346  if (get_attstatsslot(&sslot, vardata->statsTuple,
347  STATISTIC_KIND_MCV, InvalidOid,
348  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
349  {
350  LOCAL_FCINFO(fcinfo, 2);
351  FmgrInfo eqproc;
352 
353  fmgr_info(opfuncoid, &eqproc);
354 
355  /*
356  * Save a few cycles by setting up the fcinfo struct just once.
357  * Using FunctionCallInvoke directly also avoids failure if the
358  * eqproc returns NULL, though really equality functions should
359  * never do that.
360  */
361  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
362  NULL, NULL);
363  fcinfo->args[0].isnull = false;
364  fcinfo->args[1].isnull = false;
365  /* be careful to apply operator right way 'round */
366  if (varonleft)
367  fcinfo->args[1].value = constval;
368  else
369  fcinfo->args[0].value = constval;
370 
371  for (i = 0; i < sslot.nvalues; i++)
372  {
373  Datum fresult;
374 
375  if (varonleft)
376  fcinfo->args[0].value = sslot.values[i];
377  else
378  fcinfo->args[1].value = sslot.values[i];
379  fcinfo->isnull = false;
380  fresult = FunctionCallInvoke(fcinfo);
381  if (!fcinfo->isnull && DatumGetBool(fresult))
382  {
383  match = true;
384  break;
385  }
386  }
387  }
388  else
389  {
390  /* no most-common-value info available */
391  i = 0; /* keep compiler quiet */
392  }
393 
394  if (match)
395  {
396  /*
397  * Constant is "=" to this common value. We know selectivity
398  * exactly (or as exactly as ANALYZE could calculate it, anyway).
399  */
400  selec = sslot.numbers[i];
401  }
402  else
403  {
404  /*
405  * Comparison is against a constant that is neither NULL nor any
406  * of the common values. Its selectivity cannot be more than
407  * this:
408  */
409  double sumcommon = 0.0;
410  double otherdistinct;
411 
412  for (i = 0; i < sslot.nnumbers; i++)
413  sumcommon += sslot.numbers[i];
414  selec = 1.0 - sumcommon - nullfrac;
415  CLAMP_PROBABILITY(selec);
416 
417  /*
418  * and in fact it's probably a good deal less. We approximate that
419  * all the not-common values share this remaining fraction
420  * equally, so we divide by the number of other distinct values.
421  */
422  otherdistinct = get_variable_numdistinct(vardata, &isdefault) -
423  sslot.nnumbers;
424  if (otherdistinct > 1)
425  selec /= otherdistinct;
426 
427  /*
428  * Another cross-check: selectivity shouldn't be estimated as more
429  * than the least common "most common value".
430  */
431  if (sslot.nnumbers > 0 && selec > sslot.numbers[sslot.nnumbers - 1])
432  selec = sslot.numbers[sslot.nnumbers - 1];
433  }
434 
435  free_attstatsslot(&sslot);
436  }
437  else
438  {
439  /*
440  * No ANALYZE stats available, so make a guess using estimated number
441  * of distinct values and assuming they are equally common. (The guess
442  * is unlikely to be very good, but we do know a few special cases.)
443  */
444  selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
445  }
446 
447  /* now adjust if we wanted <> rather than = */
448  if (negate)
449  selec = 1.0 - selec - nullfrac;
450 
451  /* result should be in range, but make sure... */
452  CLAMP_PROBABILITY(selec);
453 
454  return selec;
455 }
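/*
 * A worked example of the non-MCV branch above, with hypothetical stats:
 * nullfrac = 0.1, MCVs covering a total frequency of 0.5, and an estimated
 * 100 distinct values of which 10 are MCVs.  A constant matching no MCV then
 * gets selec = (1.0 - 0.5 - 0.1) / (100 - 10) = 0.0044, further capped at
 * the least common MCV's frequency.
 */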
456 
457 /*
458  * var_eq_non_const --- eqsel for var = something-other-than-const case
459  *
460  * This is exported so that some other estimation functions can use it.
461  */
462 double
463 var_eq_non_const(VariableStatData *vardata, Oid operator, Oid collation,
464  Node *other,
465  bool varonleft, bool negate)
466 {
467  double selec;
468  double nullfrac = 0.0;
469  bool isdefault;
470 
471  /*
472  * Grab the nullfrac for use below.
473  */
474  if (HeapTupleIsValid(vardata->statsTuple))
475  {
476  Form_pg_statistic stats;
477 
478  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
479  nullfrac = stats->stanullfrac;
480  }
481 
482  /*
483  * If we matched the var to a unique index or DISTINCT clause, assume
484  * there is exactly one match regardless of anything else. (This is
485  * slightly bogus, since the index or clause's equality operator might be
486  * different from ours, but it's much more likely to be right than
487  * ignoring the information.)
488  */
489  if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
490  {
491  selec = 1.0 / vardata->rel->tuples;
492  }
493  else if (HeapTupleIsValid(vardata->statsTuple))
494  {
495  double ndistinct;
496  AttStatsSlot sslot;
497 
498  /*
499  * Search is for a value that we do not know a priori, but we will
500  * assume it is not NULL. Estimate the selectivity as non-null
501  * fraction divided by number of distinct values, so that we get a
502  * result averaged over all possible values whether common or
503  * uncommon. (Essentially, we are assuming that the not-yet-known
504  * comparison value is equally likely to be any of the possible
505  * values, regardless of their frequency in the table. Is that a good
506  * idea?)
507  */
508  selec = 1.0 - nullfrac;
509  ndistinct = get_variable_numdistinct(vardata, &isdefault);
510  if (ndistinct > 1)
511  selec /= ndistinct;
512 
513  /*
514  * Cross-check: selectivity should never be estimated as more than the
515  * most common value's.
516  */
517  if (get_attstatsslot(&sslot, vardata->statsTuple,
518  STATISTIC_KIND_MCV, InvalidOid,
519  ATTSTATSSLOT_NUMBERS))
520  {
521  if (sslot.nnumbers > 0 && selec > sslot.numbers[0])
522  selec = sslot.numbers[0];
523  free_attstatsslot(&sslot);
524  }
525  }
526  else
527  {
528  /*
529  * No ANALYZE stats available, so make a guess using estimated number
530  * of distinct values and assuming they are equally common. (The guess
531  * is unlikely to be very good, but we do know a few special cases.)
532  */
533  selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
534  }
535 
536  /* now adjust if we wanted <> rather than = */
537  if (negate)
538  selec = 1.0 - selec - nullfrac;
539 
540  /* result should be in range, but make sure... */
541  CLAMP_PROBABILITY(selec);
542 
543  return selec;
544 }
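/*
 * With hypothetical stats for the branch above: nullfrac = 0.1 and
 * ndistinct = 50 give selec = (1.0 - 0.1) / 50 = 0.018, which is then
 * clamped to the largest MCV frequency (sslot.numbers[0]) if that is
 * smaller.
 */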
545 
546 /*
547  * neqsel - Selectivity of "!=" for any data types.
548  *
549  * This routine is also used for some operators that are not "!="
550  * but have comparable selectivity behavior. See above comments
551  * for eqsel().
552  */
553 Datum
554 neqsel(PG_FUNCTION_ARGS)
555 {
556  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, true));
557 }
558 
559 /*
560  * scalarineqsel - Selectivity of "<", "<=", ">", ">=" for scalars.
561  *
562  * This is the guts of scalarltsel/scalarlesel/scalargtsel/scalargesel.
563  * The isgt and iseq flags distinguish which of the four cases apply.
564  *
565  * The caller has commuted the clause, if necessary, so that we can treat
566  * the variable as being on the left. The caller must also make sure that
567  * the other side of the clause is a non-null Const, and dissect that into
568  * a value and datatype. (This definition simplifies some callers that
569  * want to estimate against a computed value instead of a Const node.)
570  *
571  * This routine works for any datatype (or pair of datatypes) known to
572  * convert_to_scalar(). If it is applied to some other datatype,
573  * it will return an approximate estimate based on assuming that the constant
574  * value falls in the middle of the bin identified by binary search.
575  */
576 static double
577 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt, bool iseq,
578  Oid collation,
579  VariableStatData *vardata, Datum constval, Oid consttype)
580 {
581  Form_pg_statistic stats;
582  FmgrInfo opproc;
583  double mcv_selec,
584  hist_selec,
585  sumcommon;
586  double selec;
587 
588  if (!HeapTupleIsValid(vardata->statsTuple))
589  {
590  /*
591  * No stats are available. Typically this means we have to fall back
592  * on the default estimate; but if the variable is CTID then we can
593  * make an estimate based on comparing the constant to the table size.
594  */
595  if (vardata->var && IsA(vardata->var, Var) &&
596  ((Var *) vardata->var)->varattno == SelfItemPointerAttributeNumber)
597  {
598  ItemPointer itemptr;
599  double block;
600  double density;
601 
602  /*
603  * If the relation's empty, we're going to include all of it.
604  * (This is mostly to avoid divide-by-zero below.)
605  */
606  if (vardata->rel->pages == 0)
607  return 1.0;
608 
609  itemptr = (ItemPointer) DatumGetPointer(constval);
610  block = ItemPointerGetBlockNumberNoCheck(itemptr);
611 
612  /*
613  * Determine the average number of tuples per page (density).
614  *
615  * Since the last page will, on average, be only half full, we can
616  * estimate it to have half as many tuples as earlier pages. So
617  * give it half the weight of a regular page.
618  */
619  density = vardata->rel->tuples / (vardata->rel->pages - 0.5);
620 
621  /* If target is the last page, use half the density. */
622  if (block >= vardata->rel->pages - 1)
623  density *= 0.5;
624 
625  /*
626  * Using the average tuples per page, calculate how far into the
627  * page the itemptr is likely to be and adjust block accordingly,
628  * by adding that fraction of a whole block (but never more than a
629  * whole block, no matter how high the itemptr's offset is). Here
630  * we are ignoring the possibility of dead-tuple line pointers,
631  * which is fairly bogus, but we lack the info to do better.
632  */
633  if (density > 0.0)
634  {
635  OffsetNumber offset = ItemPointerGetOffsetNumberNoCheck(itemptr);
636 
637  block += Min(offset / density, 1.0);
638  }
639 
640  /*
641  * Convert relative block number to selectivity. Again, the last
642  * page has only half weight.
643  */
644  selec = block / (vardata->rel->pages - 0.5);
645 
646  /*
647  * The calculation so far gave us a selectivity for the "<=" case.
648  * We'll have one less tuple for "<" and one additional tuple for
649  * ">=", the latter of which we'll reverse the selectivity for
650  * below, so we can simply subtract one tuple for both cases. The
651  * cases that need this adjustment can be identified by iseq being
652  * equal to isgt.
653  */
654  if (iseq == isgt && vardata->rel->tuples >= 1.0)
655  selec -= (1.0 / vardata->rel->tuples);
656 
657  /* Finally, reverse the selectivity for the ">", ">=" cases. */
658  if (isgt)
659  selec = 1.0 - selec;
660 
661  CLAMP_PROBABILITY(selec);
662  return selec;
663  }
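 /*
  * A hypothetical illustration of the CTID arithmetic above: with
  * pages = 100 and tuples = 10000, density = 10000 / 99.5 = ~100.5
  * tuples per page, so "ctid <= '(49,1)'" gives block = ~49.01 and
  * selec = 49.01 / 99.5 = ~0.49 before the one-tuple and direction
  * adjustments.
  */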
664 
665  /* no stats available, so default result */
666  return DEFAULT_INEQ_SEL;
667  }
668  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
669 
670  fmgr_info(get_opcode(operator), &opproc);
671 
672  /*
673  * If we have most-common-values info, add up the fractions of the MCV
674  * entries that satisfy MCV OP CONST. These fractions contribute directly
675  * to the result selectivity. Also add up the total fraction represented
676  * by MCV entries.
677  */
678  mcv_selec = mcv_selectivity(vardata, &opproc, collation, constval, true,
679  &sumcommon);
680 
681  /*
682  * If there is a histogram, determine which bin the constant falls in, and
683  * compute the resulting contribution to selectivity.
684  */
685  hist_selec = ineq_histogram_selectivity(root, vardata,
686  operator, &opproc, isgt, iseq,
687  collation,
688  constval, consttype);
689 
690  /*
691  * Now merge the results from the MCV and histogram calculations,
692  * realizing that the histogram covers only the non-null values that are
693  * not listed in MCV.
694  */
695  selec = 1.0 - stats->stanullfrac - sumcommon;
696 
697  if (hist_selec >= 0.0)
698  selec *= hist_selec;
699  else
700  {
701  /*
702  * If no histogram but there are values not accounted for by MCV,
703  * arbitrarily assume half of them will match.
704  */
705  selec *= 0.5;
706  }
707 
708  selec += mcv_selec;
709 
710  /* result should be in range, but make sure... */
711  CLAMP_PROBABILITY(selec);
712 
713  return selec;
714 }
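/*
 * A hypothetical illustration of the merge above: with stanullfrac = 0.05,
 * MCVs covering sumcommon = 0.25 of which mcv_selec = 0.10 satisfy the
 * clause, and hist_selec = 0.30 of the histogram population matching, the
 * result is selec = (1.0 - 0.05 - 0.25) * 0.30 + 0.10 = 0.31.
 */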
715 
716 /*
717  * mcv_selectivity - Examine the MCV list for selectivity estimates
718  *
719  * Determine the fraction of the variable's MCV population that satisfies
720  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft. Also
721  * compute the fraction of the total column population represented by the MCV
722  * list. This code will work for any boolean-returning predicate operator.
723  *
724  * The function result is the MCV selectivity, and the fraction of the
725  * total population is returned into *sumcommonp. Zeroes are returned
726  * if there is no MCV list.
727  */
728 double
729 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation,
730  Datum constval, bool varonleft,
731  double *sumcommonp)
732 {
733  double mcv_selec,
734  sumcommon;
735  AttStatsSlot sslot;
736  int i;
737 
738  mcv_selec = 0.0;
739  sumcommon = 0.0;
740 
741  if (HeapTupleIsValid(vardata->statsTuple) &&
742  statistic_proc_security_check(vardata, opproc->fn_oid) &&
743  get_attstatsslot(&sslot, vardata->statsTuple,
744  STATISTIC_KIND_MCV, InvalidOid,
745  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
746  {
747  LOCAL_FCINFO(fcinfo, 2);
748 
749  /*
750  * We invoke the opproc "by hand" so that we won't fail on NULL
751  * results. Such cases won't arise for normal comparison functions,
752  * but generic_restriction_selectivity could perhaps be used with
753  * operators that can return NULL. A small side benefit is to not
754  * need to re-initialize the fcinfo struct from scratch each time.
755  */
756  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
757  NULL, NULL);
758  fcinfo->args[0].isnull = false;
759  fcinfo->args[1].isnull = false;
760  /* be careful to apply operator right way 'round */
761  if (varonleft)
762  fcinfo->args[1].value = constval;
763  else
764  fcinfo->args[0].value = constval;
765 
766  for (i = 0; i < sslot.nvalues; i++)
767  {
768  Datum fresult;
769 
770  if (varonleft)
771  fcinfo->args[0].value = sslot.values[i];
772  else
773  fcinfo->args[1].value = sslot.values[i];
774  fcinfo->isnull = false;
775  fresult = FunctionCallInvoke(fcinfo);
776  if (!fcinfo->isnull && DatumGetBool(fresult))
777  mcv_selec += sslot.numbers[i];
778  sumcommon += sslot.numbers[i];
779  }
780  free_attstatsslot(&sslot);
781  }
782 
783  *sumcommonp = sumcommon;
784  return mcv_selec;
785 }
786 
787 /*
788  * histogram_selectivity - Examine the histogram for selectivity estimates
789  *
790  * Determine the fraction of the variable's histogram entries that satisfy
791  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
792  *
793  * This code will work for any boolean-returning predicate operator, whether
794  * or not it has anything to do with the histogram sort operator. We are
795  * essentially using the histogram just as a representative sample. However,
796  * small histograms are unlikely to be all that representative, so the caller
797  * should be prepared to fall back on some other estimation approach when the
798  * histogram is missing or very small. It may also be prudent to combine this
799  * approach with another one when the histogram is small.
800  *
801  * If the actual histogram size is not at least min_hist_size, we won't bother
802  * to do the calculation at all. Also, if the n_skip parameter is > 0, we
803  * ignore the first and last n_skip histogram elements, on the grounds that
804  * they are outliers and hence not very representative. Typical values for
805  * these parameters are 10 and 1.
806  *
807  * The function result is the selectivity, or -1 if there is no histogram
808  * or it's smaller than min_hist_size.
809  *
810  * The output parameter *hist_size receives the actual histogram size,
811  * or zero if no histogram. Callers may use this number to decide how
812  * much faith to put in the function result.
813  *
814  * Note that the result disregards both the most-common-values (if any) and
815  * null entries. The caller is expected to combine this result with
816  * statistics for those portions of the column population. It may also be
817  * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
818  */
819 double
820 histogram_selectivity(VariableStatData *vardata,
821  FmgrInfo *opproc, Oid collation,
822  Datum constval, bool varonleft,
823  int min_hist_size, int n_skip,
824  int *hist_size)
825 {
826  double result;
827  AttStatsSlot sslot;
828 
829  /* check sanity of parameters */
830  Assert(n_skip >= 0);
831  Assert(min_hist_size > 2 * n_skip);
832 
833  if (HeapTupleIsValid(vardata->statsTuple) &&
834  statistic_proc_security_check(vardata, opproc->fn_oid) &&
835  get_attstatsslot(&sslot, vardata->statsTuple,
836  STATISTIC_KIND_HISTOGRAM, InvalidOid,
837  ATTSTATSSLOT_VALUES))
838  {
839  *hist_size = sslot.nvalues;
840  if (sslot.nvalues >= min_hist_size)
841  {
842  LOCAL_FCINFO(fcinfo, 2);
843  int nmatch = 0;
844  int i;
845 
846  /*
847  * We invoke the opproc "by hand" so that we won't fail on NULL
848  * results. Such cases won't arise for normal comparison
849  * functions, but generic_restriction_selectivity could perhaps be
850  * used with operators that can return NULL. A small side benefit
851  * is to not need to re-initialize the fcinfo struct from scratch
852  * each time.
853  */
854  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
855  NULL, NULL);
856  fcinfo->args[0].isnull = false;
857  fcinfo->args[1].isnull = false;
858  /* be careful to apply operator right way 'round */
859  if (varonleft)
860  fcinfo->args[1].value = constval;
861  else
862  fcinfo->args[0].value = constval;
863 
864  for (i = n_skip; i < sslot.nvalues - n_skip; i++)
865  {
866  Datum fresult;
867 
868  if (varonleft)
869  fcinfo->args[0].value = sslot.values[i];
870  else
871  fcinfo->args[1].value = sslot.values[i];
872  fcinfo->isnull = false;
873  fresult = FunctionCallInvoke(fcinfo);
874  if (!fcinfo->isnull && DatumGetBool(fresult))
875  nmatch++;
876  }
877  result = ((double) nmatch) / ((double) (sslot.nvalues - 2 * n_skip));
878  }
879  else
880  result = -1;
881  free_attstatsslot(&sslot);
882  }
883  else
884  {
885  *hist_size = 0;
886  result = -1;
887  }
888 
889  return result;
890 }
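/*
 * For illustration with hypothetical numbers: given a 100-entry histogram,
 * min_hist_size = 10 and n_skip = 1, the loop above probes entries 1..98;
 * if 30 of them satisfy the predicate, the result is 30 / 98 = ~0.306 and
 * *hist_size is set to 100 so the caller can judge how much to trust it.
 */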
891 
892 /*
893  * generic_restriction_selectivity - Selectivity for almost anything
894  *
895  * This function estimates selectivity for operators that we don't have any
896  * special knowledge about, but are on data types that we collect standard
897  * MCV and/or histogram statistics for. (Additional assumptions are that
898  * the operator is strict and immutable, or at least stable.)
899  *
900  * If we have "VAR OP CONST" or "CONST OP VAR", selectivity is estimated by
901  * applying the operator to each element of the column's MCV and/or histogram
902  * stats, and merging the results using the assumption that the histogram is
903  * a reasonable random sample of the column's non-MCV population. Note that
904  * if the operator's semantics are related to the histogram ordering, this
905  * might not be such a great assumption; other functions such as
906  * scalarineqsel() are probably a better match in such cases.
907  *
908  * Otherwise, fall back to the default selectivity provided by the caller.
909  */
910 double
911 generic_restriction_selectivity(PlannerInfo *root, Oid oproid, Oid collation,
912  List *args, int varRelid,
913  double default_selectivity)
914 {
915  double selec;
916  VariableStatData vardata;
917  Node *other;
918  bool varonleft;
919 
920  /*
921  * If expression is not variable OP something or something OP variable,
922  * then punt and return the default estimate.
923  */
924  if (!get_restriction_variable(root, args, varRelid,
925  &vardata, &other, &varonleft))
926  return default_selectivity;
927 
928  /*
929  * If the something is a NULL constant, assume operator is strict and
930  * return zero, ie, operator will never return TRUE.
931  */
932  if (IsA(other, Const) &&
933  ((Const *) other)->constisnull)
934  {
935  ReleaseVariableStats(vardata);
936  return 0.0;
937  }
938 
939  if (IsA(other, Const))
940  {
941  /* Variable is being compared to a known non-null constant */
942  Datum constval = ((Const *) other)->constvalue;
943  FmgrInfo opproc;
944  double mcvsum;
945  double mcvsel;
946  double nullfrac;
947  int hist_size;
948 
949  fmgr_info(get_opcode(oproid), &opproc);
950 
951  /*
952  * Calculate the selectivity for the column's most common values.
953  */
954  mcvsel = mcv_selectivity(&vardata, &opproc, collation,
955  constval, varonleft,
956  &mcvsum);
957 
958  /*
959  * If the histogram is large enough, see what fraction of it matches
960  * the query, and assume that's representative of the non-MCV
961  * population. Otherwise use the default selectivity for the non-MCV
962  * population.
963  */
964  selec = histogram_selectivity(&vardata, &opproc, collation,
965  constval, varonleft,
966  10, 1, &hist_size);
967  if (selec < 0)
968  {
969  /* Nope, fall back on default */
970  selec = default_selectivity;
971  }
972  else if (hist_size < 100)
973  {
974  /*
975  * For histogram sizes from 10 to 100, we combine the histogram
976  * and default selectivities, putting increasingly more trust in
977  * the histogram for larger sizes.
978  */
979  double hist_weight = hist_size / 100.0;
980 
981  selec = selec * hist_weight +
982  default_selectivity * (1.0 - hist_weight);
983  }
984 
985  /* In any case, don't believe extremely small or large estimates. */
986  if (selec < 0.0001)
987  selec = 0.0001;
988  else if (selec > 0.9999)
989  selec = 0.9999;
990 
991  /* Don't forget to account for nulls. */
992  if (HeapTupleIsValid(vardata.statsTuple))
993  nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
994  else
995  nullfrac = 0.0;
996 
997  /*
998  * Now merge the results from the MCV and histogram calculations,
999  * realizing that the histogram covers only the non-null values that
1000  * are not listed in MCV.
1001  */
1002  selec *= 1.0 - nullfrac - mcvsum;
1003  selec += mcvsel;
1004  }
1005  else
1006  {
1007  /* Comparison value is not constant, so we can't do anything */
1008  selec = default_selectivity;
1009  }
1010 
1011  ReleaseVariableStats(vardata);
1012 
1013  /* result should be in range, but make sure... */
1014  CLAMP_PROBABILITY(selec);
1015 
1016  return selec;
1017 }
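/*
 * A hypothetical illustration of the blending above: with hist_size = 40,
 * a histogram match fraction of 0.02 and default_selectivity = 0.005,
 * hist_weight = 0.4 and selec = 0.02 * 0.4 + 0.005 * 0.6 = 0.011, before
 * the clamps and the final null/MCV merge.
 */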
1018 
1019 /*
1020  * ineq_histogram_selectivity - Examine the histogram for scalarineqsel
1021  *
1022  * Determine the fraction of the variable's histogram population that
1023  * satisfies the inequality condition, ie, VAR < (or <=, >, >=) CONST.
1024  * The isgt and iseq flags distinguish which of the four cases apply.
1025  *
1026  * While opproc could be looked up from the operator OID, common callers
1027  * also need to call it separately, so we make the caller pass both.
1028  *
1029  * Returns -1 if there is no histogram (valid results will always be >= 0).
1030  *
1031  * Note that the result disregards both the most-common-values (if any) and
1032  * null entries. The caller is expected to combine this result with
1033  * statistics for those portions of the column population.
1034  *
1035  * This is exported so that some other estimation functions can use it.
1036  */
1037 double
1038 ineq_histogram_selectivity(PlannerInfo *root,
1039  VariableStatData *vardata,
1040  Oid opoid, FmgrInfo *opproc, bool isgt, bool iseq,
1041  Oid collation,
1042  Datum constval, Oid consttype)
1043 {
1044  double hist_selec;
1045  AttStatsSlot sslot;
1046 
1047  hist_selec = -1.0;
1048 
1049  /*
1050  * Someday, ANALYZE might store more than one histogram per rel/att,
1051  * corresponding to more than one possible sort ordering defined for the
1052  * column type. Right now, we know there is only one, so just grab it and
1053  * see if it matches the query.
1054  *
1055  * Note that we can't use opoid as search argument; the staop appearing in
1056  * pg_statistic will be for the relevant '<' operator, but what we have
1057  * might be some other inequality operator such as '>='. (Even if opoid
1058  * is a '<' operator, it could be cross-type.) Hence we must use
1059  * comparison_ops_are_compatible() to see if the operators match.
1060  */
1061  if (HeapTupleIsValid(vardata->statsTuple) &&
1062  statistic_proc_security_check(vardata, opproc->fn_oid) &&
1063  get_attstatsslot(&sslot, vardata->statsTuple,
1064  STATISTIC_KIND_HISTOGRAM, InvalidOid,
1065  ATTSTATSSLOT_VALUES))
1066  {
1067  if (sslot.nvalues > 1 &&
1068  sslot.stacoll == collation &&
1069  comparison_ops_are_compatible(sslot.staop, opoid))
1070  {
1071  /*
1072  * Use binary search to find the desired location, namely the
1073  * right end of the histogram bin containing the comparison value,
1074  * which is the leftmost entry for which the comparison operator
1075  * succeeds (if isgt) or fails (if !isgt).
1076  *
1077  * In this loop, we pay no attention to whether the operator iseq
1078  * or not; that detail will be mopped up below. (We cannot tell,
1079  * anyway, whether the operator thinks the values are equal.)
1080  *
1081  * If the binary search accesses the first or last histogram
1082  * entry, we try to replace that endpoint with the true column min
1083  * or max as found by get_actual_variable_range(). This
1084  * ameliorates misestimates when the min or max is moving as a
1085  * result of changes since the last ANALYZE. Note that this could
1086  * result in effectively including MCVs into the histogram that
1087  * weren't there before, but we don't try to correct for that.
1088  */
1089  double histfrac;
1090  int lobound = 0; /* first possible slot to search */
1091  int hibound = sslot.nvalues; /* last+1 slot to search */
1092  bool have_end = false;
1093 
1094  /*
1095  * If there are only two histogram entries, we'll want up-to-date
1096  * values for both. (If there are more than two, we need at most
1097  * one of them to be updated, so we deal with that within the
1098  * loop.)
1099  */
1100  if (sslot.nvalues == 2)
1101  have_end = get_actual_variable_range(root,
1102  vardata,
1103  sslot.staop,
1104  collation,
1105  &sslot.values[0],
1106  &sslot.values[1]);
1107 
1108  while (lobound < hibound)
1109  {
1110  int probe = (lobound + hibound) / 2;
1111  bool ltcmp;
1112 
1113  /*
1114  * If we find ourselves about to compare to the first or last
1115  * histogram entry, first try to replace it with the actual
1116  * current min or max (unless we already did so above).
1117  */
1118  if (probe == 0 && sslot.nvalues > 2)
1119  have_end = get_actual_variable_range(root,
1120  vardata,
1121  sslot.staop,
1122  collation,
1123  &sslot.values[0],
1124  NULL);
1125  else if (probe == sslot.nvalues - 1 && sslot.nvalues > 2)
1126  have_end = get_actual_variable_range(root,
1127  vardata,
1128  sslot.staop,
1129  collation,
1130  NULL,
1131  &sslot.values[probe]);
1132 
1133  ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
1134  collation,
1135  sslot.values[probe],
1136  constval));
1137  if (isgt)
1138  ltcmp = !ltcmp;
1139  if (ltcmp)
1140  lobound = probe + 1;
1141  else
1142  hibound = probe;
1143  }
1144 
1145  if (lobound <= 0)
1146  {
1147  /*
1148  * Constant is below lower histogram boundary. More
1149  * precisely, we have found that no entry in the histogram
1150  * satisfies the inequality clause (if !isgt) or they all do
1151  * (if isgt). We estimate that that's true of the entire
1152  * table, so set histfrac to 0.0 (which we'll flip to 1.0
1153  * below, if isgt).
1154  */
1155  histfrac = 0.0;
1156  }
1157  else if (lobound >= sslot.nvalues)
1158  {
1159  /*
1160  * Inverse case: constant is above upper histogram boundary.
1161  */
1162  histfrac = 1.0;
1163  }
1164  else
1165  {
1166  /* We have values[i-1] <= constant <= values[i]. */
1167  int i = lobound;
1168  double eq_selec = 0;
1169  double val,
1170  high,
1171  low;
1172  double binfrac;
1173 
1174  /*
1175  * In the cases where we'll need it below, obtain an estimate
1176  * of the selectivity of "x = constval". We use a calculation
1177  * similar to what var_eq_const() does for a non-MCV constant,
1178  * ie, estimate that all distinct non-MCV values occur equally
1179  * often. But multiplication by "1.0 - sumcommon - nullfrac"
1180  * will be done by our caller, so we shouldn't do that here.
1181  * Therefore we can't try to clamp the estimate by reference
1182  * to the least common MCV; the result would be too small.
1183  *
1184  * Note: since this is effectively assuming that constval
1185  * isn't an MCV, it's logically dubious if constval in fact is
1186  * one. But we have to apply *some* correction for equality,
1187  * and anyway we cannot tell if constval is an MCV, since we
1188  * don't have a suitable equality operator at hand.
1189  */
1190  if (i == 1 || isgt == iseq)
1191  {
1192  double otherdistinct;
1193  bool isdefault;
1194  AttStatsSlot mcvslot;
1195 
1196  /* Get estimated number of distinct values */
1197  otherdistinct = get_variable_numdistinct(vardata,
1198  &isdefault);
1199 
1200  /* Subtract off the number of known MCVs */
1201  if (get_attstatsslot(&mcvslot, vardata->statsTuple,
1202  STATISTIC_KIND_MCV, InvalidOid,
1203  ATTSTATSSLOT_NUMBERS))
1204  {
1205  otherdistinct -= mcvslot.nnumbers;
1206  free_attstatsslot(&mcvslot);
1207  }
1208 
1209  /* If result doesn't seem sane, leave eq_selec at 0 */
1210  if (otherdistinct > 1)
1211  eq_selec = 1.0 / otherdistinct;
1212  }
1213 
1214  /*
1215  * Convert the constant and the two nearest bin boundary
1216  * values to a uniform comparison scale, and do a linear
1217  * interpolation within this bin.
1218  */
1219  if (convert_to_scalar(constval, consttype, collation,
1220  &val,
1221  sslot.values[i - 1], sslot.values[i],
1222  vardata->vartype,
1223  &low, &high))
1224  {
1225  if (high <= low)
1226  {
1227  /* cope if bin boundaries appear identical */
1228  binfrac = 0.5;
1229  }
1230  else if (val <= low)
1231  binfrac = 0.0;
1232  else if (val >= high)
1233  binfrac = 1.0;
1234  else
1235  {
1236  binfrac = (val - low) / (high - low);
1237 
1238  /*
1239  * Watch out for the possibility that we got a NaN or
1240  * Infinity from the division. This can happen
1241  * despite the previous checks, if for example "low"
1242  * is -Infinity.
1243  */
1244  if (isnan(binfrac) ||
1245  binfrac < 0.0 || binfrac > 1.0)
1246  binfrac = 0.5;
1247  }
1248  }
1249  else
1250  {
1251  /*
1252  * Ideally we'd produce an error here, on the grounds that
1253  * the given operator shouldn't have scalarXXsel
1254  * registered as its selectivity func unless we can deal
1255  * with its operand types. But currently, all manner of
1256  * stuff is invoking scalarXXsel, so give a default
1257  * estimate until that can be fixed.
1258  */
1259  binfrac = 0.5;
1260  }
1261 
1262  /*
1263  * Now, compute the overall selectivity across the values
1264  * represented by the histogram. We have i-1 full bins and
1265  * binfrac partial bin below the constant.
1266  */
1267  histfrac = (double) (i - 1) + binfrac;
1268  histfrac /= (double) (sslot.nvalues - 1);
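 /*
  * For example, with hypothetical values: a 101-entry histogram has
  * 100 bins, so a constant landing in bin i = 42 with binfrac = 0.25
  * yields histfrac = (41 + 0.25) / 100 = 0.4125 before the eq_selec
  * adjustments below.
  */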
1269 
1270  /*
1271  * At this point, histfrac is an estimate of the fraction of
1272  * the population represented by the histogram that satisfies
1273  * "x <= constval". Somewhat remarkably, this statement is
1274  * true regardless of which operator we were doing the probes
1275  * with, so long as convert_to_scalar() delivers reasonable
1276  * results. If the probe constant is equal to some histogram
1277  * entry, we would have considered the bin to the left of that
1278  * entry if probing with "<" or ">=", or the bin to the right
1279  * if probing with "<=" or ">"; but binfrac would have come
1280  * out as 1.0 in the first case and 0.0 in the second, leading
1281  * to the same histfrac in either case. For probe constants
1282  * between histogram entries, we find the same bin and get the
1283  * same estimate with any operator.
1284  *
1285  * The fact that the estimate corresponds to "x <= constval"
1286  * and not "x < constval" is because of the way that ANALYZE
1287  * constructs the histogram: each entry is, effectively, the
1288  * rightmost value in its sample bucket. So selectivity
1289  * values that are exact multiples of 1/(histogram_size-1)
1290  * should be understood as estimates including a histogram
1291  * entry plus everything to its left.
1292  *
1293  * However, that breaks down for the first histogram entry,
1294  * which necessarily is the leftmost value in its sample
1295  * bucket. That means the first histogram bin is slightly
1296  * narrower than the rest, by an amount equal to eq_selec.
1297  * Another way to say that is that we want "x <= leftmost" to
1298  * be estimated as eq_selec not zero. So, if we're dealing
1299  * with the first bin (i==1), rescale to make that true while
1300  * adjusting the rest of that bin linearly.
1301  */
1302  if (i == 1)
1303  histfrac += eq_selec * (1.0 - binfrac);
1304 
1305  /*
1306  * "x <= constval" is good if we want an estimate for "<=" or
1307  * ">", but if we are estimating for "<" or ">=", we now need
1308  * to decrease the estimate by eq_selec.
1309  */
1310  if (isgt == iseq)
1311  histfrac -= eq_selec;
1312  }
1313 
1314  /*
1315  * Now the estimate is finished for "<" and "<=" cases. If we are
1316  * estimating for ">" or ">=", flip it.
1317  */
1318  hist_selec = isgt ? (1.0 - histfrac) : histfrac;
1319 
1320  /*
1321  * The histogram boundaries are only approximate to begin with,
1322  * and may well be out of date anyway. Therefore, don't believe
1323  * extremely small or large selectivity estimates --- unless we
1324  * got actual current endpoint values from the table, in which
1325  * case just do the usual sanity clamp. Somewhat arbitrarily, we
1326  * set the cutoff for other cases at a hundredth of the histogram
1327  * resolution.
1328  */
1329  if (have_end)
1330  CLAMP_PROBABILITY(hist_selec);
1331  else
1332  {
1333  double cutoff = 0.01 / (double) (sslot.nvalues - 1);
1334 
1335  if (hist_selec < cutoff)
1336  hist_selec = cutoff;
1337  else if (hist_selec > 1.0 - cutoff)
1338  hist_selec = 1.0 - cutoff;
1339  }
1340  }
1341  else if (sslot.nvalues > 1)
1342  {
1343  /*
1344  * If we get here, we have a histogram but it's not sorted the way
1345  * we want. Do a brute-force search to see how many of the
1346  * entries satisfy the comparison condition, and take that
1347  * fraction as our estimate. (This is identical to the inner loop
1348  * of histogram_selectivity; maybe share code?)
1349  */
1350  LOCAL_FCINFO(fcinfo, 2);
1351  int nmatch = 0;
1352 
1353  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
1354  NULL, NULL);
1355  fcinfo->args[0].isnull = false;
1356  fcinfo->args[1].isnull = false;
1357  fcinfo->args[1].value = constval;
1358  for (int i = 0; i < sslot.nvalues; i++)
1359  {
1360  Datum fresult;
1361 
1362  fcinfo->args[0].value = sslot.values[i];
1363  fcinfo->isnull = false;
1364  fresult = FunctionCallInvoke(fcinfo);
1365  if (!fcinfo->isnull && DatumGetBool(fresult))
1366  nmatch++;
1367  }
1368  hist_selec = ((double) nmatch) / ((double) sslot.nvalues);
1369 
1370  /*
1371  * As above, clamp to a hundredth of the histogram resolution.
1372  * This case is surely even less trustworthy than the normal one,
1373  * so we shouldn't believe exact 0 or 1 selectivity. (Maybe the
1374  * clamp should be more restrictive in this case?)
1375  */
1376  {
1377  double cutoff = 0.01 / (double) (sslot.nvalues - 1);
1378 
1379  if (hist_selec < cutoff)
1380  hist_selec = cutoff;
1381  else if (hist_selec > 1.0 - cutoff)
1382  hist_selec = 1.0 - cutoff;
1383  }
1384  }
1385 
1386  free_attstatsslot(&sslot);
1387  }
1388 
1389  return hist_selec;
1390 }
1391 
1392 /*
1393  * Common wrapper function for the selectivity estimators that simply
1394  * invoke scalarineqsel().
1395  */
1396 static Datum
1397 scalarineqsel_wrapper(PG_FUNCTION_ARGS, bool isgt, bool iseq)
1398 {
1399  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1400  Oid operator = PG_GETARG_OID(1);
1401  List *args = (List *) PG_GETARG_POINTER(2);
1402  int varRelid = PG_GETARG_INT32(3);
1403  Oid collation = PG_GET_COLLATION();
1404  VariableStatData vardata;
1405  Node *other;
1406  bool varonleft;
1407  Datum constval;
1408  Oid consttype;
1409  double selec;
1410 
1411  /*
1412  * If expression is not variable op something or something op variable,
1413  * then punt and return a default estimate.
1414  */
1415  if (!get_restriction_variable(root, args, varRelid,
1416  &vardata, &other, &varonleft))
1417  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1418 
1419  /*
1420  * Can't do anything useful if the something is not a constant, either.
1421  */
1422  if (!IsA(other, Const))
1423  {
1424  ReleaseVariableStats(vardata);
1425  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1426  }
1427 
1428  /*
1429  * If the constant is NULL, assume operator is strict and return zero, ie,
1430  * operator will never return TRUE.
1431  */
1432  if (((Const *) other)->constisnull)
1433  {
1434  ReleaseVariableStats(vardata);
1435  PG_RETURN_FLOAT8(0.0);
1436  }
1437  constval = ((Const *) other)->constvalue;
1438  consttype = ((Const *) other)->consttype;
1439 
1440  /*
1441  * Force the var to be on the left to simplify logic in scalarineqsel.
1442  */
1443  if (!varonleft)
1444  {
1445  operator = get_commutator(operator);
1446  if (!operator)
1447  {
1448  /* Use default selectivity (should we raise an error instead?) */
1449  ReleaseVariableStats(vardata);
1450  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1451  }
1452  isgt = !isgt;
1453  }
1454 
1455  /* The rest of the work is done by scalarineqsel(). */
1456  selec = scalarineqsel(root, operator, isgt, iseq, collation,
1457  &vardata, constval, consttype);
1458 
1459  ReleaseVariableStats(vardata);
1460 
1461  PG_RETURN_FLOAT8((float8) selec);
1462 }
1463 
1464 /*
1465  * scalarltsel - Selectivity of "<" for scalars.
1466  */
1467 Datum
1468 scalarltsel(PG_FUNCTION_ARGS)
1469 {
1470  return scalarineqsel_wrapper(fcinfo, false, false);
1471 }
1472 
1473 /*
1474  * scalarlesel - Selectivity of "<=" for scalars.
1475  */
1476 Datum
1477 scalarlesel(PG_FUNCTION_ARGS)
1478 {
1479  return scalarineqsel_wrapper(fcinfo, false, true);
1480 }
1481 
1482 /*
1483  * scalargtsel - Selectivity of ">" for scalars.
1484  */
1485 Datum
1486 scalargtsel(PG_FUNCTION_ARGS)
1487 {
1488  return scalarineqsel_wrapper(fcinfo, true, false);
1489 }
1490 
1491 /*
1492  * scalargesel - Selectivity of ">=" for scalars.
1493  */
1494 Datum
1495 scalargesel(PG_FUNCTION_ARGS)
1496 {
1497  return scalarineqsel_wrapper(fcinfo, true, true);
1498 }
1499 
1500 /*
1501  * boolvarsel - Selectivity of Boolean variable.
1502  *
1503  * This can actually be called on any boolean-valued expression. If it
1504  * involves only Vars of the specified relation, and if there are statistics
1505  * about the Var or expression (the latter is possible if it's indexed) then
1506  * we'll produce a real estimate; otherwise it's just a default.
1507  */
1508 Selectivity
1509 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1510 {
1511  VariableStatData vardata;
1512  double selec;
1513 
1514  examine_variable(root, arg, varRelid, &vardata);
1515  if (HeapTupleIsValid(vardata.statsTuple))
1516  {
1517  /*
1518  * A boolean variable V is equivalent to the clause V = 't', so we
1519  * compute the selectivity as if that is what we have.
1520  */
1521  selec = var_eq_const(&vardata, BooleanEqualOperator, InvalidOid,
1522  BoolGetDatum(true), false, true, false);
1523  }
1524  else
1525  {
1526  /* Otherwise, the default estimate is 0.5 */
1527  selec = 0.5;
1528  }
1529  ReleaseVariableStats(vardata);
1530  return selec;
1531 }
1532 
1533 /*
1534  * booltestsel - Selectivity of BooleanTest Node.
1535  */
1536 Selectivity
1537 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1538  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1539 {
1540  VariableStatData vardata;
1541  double selec;
1542 
1543  examine_variable(root, arg, varRelid, &vardata);
1544 
1545  if (HeapTupleIsValid(vardata.statsTuple))
1546  {
1547  Form_pg_statistic stats;
1548  double freq_null;
1549  AttStatsSlot sslot;
1550 
1551  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1552  freq_null = stats->stanullfrac;
1553 
1554  if (get_attstatsslot(&sslot, vardata.statsTuple,
1555  STATISTIC_KIND_MCV, InvalidOid,
1556  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)
1557  && sslot.nnumbers > 0)
1558  {
1559  double freq_true;
1560  double freq_false;
1561 
1562  /*
1563  * Get first MCV frequency and derive frequency for true.
1564  */
1565  if (DatumGetBool(sslot.values[0]))
1566  freq_true = sslot.numbers[0];
1567  else
1568  freq_true = 1.0 - sslot.numbers[0] - freq_null;
1569 
1570  /*
1571  * Next derive frequency for false. Then use these as appropriate
1572  * to derive frequency for each case.
1573  */
1574  freq_false = 1.0 - freq_true - freq_null;
1575 
1576  switch (booltesttype)
1577  {
1578  case IS_UNKNOWN:
1579  /* select only NULL values */
1580  selec = freq_null;
1581  break;
1582  case IS_NOT_UNKNOWN:
1583  /* select non-NULL values */
1584  selec = 1.0 - freq_null;
1585  break;
1586  case IS_TRUE:
1587  /* select only TRUE values */
1588  selec = freq_true;
1589  break;
1590  case IS_NOT_TRUE:
1591  /* select non-TRUE values */
1592  selec = 1.0 - freq_true;
1593  break;
1594  case IS_FALSE:
1595  /* select only FALSE values */
1596  selec = freq_false;
1597  break;
1598  case IS_NOT_FALSE:
1599  /* select non-FALSE values */
1600  selec = 1.0 - freq_false;
1601  break;
1602  default:
1603  elog(ERROR, "unrecognized booltesttype: %d",
1604  (int) booltesttype);
1605  selec = 0.0; /* Keep compiler quiet */
1606  break;
1607  }
1608 
1609  free_attstatsslot(&sslot);
1610  }
1611  else
1612  {
1613  /*
1614  * No most-common-value info available. Still have null fraction
1615  * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1616  * for null fraction and assume a 50-50 split of TRUE and FALSE.
1617  */
1618  switch (booltesttype)
1619  {
1620  case IS_UNKNOWN:
1621  /* select only NULL values */
1622  selec = freq_null;
1623  break;
1624  case IS_NOT_UNKNOWN:
1625  /* select non-NULL values */
1626  selec = 1.0 - freq_null;
1627  break;
1628  case IS_TRUE:
1629  case IS_FALSE:
1630  /* Assume we select half of the non-NULL values */
1631  selec = (1.0 - freq_null) / 2.0;
1632  break;
1633  case IS_NOT_TRUE:
1634  case IS_NOT_FALSE:
1635  /* Assume we select NULLs plus half of the non-NULLs */
1636  /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1637  selec = (freq_null + 1.0) / 2.0;
1638  break;
1639  default:
1640  elog(ERROR, "unrecognized booltesttype: %d",
1641  (int) booltesttype);
1642  selec = 0.0; /* Keep compiler quiet */
1643  break;
1644  }
1645  }
1646  }
1647  else
1648  {
1649  /*
1650  * If we can't get variable statistics for the argument, perhaps
1651  * clause_selectivity can do something with it. We ignore the
1652  * possibility of a NULL value when using clause_selectivity, and just
1653  * assume the value is either TRUE or FALSE.
1654  */
1655  switch (booltesttype)
1656  {
1657  case IS_UNKNOWN:
1658  selec = DEFAULT_UNK_SEL;
1659  break;
1660  case IS_NOT_UNKNOWN:
1661  selec = DEFAULT_NOT_UNK_SEL;
1662  break;
1663  case IS_TRUE:
1664  case IS_NOT_FALSE:
1665  selec = (double) clause_selectivity(root, arg,
1666  varRelid,
1667  jointype, sjinfo);
1668  break;
1669  case IS_FALSE:
1670  case IS_NOT_TRUE:
1671  selec = 1.0 - (double) clause_selectivity(root, arg,
1672  varRelid,
1673  jointype, sjinfo);
1674  break;
1675  default:
1676  elog(ERROR, "unrecognized booltesttype: %d",
1677  (int) booltesttype);
1678  selec = 0.0; /* Keep compiler quiet */
1679  break;
1680  }
1681  }
1682 
1683  ReleaseVariableStats(vardata);
1684 
1685  /* result should be in range, but make sure... */
1686  CLAMP_PROBABILITY(selec);
1687 
1688  return (Selectivity) selec;
1689 }
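/*
 * A hypothetical worked example for the MCV branch above: if the first MCV
 * is "true" with frequency 0.60 and stanullfrac = 0.05, then freq_true =
 * 0.60, freq_false = 1.0 - 0.60 - 0.05 = 0.35, so IS NOT TRUE selects 0.40
 * and IS FALSE selects 0.35.
 */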
1690 
1691 /*
1692  * nulltestsel - Selectivity of NullTest Node.
1693  */
1694 Selectivity
1695 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1696  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1697 {
1698  VariableStatData vardata;
1699  double selec;
1700 
1701  examine_variable(root, arg, varRelid, &vardata);
1702 
1703  if (HeapTupleIsValid(vardata.statsTuple))
1704  {
1705  Form_pg_statistic stats;
1706  double freq_null;
1707 
1708  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1709  freq_null = stats->stanullfrac;
1710 
1711  switch (nulltesttype)
1712  {
1713  case IS_NULL:
1714 
1715  /*
1716  * Use freq_null directly.
1717  */
1718  selec = freq_null;
1719  break;
1720  case IS_NOT_NULL:
1721 
1722  /*
1723  * Select not unknown (not null) values. Calculate from
1724  * freq_null.
1725  */
1726  selec = 1.0 - freq_null;
1727  break;
1728  default:
1729  elog(ERROR, "unrecognized nulltesttype: %d",
1730  (int) nulltesttype);
1731  return (Selectivity) 0; /* keep compiler quiet */
1732  }
1733  }
1734  else if (vardata.var && IsA(vardata.var, Var) &&
1735  ((Var *) vardata.var)->varattno < 0)
1736  {
1737  /*
1738  * There are no stats for system columns, but we know they are never
1739  * NULL.
1740  */
1741  selec = (nulltesttype == IS_NULL) ? 0.0 : 1.0;
1742  }
1743  else
1744  {
1745  /*
1746  * No ANALYZE stats available, so make a guess
1747  */
1748  switch (nulltesttype)
1749  {
1750  case IS_NULL:
1751  selec = DEFAULT_UNK_SEL;
1752  break;
1753  case IS_NOT_NULL:
1754  selec = DEFAULT_NOT_UNK_SEL;
1755  break;
1756  default:
1757  elog(ERROR, "unrecognized nulltesttype: %d",
1758  (int) nulltesttype);
1759  return (Selectivity) 0; /* keep compiler quiet */
1760  }
1761  }
1762 
1763  ReleaseVariableStats(vardata);
1764 
1765  /* result should be in range, but make sure... */
1766  CLAMP_PROBABILITY(selec);
1767 
1768  return (Selectivity) selec;
1769 }
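/*
 * Illustrative example (hypothetical numbers): with ANALYZE stats showing
 * stanullfrac = 0.25, "col IS NULL" is estimated at 0.25 and
 * "col IS NOT NULL" at 0.75.  A system column such as ctid (varattno < 0)
 * has no stats but can never be NULL, so it gets 0.0 and 1.0 respectively.
 * With no stats at all, the DEFAULT_UNK_SEL / DEFAULT_NOT_UNK_SEL constants
 * are used instead.
 */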
1770 
1771 /*
1772  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1773  *
1774  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1775  * but it seems possible that RelabelType might show up. Also, the planner
1776  * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1777  * so we need to be ready to deal with more than one level.
1778  */
1779 static Node *
1780 strip_array_coercion(Node *node)
1781 {
1782  for (;;)
1783  {
1784  if (node && IsA(node, ArrayCoerceExpr))
1785  {
1786  ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
1787 
1788  /*
1789  * If the per-element expression is just a RelabelType on top of
1790  * CaseTestExpr, then we know it's a binary-compatible relabeling.
1791  */
1792  if (IsA(acoerce->elemexpr, RelabelType) &&
1793  IsA(((RelabelType *) acoerce->elemexpr)->arg, CaseTestExpr))
1794  node = (Node *) acoerce->arg;
1795  else
1796  break;
1797  }
1798  else if (node && IsA(node, RelabelType))
1799  {
1800  /* We don't really expect this case, but may as well cope */
1801  node = (Node *) ((RelabelType *) node)->arg;
1802  }
1803  else
1804  break;
1805  }
1806  return node;
1807 }
1808 
1809 /*
1810  * scalararraysel - Selectivity of ScalarArrayOpExpr Node.
1811  */
1812 Selectivity
1813 scalararraysel(PlannerInfo *root,
1814  ScalarArrayOpExpr *clause,
1815  bool is_join_clause,
1816  int varRelid,
1817  JoinType jointype,
1818  SpecialJoinInfo *sjinfo)
1819 {
1820  Oid operator = clause->opno;
1821  bool useOr = clause->useOr;
1822  bool isEquality = false;
1823  bool isInequality = false;
1824  Node *leftop;
1825  Node *rightop;
1826  Oid nominal_element_type;
1827  Oid nominal_element_collation;
1828  TypeCacheEntry *typentry;
1829  RegProcedure oprsel;
1830  FmgrInfo oprselproc;
1831  Selectivity s1;
1832  Selectivity s1disjoint;
1833 
1834  /* First, deconstruct the expression */
1835  Assert(list_length(clause->args) == 2);
1836  leftop = (Node *) linitial(clause->args);
1837  rightop = (Node *) lsecond(clause->args);
1838 
1839  /* aggressively reduce both sides to constants */
1840  leftop = estimate_expression_value(root, leftop);
1841  rightop = estimate_expression_value(root, rightop);
1842 
1843  /* get nominal (after relabeling) element type of rightop */
1844  nominal_element_type = get_base_element_type(exprType(rightop));
1845  if (!OidIsValid(nominal_element_type))
1846  return (Selectivity) 0.5; /* probably shouldn't happen */
1847  /* get nominal collation, too, for generating constants */
1848  nominal_element_collation = exprCollation(rightop);
1849 
1850  /* look through any binary-compatible relabeling of rightop */
1851  rightop = strip_array_coercion(rightop);
1852 
1853  /*
1854  * Detect whether the operator is the default equality or inequality
1855  * operator of the array element type.
1856  */
1857  typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1858  if (OidIsValid(typentry->eq_opr))
1859  {
1860  if (operator == typentry->eq_opr)
1861  isEquality = true;
1862  else if (get_negator(operator) == typentry->eq_opr)
1863  isInequality = true;
1864  }
1865 
1866  /*
1867  * If it is equality or inequality, we might be able to estimate this as a
1868  * form of array containment; for instance "const = ANY(column)" can be
1869  * treated as "ARRAY[const] <@ column". scalararraysel_containment tries
1870  * that, and returns the selectivity estimate if successful, or -1 if not.
1871  */
1872  if ((isEquality || isInequality) && !is_join_clause)
1873  {
1874  s1 = scalararraysel_containment(root, leftop, rightop,
1875  nominal_element_type,
1876  isEquality, useOr, varRelid);
1877  if (s1 >= 0.0)
1878  return s1;
1879  }
1880 
1881  /*
1882  * Look up the underlying operator's selectivity estimator. Punt if it
1883  * hasn't got one.
1884  */
1885  if (is_join_clause)
1886  oprsel = get_oprjoin(operator);
1887  else
1888  oprsel = get_oprrest(operator);
1889  if (!oprsel)
1890  return (Selectivity) 0.5;
1891  fmgr_info(oprsel, &oprselproc);
1892 
1893  /*
1894  * In the array-containment check above, we must only believe that an
1895  * operator is equality or inequality if it is the default btree equality
1896  * operator (or its negator) for the element type, since those are the
1897  * operators that array containment will use. But in what follows, we can
1898  * be a little laxer, and also believe that any operators using eqsel() or
1899  * neqsel() as selectivity estimator act like equality or inequality.
1900  */
1901  if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1902  isEquality = true;
1903  else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1904  isInequality = true;
1905 
1906  /*
1907  * We consider three cases:
1908  *
1909  * 1. rightop is an Array constant: deconstruct the array, apply the
1910  * operator's selectivity function for each array element, and merge the
1911  * results in the same way that clausesel.c does for AND/OR combinations.
1912  *
1913  * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1914  * function for each element of the ARRAY[] construct, and merge.
1915  *
1916  * 3. otherwise, make a guess ...
1917  */
1918  if (rightop && IsA(rightop, Const))
1919  {
1920  Datum arraydatum = ((Const *) rightop)->constvalue;
1921  bool arrayisnull = ((Const *) rightop)->constisnull;
1922  ArrayType *arrayval;
1923  int16 elmlen;
1924  bool elmbyval;
1925  char elmalign;
1926  int num_elems;
1927  Datum *elem_values;
1928  bool *elem_nulls;
1929  int i;
1930 
1931  if (arrayisnull) /* qual can't succeed if null array */
1932  return (Selectivity) 0.0;
1933  arrayval = DatumGetArrayTypeP(arraydatum);
1934  get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1935  &elmlen, &elmbyval, &elmalign);
1936  deconstruct_array(arrayval,
1937  ARR_ELEMTYPE(arrayval),
1938  elmlen, elmbyval, elmalign,
1939  &elem_values, &elem_nulls, &num_elems);
1940 
1941  /*
1942  * For generic operators, we assume the probability of success is
1943  * independent for each array element. But for "= ANY" or "<> ALL",
1944  * if the array elements are distinct (which'd typically be the case)
1945  * then the probabilities are disjoint, and we should just sum them.
1946  *
1947  * If we were being really tense we would try to confirm that the
1948  * elements are all distinct, but that would be expensive and it
1949  * doesn't seem to be worth the cycles; it would amount to penalizing
1950  * well-written queries in favor of poorly-written ones. However, we
1951  * do protect ourselves a little bit by checking whether the
1952  * disjointness assumption leads to an impossible (out of range)
1953  * probability; if so, we fall back to the normal calculation.
1954  */
1955  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1956 
1957  for (i = 0; i < num_elems; i++)
1958  {
1959  List *args;
1960  Selectivity s2;
1961 
1962  args = list_make2(leftop,
1963  makeConst(nominal_element_type,
1964  -1,
1965  nominal_element_collation,
1966  elmlen,
1967  elem_values[i],
1968  elem_nulls[i],
1969  elmbyval));
1970  if (is_join_clause)
1971  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1972  clause->inputcollid,
1973  PointerGetDatum(root),
1974  ObjectIdGetDatum(operator),
1975  PointerGetDatum(args),
1976  Int16GetDatum(jointype),
1977  PointerGetDatum(sjinfo)));
1978  else
1979  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1980  clause->inputcollid,
1981  PointerGetDatum(root),
1982  ObjectIdGetDatum(operator),
1983  PointerGetDatum(args),
1984  Int32GetDatum(varRelid)));
1985 
1986  if (useOr)
1987  {
1988  s1 = s1 + s2 - s1 * s2;
1989  if (isEquality)
1990  s1disjoint += s2;
1991  }
1992  else
1993  {
1994  s1 = s1 * s2;
1995  if (isInequality)
1996  s1disjoint += s2 - 1.0;
1997  }
1998  }
1999 
2000  /* accept disjoint-probability estimate if in range */
2001  if ((useOr ? isEquality : isInequality) &&
2002  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2003  s1 = s1disjoint;
2004  }
2005  else if (rightop && IsA(rightop, ArrayExpr) &&
2006  !((ArrayExpr *) rightop)->multidims)
2007  {
2008  ArrayExpr *arrayexpr = (ArrayExpr *) rightop;
2009  int16 elmlen;
2010  bool elmbyval;
2011  ListCell *l;
2012 
2013  get_typlenbyval(arrayexpr->element_typeid,
2014  &elmlen, &elmbyval);
2015 
2016  /*
2017  * We use the assumption of disjoint probabilities here too, although
2018  * the odds of equal array elements are rather higher if the elements
2019  * are not all constants (which they won't be, else constant folding
2020  * would have reduced the ArrayExpr to a Const). In this path it's
2021  * critical to have the sanity check on the s1disjoint estimate.
2022  */
2023  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
2024 
2025  foreach(l, arrayexpr->elements)
2026  {
2027  Node *elem = (Node *) lfirst(l);
2028  List *args;
2029  Selectivity s2;
2030 
2031  /*
2032  * Theoretically, if elem isn't of nominal_element_type we should
2033  * insert a RelabelType, but it seems unlikely that any operator
2034  * estimation function would really care ...
2035  */
2036  args = list_make2(leftop, elem);
2037  if (is_join_clause)
2038  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2039  clause->inputcollid,
2040  PointerGetDatum(root),
2041  ObjectIdGetDatum(operator),
2042  PointerGetDatum(args),
2043  Int16GetDatum(jointype),
2044  PointerGetDatum(sjinfo)));
2045  else
2046  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2047  clause->inputcollid,
2048  PointerGetDatum(root),
2049  ObjectIdGetDatum(operator),
2050  PointerGetDatum(args),
2051  Int32GetDatum(varRelid)));
2052 
2053  if (useOr)
2054  {
2055  s1 = s1 + s2 - s1 * s2;
2056  if (isEquality)
2057  s1disjoint += s2;
2058  }
2059  else
2060  {
2061  s1 = s1 * s2;
2062  if (isInequality)
2063  s1disjoint += s2 - 1.0;
2064  }
2065  }
2066 
2067  /* accept disjoint-probability estimate if in range */
2068  if ((useOr ? isEquality : isInequality) &&
2069  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2070  s1 = s1disjoint;
2071  }
2072  else
2073  {
2074  CaseTestExpr *dummyexpr;
2075  List *args;
2076  Selectivity s2;
2077  int i;
2078 
2079  /*
2080  * We need a dummy rightop to pass to the operator selectivity
2081  * routine. It can be pretty much anything that doesn't look like a
2082  * constant; CaseTestExpr is a convenient choice.
2083  */
2084  dummyexpr = makeNode(CaseTestExpr);
2085  dummyexpr->typeId = nominal_element_type;
2086  dummyexpr->typeMod = -1;
2087  dummyexpr->collation = clause->inputcollid;
2088  args = list_make2(leftop, dummyexpr);
2089  if (is_join_clause)
2090  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2091  clause->inputcollid,
2092  PointerGetDatum(root),
2093  ObjectIdGetDatum(operator),
2094  PointerGetDatum(args),
2095  Int16GetDatum(jointype),
2096  PointerGetDatum(sjinfo)));
2097  else
2098  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2099  clause->inputcollid,
2100  PointerGetDatum(root),
2101  ObjectIdGetDatum(operator),
2102  PointerGetDatum(args),
2103  Int32GetDatum(varRelid)));
2104  s1 = useOr ? 0.0 : 1.0;
2105 
2106  /*
2107  * Arbitrarily assume 10 elements in the eventual array value (see
2108  * also estimate_array_length). We don't risk an assumption of
2109  * disjoint probabilities here.
2110  */
2111  for (i = 0; i < 10; i++)
2112  {
2113  if (useOr)
2114  s1 = s1 + s2 - s1 * s2;
2115  else
2116  s1 = s1 * s2;
2117  }
2118  }
2119 
2120  /* result should be in range, but make sure... */
2121  CLAMP_PROBABILITY(s1);
2122 
2123  return s1;
2124 }
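/*
 * Illustrative example (hypothetical per-element selectivities): for
 *		WHERE x = ANY (ARRAY[1, 2, 3])
 * suppose eqsel() returns 0.01 for each element.  The independent-OR
 * combination gives 1 - (1 - 0.01)^3 = 0.0297, while the disjoint sum gives
 * 0.01 + 0.01 + 0.01 = 0.03; since the operator is equality, useOr is true,
 * and the sum is within range, 0.03 is used.  For
 *		WHERE x <> ALL (ARRAY[1, 2, 3])
 * with neqsel() = 0.99 per element, the product rule gives 0.99^3 = 0.9703
 * and the disjoint form gives 1 - 3 * 0.01 = 0.97, which is likewise
 * preferred.
 */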
2125 
2126 /*
2127  * Estimate number of elements in the array yielded by an expression.
2128  *
2129  * It's important that this agree with scalararraysel.
2130  */
2131 int
2132 estimate_array_length(Node *arrayexpr)
2133 {
2134  /* look through any binary-compatible relabeling of arrayexpr */
2135  arrayexpr = strip_array_coercion(arrayexpr);
2136 
2137  if (arrayexpr && IsA(arrayexpr, Const))
2138  {
2139  Datum arraydatum = ((Const *) arrayexpr)->constvalue;
2140  bool arrayisnull = ((Const *) arrayexpr)->constisnull;
2141  ArrayType *arrayval;
2142 
2143  if (arrayisnull)
2144  return 0;
2145  arrayval = DatumGetArrayTypeP(arraydatum);
2146  return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2147  }
2148  else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2149  !((ArrayExpr *) arrayexpr)->multidims)
2150  {
2151  return list_length(((ArrayExpr *) arrayexpr)->elements);
2152  }
2153  else
2154  {
2155  /* default guess --- see also scalararraysel */
2156  return 10;
2157  }
2158 }
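/*
 * Illustrative example: a Const such as '{1,2,3}'::int[] and the
 * single-dimensional construct ARRAY[1, 2, 3] both yield 3, a NULL array
 * constant yields 0, and any other expression (say an array-valued Var)
 * falls back to the guess of 10, matching scalararraysel's assumption.
 */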
2159 
2160 /*
2161  * rowcomparesel - Selectivity of RowCompareExpr Node.
2162  *
2163  * We estimate RowCompare selectivity by considering just the first (high
2164  * order) columns, which makes it equivalent to an ordinary OpExpr. While
2165  * this estimate could be refined by considering additional columns, it
2166  * seems unlikely that we could do a lot better without multi-column
2167  * statistics.
2168  */
2169 Selectivity
2170 rowcomparesel(PlannerInfo *root,
2171  RowCompareExpr *clause,
2172  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2173 {
2174  Selectivity s1;
2175  Oid opno = linitial_oid(clause->opnos);
2176  Oid inputcollid = linitial_oid(clause->inputcollids);
2177  List *opargs;
2178  bool is_join_clause;
2179 
2180  /* Build equivalent arg list for single operator */
2181  opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2182 
2183  /*
2184  * Decide if it's a join clause. This should match clausesel.c's
2185  * treat_as_join_clause(), except that we intentionally consider only the
2186  * leading columns and not the rest of the clause.
2187  */
2188  if (varRelid != 0)
2189  {
2190  /*
2191  * Caller is forcing restriction mode (eg, because we are examining an
2192  * inner indexscan qual).
2193  */
2194  is_join_clause = false;
2195  }
2196  else if (sjinfo == NULL)
2197  {
2198  /*
2199  * It must be a restriction clause, since it's being evaluated at a
2200  * scan node.
2201  */
2202  is_join_clause = false;
2203  }
2204  else
2205  {
2206  /*
2207  * Otherwise, it's a join if there's more than one relation used.
2208  */
2209  is_join_clause = (NumRelids((Node *) opargs) > 1);
2210  }
2211 
2212  if (is_join_clause)
2213  {
2214  /* Estimate selectivity for a join clause. */
2215  s1 = join_selectivity(root, opno,
2216  opargs,
2217  inputcollid,
2218  jointype,
2219  sjinfo);
2220  }
2221  else
2222  {
2223  /* Estimate selectivity for a restriction clause. */
2224  s1 = restriction_selectivity(root, opno,
2225  opargs,
2226  inputcollid,
2227  varRelid);
2228  }
2229 
2230  return s1;
2231 }
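/*
 * Illustrative example: for
 *		WHERE (a, b) <= (10, 'foo')
 * only the leading column pair is considered, so the result is just the
 * restriction (or join) selectivity of the first operator in clause->opnos
 * applied to (a, 10); the second column does not affect the estimate.
 */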
2232 
2233 /*
2234  * eqjoinsel - Join selectivity of "="
2235  */
2236 Datum
2237 eqjoinsel(PG_FUNCTION_ARGS)
2238 {
2239  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2240  Oid operator = PG_GETARG_OID(1);
2241  List *args = (List *) PG_GETARG_POINTER(2);
2242 
2243 #ifdef NOT_USED
2244  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2245 #endif
2246  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2247  Oid collation = PG_GET_COLLATION();
2248  double selec;
2249  double selec_inner;
2250  VariableStatData vardata1;
2251  VariableStatData vardata2;
2252  double nd1;
2253  double nd2;
2254  bool isdefault1;
2255  bool isdefault2;
2256  Oid opfuncoid;
2257  AttStatsSlot sslot1;
2258  AttStatsSlot sslot2;
2259  Form_pg_statistic stats1 = NULL;
2260  Form_pg_statistic stats2 = NULL;
2261  bool have_mcvs1 = false;
2262  bool have_mcvs2 = false;
2263  bool join_is_reversed;
2264  RelOptInfo *inner_rel;
2265 
2266  get_join_variables(root, args, sjinfo,
2267  &vardata1, &vardata2, &join_is_reversed);
2268 
2269  nd1 = get_variable_numdistinct(&vardata1, &isdefault1);
2270  nd2 = get_variable_numdistinct(&vardata2, &isdefault2);
2271 
2272  opfuncoid = get_opcode(operator);
2273 
2274  memset(&sslot1, 0, sizeof(sslot1));
2275  memset(&sslot2, 0, sizeof(sslot2));
2276 
2277  if (HeapTupleIsValid(vardata1.statsTuple))
2278  {
2279  /* note we allow use of nullfrac regardless of security check */
2280  stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
2281  if (statistic_proc_security_check(&vardata1, opfuncoid))
2282  have_mcvs1 = get_attstatsslot(&sslot1, vardata1.statsTuple,
2283  STATISTIC_KIND_MCV, InvalidOid,
2284  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2285  }
2286 
2287  if (HeapTupleIsValid(vardata2.statsTuple))
2288  {
2289  /* note we allow use of nullfrac regardless of security check */
2290  stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
2291  if (statistic_proc_security_check(&vardata2, opfuncoid))
2292  have_mcvs2 = get_attstatsslot(&sslot2, vardata2.statsTuple,
2293  STATISTIC_KIND_MCV, InvalidOid,
2294  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2295  }
2296 
2297  /* We need to compute the inner-join selectivity in all cases */
2298  selec_inner = eqjoinsel_inner(opfuncoid, collation,
2299  &vardata1, &vardata2,
2300  nd1, nd2,
2301  isdefault1, isdefault2,
2302  &sslot1, &sslot2,
2303  stats1, stats2,
2304  have_mcvs1, have_mcvs2);
2305 
2306  switch (sjinfo->jointype)
2307  {
2308  case JOIN_INNER:
2309  case JOIN_LEFT:
2310  case JOIN_FULL:
2311  selec = selec_inner;
2312  break;
2313  case JOIN_SEMI:
2314  case JOIN_ANTI:
2315 
2316  /*
2317  * Look up the join's inner relation. min_righthand is sufficient
2318  * information because neither SEMI nor ANTI joins permit any
2319  * reassociation into or out of their RHS, so the righthand will
2320  * always be exactly that set of rels.
2321  */
2322  inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2323 
2324  if (!join_is_reversed)
2325  selec = eqjoinsel_semi(opfuncoid, collation,
2326  &vardata1, &vardata2,
2327  nd1, nd2,
2328  isdefault1, isdefault2,
2329  &sslot1, &sslot2,
2330  stats1, stats2,
2331  have_mcvs1, have_mcvs2,
2332  inner_rel);
2333  else
2334  {
2335  Oid commop = get_commutator(operator);
2336  Oid commopfuncoid = OidIsValid(commop) ? get_opcode(commop) : InvalidOid;
2337 
2338  selec = eqjoinsel_semi(commopfuncoid, collation,
2339  &vardata2, &vardata1,
2340  nd2, nd1,
2341  isdefault2, isdefault1,
2342  &sslot2, &sslot1,
2343  stats2, stats1,
2344  have_mcvs2, have_mcvs1,
2345  inner_rel);
2346  }
2347 
2348  /*
2349  * We should never estimate the output of a semijoin to be more
2350  * rows than we estimate for an inner join with the same input
2351  * rels and join condition; it's obviously impossible for that to
2352  * happen. The former estimate is N1 * Ssemi while the latter is
2353  * N1 * N2 * Sinner, so we may clamp Ssemi <= N2 * Sinner. Doing
2354  * this is worthwhile because of the shakier estimation rules we
2355  * use in eqjoinsel_semi, particularly in cases where it has to
2356  * punt entirely.
2357  */
2358  selec = Min(selec, inner_rel->rows * selec_inner);
2359  break;
2360  default:
2361  /* other values not expected here */
2362  elog(ERROR, "unrecognized join type: %d",
2363  (int) sjinfo->jointype);
2364  selec = 0; /* keep compiler quiet */
2365  break;
2366  }
2367 
2368  free_attstatsslot(&sslot1);
2369  free_attstatsslot(&sslot2);
2370 
2371  ReleaseVariableStats(vardata1);
2372  ReleaseVariableStats(vardata2);
2373 
2374  CLAMP_PROBABILITY(selec);
2375 
2376  PG_RETURN_FLOAT8((float8) selec);
2377 }
2378 
2379 /*
2380  * eqjoinsel_inner --- eqjoinsel for normal inner join
2381  *
2382  * We also use this for LEFT/FULL outer joins; it's not presently clear
2383  * that it's worth trying to distinguish them here.
2384  */
2385 static double
2386 eqjoinsel_inner(Oid opfuncoid, Oid collation,
2387  VariableStatData *vardata1, VariableStatData *vardata2,
2388  double nd1, double nd2,
2389  bool isdefault1, bool isdefault2,
2390  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2391  Form_pg_statistic stats1, Form_pg_statistic stats2,
2392  bool have_mcvs1, bool have_mcvs2)
2393 {
2394  double selec;
2395 
2396  if (have_mcvs1 && have_mcvs2)
2397  {
2398  /*
2399  * We have most-common-value lists for both relations. Run through
2400  * the lists to see which MCVs actually join to each other with the
2401  * given operator. This allows us to determine the exact join
2402  * selectivity for the portion of the relations represented by the MCV
2403  * lists. We still have to estimate for the remaining population, but
2404  * in a skewed distribution this gives us a big leg up in accuracy.
2405  * For motivation see the analysis in Y. Ioannidis and S.
2406  * Christodoulakis, "On the propagation of errors in the size of join
2407  * results", Technical Report 1018, Computer Science Dept., University
2408  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2409  */
2410  LOCAL_FCINFO(fcinfo, 2);
2411  FmgrInfo eqproc;
2412  bool *hasmatch1;
2413  bool *hasmatch2;
2414  double nullfrac1 = stats1->stanullfrac;
2415  double nullfrac2 = stats2->stanullfrac;
2416  double matchprodfreq,
2417  matchfreq1,
2418  matchfreq2,
2419  unmatchfreq1,
2420  unmatchfreq2,
2421  otherfreq1,
2422  otherfreq2,
2423  totalsel1,
2424  totalsel2;
2425  int i,
2426  nmatches;
2427 
2428  fmgr_info(opfuncoid, &eqproc);
2429 
2430  /*
2431  * Save a few cycles by setting up the fcinfo struct just once. Using
2432  * FunctionCallInvoke directly also avoids failure if the eqproc
2433  * returns NULL, though really equality functions should never do
2434  * that.
2435  */
2436  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2437  NULL, NULL);
2438  fcinfo->args[0].isnull = false;
2439  fcinfo->args[1].isnull = false;
2440 
2441  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2442  hasmatch2 = (bool *) palloc0(sslot2->nvalues * sizeof(bool));
2443 
2444  /*
2445  * Note we assume that each MCV will match at most one member of the
2446  * other MCV list. If the operator isn't really equality, there could
2447  * be multiple matches --- but we don't look for them, both for speed
2448  * and because the math wouldn't add up...
2449  */
2450  matchprodfreq = 0.0;
2451  nmatches = 0;
2452  for (i = 0; i < sslot1->nvalues; i++)
2453  {
2454  int j;
2455 
2456  fcinfo->args[0].value = sslot1->values[i];
2457 
2458  for (j = 0; j < sslot2->nvalues; j++)
2459  {
2460  Datum fresult;
2461 
2462  if (hasmatch2[j])
2463  continue;
2464  fcinfo->args[1].value = sslot2->values[j];
2465  fcinfo->isnull = false;
2466  fresult = FunctionCallInvoke(fcinfo);
2467  if (!fcinfo->isnull && DatumGetBool(fresult))
2468  {
2469  hasmatch1[i] = hasmatch2[j] = true;
2470  matchprodfreq += sslot1->numbers[i] * sslot2->numbers[j];
2471  nmatches++;
2472  break;
2473  }
2474  }
2475  }
2476  CLAMP_PROBABILITY(matchprodfreq);
2477  /* Sum up frequencies of matched and unmatched MCVs */
2478  matchfreq1 = unmatchfreq1 = 0.0;
2479  for (i = 0; i < sslot1->nvalues; i++)
2480  {
2481  if (hasmatch1[i])
2482  matchfreq1 += sslot1->numbers[i];
2483  else
2484  unmatchfreq1 += sslot1->numbers[i];
2485  }
2486  CLAMP_PROBABILITY(matchfreq1);
2487  CLAMP_PROBABILITY(unmatchfreq1);
2488  matchfreq2 = unmatchfreq2 = 0.0;
2489  for (i = 0; i < sslot2->nvalues; i++)
2490  {
2491  if (hasmatch2[i])
2492  matchfreq2 += sslot2->numbers[i];
2493  else
2494  unmatchfreq2 += sslot2->numbers[i];
2495  }
2496  CLAMP_PROBABILITY(matchfreq2);
2497  CLAMP_PROBABILITY(unmatchfreq2);
2498  pfree(hasmatch1);
2499  pfree(hasmatch2);
2500 
2501  /*
2502  * Compute total frequency of non-null values that are not in the MCV
2503  * lists.
2504  */
2505  otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2506  otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2507  CLAMP_PROBABILITY(otherfreq1);
2508  CLAMP_PROBABILITY(otherfreq2);
2509 
2510  /*
2511  * We can estimate the total selectivity from the point of view of
2512  * relation 1 as: the known selectivity for matched MCVs, plus
2513  * unmatched MCVs that are assumed to match against random members of
2514  * relation 2's non-MCV population, plus non-MCV values that are
2515  * assumed to match against random members of relation 2's unmatched
2516  * MCVs plus non-MCV values.
2517  */
2518  totalsel1 = matchprodfreq;
2519  if (nd2 > sslot2->nvalues)
2520  totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - sslot2->nvalues);
2521  if (nd2 > nmatches)
2522  totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2523  (nd2 - nmatches);
2524  /* Same estimate from the point of view of relation 2. */
2525  totalsel2 = matchprodfreq;
2526  if (nd1 > sslot1->nvalues)
2527  totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - sslot1->nvalues);
2528  if (nd1 > nmatches)
2529  totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2530  (nd1 - nmatches);
2531 
2532  /*
2533  * Use the smaller of the two estimates. This can be justified in
2534  * essentially the same terms as given below for the no-stats case: to
2535  * a first approximation, we are estimating from the point of view of
2536  * the relation with smaller nd.
2537  */
2538  selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2539  }
2540  else
2541  {
2542  /*
2543  * We do not have MCV lists for both sides. Estimate the join
2544  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2545  * is plausible if we assume that the join operator is strict and the
2546  * non-null values are about equally distributed: a given non-null
2547  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2548  * of rel2, so total join rows are at most
2549  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2550  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2551  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2552  * with MIN() is an upper bound. Using the MIN() means we estimate
2553  * from the point of view of the relation with smaller nd (since the
2554  * larger nd is determining the MIN). It is reasonable to assume that
2555  * most tuples in this rel will have join partners, so the bound is
2556  * probably reasonably tight and should be taken as-is.
2557  *
2558  * XXX Can we be smarter if we have an MCV list for just one side? It
2559  * seems that if we assume equal distribution for the other side, we
2560  * end up with the same answer anyway.
2561  */
2562  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2563  double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2564 
2565  selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2566  if (nd1 > nd2)
2567  selec /= nd1;
2568  else
2569  selec /= nd2;
2570  }
2571 
2572  return selec;
2573 }
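/*
 * Illustrative example (hypothetical stats) for the no-MCV branch above:
 * joining on r1.x = r2.y with nd1 = 100, nd2 = 1000, nullfrac1 = 0.0 and
 * nullfrac2 = 0.1 gives
 *		selec = (1.0 - 0.0) * (1.0 - 0.1) / 1000 = 0.0009
 * i.e. we divide by the larger ndistinct, which amounts to estimating from
 * the viewpoint of the relation with the smaller ndistinct.
 */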
2574 
2575 /*
2576  * eqjoinsel_semi --- eqjoinsel for semi join
2577  *
2578  * (Also used for anti join, which we are supposed to estimate the same way.)
2579  * Caller has ensured that vardata1 is the LHS variable.
2580  * Unlike eqjoinsel_inner, we have to cope with opfuncoid being InvalidOid.
2581  */
2582 static double
2583 eqjoinsel_semi(Oid opfuncoid, Oid collation,
2584  VariableStatData *vardata1, VariableStatData *vardata2,
2585  double nd1, double nd2,
2586  bool isdefault1, bool isdefault2,
2587  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2588  Form_pg_statistic stats1, Form_pg_statistic stats2,
2589  bool have_mcvs1, bool have_mcvs2,
2590  RelOptInfo *inner_rel)
2591 {
2592  double selec;
2593 
2594  /*
2595  * We clamp nd2 to be not more than what we estimate the inner relation's
2596  * size to be. This is intuitively somewhat reasonable since obviously
2597  * there can't be more than that many distinct values coming from the
2598  * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
2599  * likewise) is that this is the only pathway by which restriction clauses
2600  * applied to the inner rel will affect the join result size estimate,
2601  * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2602  * only the outer rel's size. If we clamped nd1 we'd be double-counting
2603  * the selectivity of outer-rel restrictions.
2604  *
2605  * We can apply this clamping both with respect to the base relation from
2606  * which the join variable comes (if there is just one), and to the
2607  * immediate inner input relation of the current join.
2608  *
2609  * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2610  * great, maybe, but it didn't come out of nowhere either. This is most
2611  * helpful when the inner relation is empty and consequently has no stats.
2612  */
2613  if (vardata2->rel)
2614  {
2615  if (nd2 >= vardata2->rel->rows)
2616  {
2617  nd2 = vardata2->rel->rows;
2618  isdefault2 = false;
2619  }
2620  }
2621  if (nd2 >= inner_rel->rows)
2622  {
2623  nd2 = inner_rel->rows;
2624  isdefault2 = false;
2625  }
2626 
2627  if (have_mcvs1 && have_mcvs2 && OidIsValid(opfuncoid))
2628  {
2629  /*
2630  * We have most-common-value lists for both relations. Run through
2631  * the lists to see which MCVs actually join to each other with the
2632  * given operator. This allows us to determine the exact join
2633  * selectivity for the portion of the relations represented by the MCV
2634  * lists. We still have to estimate for the remaining population, but
2635  * in a skewed distribution this gives us a big leg up in accuracy.
2636  */
2637  LOCAL_FCINFO(fcinfo, 2);
2638  FmgrInfo eqproc;
2639  bool *hasmatch1;
2640  bool *hasmatch2;
2641  double nullfrac1 = stats1->stanullfrac;
2642  double matchfreq1,
2643  uncertainfrac,
2644  uncertain;
2645  int i,
2646  nmatches,
2647  clamped_nvalues2;
2648 
2649  /*
2650  * The clamping above could have resulted in nd2 being less than
2651  * sslot2->nvalues; in which case, we assume that precisely the nd2
2652  * most common values in the relation will appear in the join input,
2653  * and so compare to only the first nd2 members of the MCV list. Of
2654  * course this is frequently wrong, but it's the best bet we can make.
2655  */
2656  clamped_nvalues2 = Min(sslot2->nvalues, nd2);
2657 
2658  fmgr_info(opfuncoid, &eqproc);
2659 
2660  /*
2661  * Save a few cycles by setting up the fcinfo struct just once. Using
2662  * FunctionCallInvoke directly also avoids failure if the eqproc
2663  * returns NULL, though really equality functions should never do
2664  * that.
2665  */
2666  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2667  NULL, NULL);
2668  fcinfo->args[0].isnull = false;
2669  fcinfo->args[1].isnull = false;
2670 
2671  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2672  hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2673 
2674  /*
2675  * Note we assume that each MCV will match at most one member of the
2676  * other MCV list. If the operator isn't really equality, there could
2677  * be multiple matches --- but we don't look for them, both for speed
2678  * and because the math wouldn't add up...
2679  */
2680  nmatches = 0;
2681  for (i = 0; i < sslot1->nvalues; i++)
2682  {
2683  int j;
2684 
2685  fcinfo->args[0].value = sslot1->values[i];
2686 
2687  for (j = 0; j < clamped_nvalues2; j++)
2688  {
2689  Datum fresult;
2690 
2691  if (hasmatch2[j])
2692  continue;
2693  fcinfo->args[1].value = sslot2->values[j];
2694  fcinfo->isnull = false;
2695  fresult = FunctionCallInvoke(fcinfo);
2696  if (!fcinfo->isnull && DatumGetBool(fresult))
2697  {
2698  hasmatch1[i] = hasmatch2[j] = true;
2699  nmatches++;
2700  break;
2701  }
2702  }
2703  }
2704  /* Sum up frequencies of matched MCVs */
2705  matchfreq1 = 0.0;
2706  for (i = 0; i < sslot1->nvalues; i++)
2707  {
2708  if (hasmatch1[i])
2709  matchfreq1 += sslot1->numbers[i];
2710  }
2711  CLAMP_PROBABILITY(matchfreq1);
2712  pfree(hasmatch1);
2713  pfree(hasmatch2);
2714 
2715  /*
2716  * Now we need to estimate the fraction of relation 1 that has at
2717  * least one join partner. We know for certain that the matched MCVs
2718  * do, so that gives us a lower bound, but we're really in the dark
2719  * about everything else. Our crude approach is: if nd1 <= nd2 then
2720  * assume all non-null rel1 rows have join partners, else assume for
2721  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2722  * can discount the known-matched MCVs from the distinct-values counts
2723  * before doing the division.
2724  *
2725  * Crude as the above is, it's completely useless if we don't have
2726  * reliable ndistinct values for both sides. Hence, if either nd1 or
2727  * nd2 is default, punt and assume half of the uncertain rows have
2728  * join partners.
2729  */
2730  if (!isdefault1 && !isdefault2)
2731  {
2732  nd1 -= nmatches;
2733  nd2 -= nmatches;
2734  if (nd1 <= nd2 || nd2 < 0)
2735  uncertainfrac = 1.0;
2736  else
2737  uncertainfrac = nd2 / nd1;
2738  }
2739  else
2740  uncertainfrac = 0.5;
2741  uncertain = 1.0 - matchfreq1 - nullfrac1;
2742  CLAMP_PROBABILITY(uncertain);
2743  selec = matchfreq1 + uncertainfrac * uncertain;
2744  }
2745  else
2746  {
2747  /*
2748  * Without MCV lists for both sides, we can only use the heuristic
2749  * about nd1 vs nd2.
2750  */
2751  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2752 
2753  if (!isdefault1 && !isdefault2)
2754  {
2755  if (nd1 <= nd2 || nd2 < 0)
2756  selec = 1.0 - nullfrac1;
2757  else
2758  selec = (nd2 / nd1) * (1.0 - nullfrac1);
2759  }
2760  else
2761  selec = 0.5 * (1.0 - nullfrac1);
2762  }
2763 
2764  return selec;
2765 }
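/*
 * Illustrative example (hypothetical stats) for the no-MCV branch above:
 * for "x IN (SELECT y FROM ...)" with nd1 = 1000 on the outer side,
 * nd2 = 200 after clamping to the inner relation's row estimate, and
 * nullfrac1 = 0, the heuristic gives selec = nd2 / nd1 = 0.2.  If instead
 * nd1 <= nd2, every non-null outer row is assumed to find a match and the
 * estimate becomes 1.0 - nullfrac1.
 */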
2766 
2767 /*
2768  * neqjoinsel - Join selectivity of "!="
2769  */
2770 Datum
2771 neqjoinsel(PG_FUNCTION_ARGS)
2772 {
2773  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2774  Oid operator = PG_GETARG_OID(1);
2775  List *args = (List *) PG_GETARG_POINTER(2);
2776  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2777  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2778  Oid collation = PG_GET_COLLATION();
2779  float8 result;
2780 
2781  if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
2782  {
2783  /*
2784  * For semi-joins, if there is more than one distinct value in the RHS
2785  * relation then every non-null LHS row must find a row to join since
2786  * it can only be equal to one of them. We'll assume that there is
2787  * always more than one distinct RHS value for the sake of stability,
2788  * though in theory we could have special cases for empty RHS
2789  * (selectivity = 0) and single-distinct-value RHS (selectivity =
2790  * fraction of LHS that has the same value as the single RHS value).
2791  *
2792  * For anti-joins, if we use the same assumption that there is more
2793  * than one distinct key in the RHS relation, then every non-null LHS
2794  * row must be suppressed by the anti-join.
2795  *
2796  * So either way, the selectivity estimate should be 1 - nullfrac.
2797  */
2798  VariableStatData leftvar;
2799  VariableStatData rightvar;
2800  bool reversed;
2801  HeapTuple statsTuple;
2802  double nullfrac;
2803 
2804  get_join_variables(root, args, sjinfo, &leftvar, &rightvar, &reversed);
2805  statsTuple = reversed ? rightvar.statsTuple : leftvar.statsTuple;
2806  if (HeapTupleIsValid(statsTuple))
2807  nullfrac = ((Form_pg_statistic) GETSTRUCT(statsTuple))->stanullfrac;
2808  else
2809  nullfrac = 0.0;
2810  ReleaseVariableStats(leftvar);
2811  ReleaseVariableStats(rightvar);
2812 
2813  result = 1.0 - nullfrac;
2814  }
2815  else
2816  {
2817  /*
2818  * We want 1 - eqjoinsel() where the equality operator is the one
2819  * associated with this != operator, that is, its negator.
2820  */
2821  Oid eqop = get_negator(operator);
2822 
2823  if (eqop)
2824  {
2825  result =
2826  DatumGetFloat8(DirectFunctionCall5Coll(eqjoinsel,
2827  collation,
2828  PointerGetDatum(root),
2829  ObjectIdGetDatum(eqop),
2830  PointerGetDatum(args),
2831  Int16GetDatum(jointype),
2832  PointerGetDatum(sjinfo)));
2833  }
2834  else
2835  {
2836  /* Use default selectivity (should we raise an error instead?) */
2837  result = DEFAULT_EQ_SEL;
2838  }
2839  result = 1.0 - result;
2840  }
2841 
2842  PG_RETURN_FLOAT8(result);
2843 }
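/*
 * Illustrative example (hypothetical numbers): for an inner join on
 * "r1.x <> r2.y", if eqjoinsel for the corresponding "=" operator returns
 * 0.001, the estimate here is 1.0 - 0.001 = 0.999.  For a semi or anti
 * join the MCVs are not consulted at all: with stanullfrac = 0.05 on the
 * outer variable the result is simply 1.0 - 0.05 = 0.95.
 */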
2844 
2845 /*
2846  * scalarltjoinsel - Join selectivity of "<" for scalars
2847  */
2848 Datum
2849 scalarltjoinsel(PG_FUNCTION_ARGS)
2850 {
2851  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2852 }
2853 
2854 /*
2855  * scalarlejoinsel - Join selectivity of "<=" for scalars
2856  */
2857 Datum
2858 scalarlejoinsel(PG_FUNCTION_ARGS)
2859 {
2860  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2861 }
2862 
2863 /*
2864  * scalargtjoinsel - Join selectivity of ">" for scalars
2865  */
2866 Datum
2867 scalargtjoinsel(PG_FUNCTION_ARGS)
2868 {
2869  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2870 }
2871 
2872 /*
2873  * scalargejoinsel - Join selectivity of ">=" for scalars
2874  */
2875 Datum
2876 scalargejoinsel(PG_FUNCTION_ARGS)
2877 {
2878  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2879 }
2880 
2881 
2882 /*
2883  * mergejoinscansel - Scan selectivity of merge join.
2884  *
2885  * A merge join will stop as soon as it exhausts either input stream.
2886  * Therefore, if we can estimate the ranges of both input variables,
2887  * we can estimate how much of the input will actually be read. This
2888  * can have a considerable impact on the cost when using indexscans.
2889  *
2890  * Also, we can estimate how much of each input has to be read before the
2891  * first join pair is found, which will affect the join's startup time.
2892  *
2893  * clause should be a clause already known to be mergejoinable. opfamily,
2894  * strategy, and nulls_first specify the sort ordering being used.
2895  *
2896  * The outputs are:
2897  * *leftstart is set to the fraction of the left-hand variable expected
2898  * to be scanned before the first join pair is found (0 to 1).
2899  * *leftend is set to the fraction of the left-hand variable expected
2900  * to be scanned before the join terminates (0 to 1).
2901  * *rightstart, *rightend similarly for the right-hand variable.
2902  */
2903 void
2904 mergejoinscansel(PlannerInfo *root, Node *clause,
2905  Oid opfamily, int strategy, bool nulls_first,
2906  Selectivity *leftstart, Selectivity *leftend,
2907  Selectivity *rightstart, Selectivity *rightend)
2908 {
2909  Node *left,
2910  *right;
2911  VariableStatData leftvar,
2912  rightvar;
2913  int op_strategy;
2914  Oid op_lefttype;
2915  Oid op_righttype;
2916  Oid opno,
2917  collation,
2918  lsortop,
2919  rsortop,
2920  lstatop,
2921  rstatop,
2922  ltop,
2923  leop,
2924  revltop,
2925  revleop;
2926  bool isgt;
2927  Datum leftmin,
2928  leftmax,
2929  rightmin,
2930  rightmax;
2931  double selec;
2932 
2933  /* Set default results if we can't figure anything out. */
2934  /* XXX should default "start" fraction be a bit more than 0? */
2935  *leftstart = *rightstart = 0.0;
2936  *leftend = *rightend = 1.0;
2937 
2938  /* Deconstruct the merge clause */
2939  if (!is_opclause(clause))
2940  return; /* shouldn't happen */
2941  opno = ((OpExpr *) clause)->opno;
2942  collation = ((OpExpr *) clause)->inputcollid;
2943  left = get_leftop((Expr *) clause);
2944  right = get_rightop((Expr *) clause);
2945  if (!right)
2946  return; /* shouldn't happen */
2947 
2948  /* Look for stats for the inputs */
2949  examine_variable(root, left, 0, &leftvar);
2950  examine_variable(root, right, 0, &rightvar);
2951 
2952  /* Extract the operator's declared left/right datatypes */
2953  get_op_opfamily_properties(opno, opfamily, false,
2954  &op_strategy,
2955  &op_lefttype,
2956  &op_righttype);
2957  Assert(op_strategy == BTEqualStrategyNumber);
2958 
2959  /*
2960  * Look up the various operators we need. If we don't find them all, it
2961  * probably means the opfamily is broken, but we just fail silently.
2962  *
2963  * Note: we expect that pg_statistic histograms will be sorted by the '<'
2964  * operator, regardless of which sort direction we are considering.
2965  */
2966  switch (strategy)
2967  {
2968  case BTLessStrategyNumber:
2969  isgt = false;
2970  if (op_lefttype == op_righttype)
2971  {
2972  /* easy case */
2973  ltop = get_opfamily_member(opfamily,
2974  op_lefttype, op_righttype,
2975  BTLessStrategyNumber);
2976  leop = get_opfamily_member(opfamily,
2977  op_lefttype, op_righttype,
2978  BTLessEqualStrategyNumber);
2979  lsortop = ltop;
2980  rsortop = ltop;
2981  lstatop = lsortop;
2982  rstatop = rsortop;
2983  revltop = ltop;
2984  revleop = leop;
2985  }
2986  else
2987  {
2988  ltop = get_opfamily_member(opfamily,
2989  op_lefttype, op_righttype,
2990  BTLessStrategyNumber);
2991  leop = get_opfamily_member(opfamily,
2992  op_lefttype, op_righttype,
2993  BTLessEqualStrategyNumber);
2994  lsortop = get_opfamily_member(opfamily,
2995  op_lefttype, op_lefttype,
2996  BTLessStrategyNumber);
2997  rsortop = get_opfamily_member(opfamily,
2998  op_righttype, op_righttype,
2999  BTLessStrategyNumber);
3000  lstatop = lsortop;
3001  rstatop = rsortop;
3002  revltop = get_opfamily_member(opfamily,
3003  op_righttype, op_lefttype,
3004  BTLessStrategyNumber);
3005  revleop = get_opfamily_member(opfamily,
3006  op_righttype, op_lefttype,
3007  BTLessEqualStrategyNumber);
3008  }
3009  break;
3010  case BTGreaterStrategyNumber:
3011  /* descending-order case */
3012  isgt = true;
3013  if (op_lefttype == op_righttype)
3014  {
3015  /* easy case */
3016  ltop = get_opfamily_member(opfamily,
3017  op_lefttype, op_righttype,
3018  BTGreaterStrategyNumber);
3019  leop = get_opfamily_member(opfamily,
3020  op_lefttype, op_righttype,
3021  BTGreaterEqualStrategyNumber);
3022  lsortop = ltop;
3023  rsortop = ltop;
3024  lstatop = get_opfamily_member(opfamily,
3025  op_lefttype, op_lefttype,
3026  BTLessStrategyNumber);
3027  rstatop = lstatop;
3028  revltop = ltop;
3029  revleop = leop;
3030  }
3031  else
3032  {
3033  ltop = get_opfamily_member(opfamily,
3034  op_lefttype, op_righttype,
3035  BTGreaterStrategyNumber);
3036  leop = get_opfamily_member(opfamily,
3037  op_lefttype, op_righttype,
3038  BTGreaterEqualStrategyNumber);
3039  lsortop = get_opfamily_member(opfamily,
3040  op_lefttype, op_lefttype,
3041  BTGreaterStrategyNumber);
3042  rsortop = get_opfamily_member(opfamily,
3043  op_righttype, op_righttype,
3044  BTGreaterStrategyNumber);
3045  lstatop = get_opfamily_member(opfamily,
3046  op_lefttype, op_lefttype,
3047  BTLessStrategyNumber);
3048  rstatop = get_opfamily_member(opfamily,
3049  op_righttype, op_righttype,
3050  BTLessStrategyNumber);
3051  revltop = get_opfamily_member(opfamily,
3052  op_righttype, op_lefttype,
3053  BTGreaterStrategyNumber);
3054  revleop = get_opfamily_member(opfamily,
3055  op_righttype, op_lefttype,
3056  BTGreaterEqualStrategyNumber);
3057  }
3058  break;
3059  default:
3060  goto fail; /* shouldn't get here */
3061  }
3062 
3063  if (!OidIsValid(lsortop) ||
3064  !OidIsValid(rsortop) ||
3065  !OidIsValid(lstatop) ||
3066  !OidIsValid(rstatop) ||
3067  !OidIsValid(ltop) ||
3068  !OidIsValid(leop) ||
3069  !OidIsValid(revltop) ||
3070  !OidIsValid(revleop))
3071  goto fail; /* insufficient info in catalogs */
3072 
3073  /* Try to get ranges of both inputs */
3074  if (!isgt)
3075  {
3076  if (!get_variable_range(root, &leftvar, lstatop, collation,
3077  &leftmin, &leftmax))
3078  goto fail; /* no range available from stats */
3079  if (!get_variable_range(root, &rightvar, rstatop, collation,
3080  &rightmin, &rightmax))
3081  goto fail; /* no range available from stats */
3082  }
3083  else
3084  {
3085  /* need to swap the max and min */
3086  if (!get_variable_range(root, &leftvar, lstatop, collation,
3087  &leftmax, &leftmin))
3088  goto fail; /* no range available from stats */
3089  if (!get_variable_range(root, &rightvar, rstatop, collation,
3090  &rightmax, &rightmin))
3091  goto fail; /* no range available from stats */
3092  }
3093 
3094  /*
3095  * Now, the fraction of the left variable that will be scanned is the
3096  * fraction that's <= the right-side maximum value. But only believe
3097  * non-default estimates, else stick with our 1.0.
3098  */
3099  selec = scalarineqsel(root, leop, isgt, true, collation, &leftvar,
3100  rightmax, op_righttype);
3101  if (selec != DEFAULT_INEQ_SEL)
3102  *leftend = selec;
3103 
3104  /* And similarly for the right variable. */
3105  selec = scalarineqsel(root, revleop, isgt, true, collation, &rightvar,
3106  leftmax, op_lefttype);
3107  if (selec != DEFAULT_INEQ_SEL)
3108  *rightend = selec;
3109 
3110  /*
3111  * Only one of the two "end" fractions can really be less than 1.0;
3112  * believe the smaller estimate and reset the other one to exactly 1.0. If
3113  * we get exactly equal estimates (as can easily happen with self-joins),
3114  * believe neither.
3115  */
3116  if (*leftend > *rightend)
3117  *leftend = 1.0;
3118  else if (*leftend < *rightend)
3119  *rightend = 1.0;
3120  else
3121  *leftend = *rightend = 1.0;
3122 
3123  /*
3124  * Also, the fraction of the left variable that will be scanned before the
3125  * first join pair is found is the fraction that's < the right-side
3126  * minimum value. But only believe non-default estimates, else stick with
3127  * our own default.
3128  */
3129  selec = scalarineqsel(root, ltop, isgt, false, collation, &leftvar,
3130  rightmin, op_righttype);
3131  if (selec != DEFAULT_INEQ_SEL)
3132  *leftstart = selec;
3133 
3134  /* And similarly for the right variable. */
3135  selec = scalarineqsel(root, revltop, isgt, false, collation, &rightvar,
3136  leftmin, op_lefttype);
3137  if (selec != DEFAULT_INEQ_SEL)
3138  *rightstart = selec;
3139 
3140  /*
3141  * Only one of the two "start" fractions can really be more than zero;
3142  * believe the larger estimate and reset the other one to exactly 0.0. If
3143  * we get exactly equal estimates (as can easily happen with self-joins),
3144  * believe neither.
3145  */
3146  if (*leftstart < *rightstart)
3147  *leftstart = 0.0;
3148  else if (*leftstart > *rightstart)
3149  *rightstart = 0.0;
3150  else
3151  *leftstart = *rightstart = 0.0;
3152 
3153  /*
3154  * If the sort order is nulls-first, we're going to have to skip over any
3155  * nulls too. These would not have been counted by scalarineqsel, and we
3156  * can safely add in this fraction regardless of whether we believe
3157  * scalarineqsel's results or not. But be sure to clamp the sum to 1.0!
3158  */
3159  if (nulls_first)
3160  {
3161  Form_pg_statistic stats;
3162 
3163  if (HeapTupleIsValid(leftvar.statsTuple))
3164  {
3165  stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3166  *leftstart += stats->stanullfrac;
3167  CLAMP_PROBABILITY(*leftstart);
3168  *leftend += stats->stanullfrac;
3169  CLAMP_PROBABILITY(*leftend);
3170  }
3171  if (HeapTupleIsValid(rightvar.statsTuple))
3172  {
3173  stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3174  *rightstart += stats->stanullfrac;
3175  CLAMP_PROBABILITY(*rightstart);
3176  *rightend += stats->stanullfrac;
3177  CLAMP_PROBABILITY(*rightend);
3178  }
3179  }
3180 
3181  /* Disbelieve start >= end, just in case that can happen */
3182  if (*leftstart >= *leftend)
3183  {
3184  *leftstart = 0.0;
3185  *leftend = 1.0;
3186  }
3187  if (*rightstart >= *rightend)
3188  {
3189  *rightstart = 0.0;
3190  *rightend = 1.0;
3191  }
3192 
3193 fail:
3194  ReleaseVariableStats(leftvar);
3195  ReleaseVariableStats(rightvar);
3196 }
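/*
 * Illustrative example (hypothetical, uniformly distributed ranges): for a
 * merge on "l.a = r.b" in ascending order, if stats say l.a spans 1..1000
 * and r.b spans 1..500, then about half of the left input is <= the
 * right-side maximum, so *leftend comes out near 0.5 while *rightend is
 * reset to 1.0.  The minima coincide, so *leftstart and *rightstart stay
 * near 0.0: little of either input need be read before the first join pair.
 */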
3197 
3198 
3199 /*
3200  * matchingsel -- generic matching-operator selectivity support
3201  *
3202  * Use these for any operators that (a) are on data types for which we collect
3203  * standard statistics, and (b) have behavior for which the default estimate
3204  * (twice DEFAULT_EQ_SEL) is sane. Typically that is good for match-like
3205  * operators.
3206  */
3207 
3208 Datum
3209 matchingsel(PG_FUNCTION_ARGS)
3210 {
3211  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
3212  Oid operator = PG_GETARG_OID(1);
3213  List *args = (List *) PG_GETARG_POINTER(2);
3214  int varRelid = PG_GETARG_INT32(3);
3215  Oid collation = PG_GET_COLLATION();
3216  double selec;
3217 
3218  /* Use generic restriction selectivity logic. */
3219  selec = generic_restriction_selectivity(root, operator, collation,
3220  args, varRelid,
3221  DEFAULT_MATCHING_SEL);
3222 
3223  PG_RETURN_FLOAT8((float8) selec);
3224 }
3225 
3226 Datum
3227 matchingjoinsel(PG_FUNCTION_ARGS)
3228 {
3229  /* Just punt, for the moment. */
3230  PG_RETURN_FLOAT8(DEFAULT_MATCHING_SEL);
3231 }
3232 
3233 
3234 /*
3235  * Helper routine for estimate_num_groups: add an item to a list of
3236  * GroupVarInfos, but only if it's not known equal to any of the existing
3237  * entries.
3238  */
3239 typedef struct
3240 {
3241  Node *var; /* might be an expression, not just a Var */
3242  RelOptInfo *rel; /* relation it belongs to */
3243  double ndistinct; /* # distinct values */
3244 } GroupVarInfo;
3245 
3246 static List *
3247 add_unique_group_var(PlannerInfo *root, List *varinfos,
3248  Node *var, VariableStatData *vardata)
3249 {
3250  GroupVarInfo *varinfo;
3251  double ndistinct;
3252  bool isdefault;
3253  ListCell *lc;
3254 
3255  ndistinct = get_variable_numdistinct(vardata, &isdefault);
3256 
3257  foreach(lc, varinfos)
3258  {
3259  varinfo = (GroupVarInfo *) lfirst(lc);
3260 
3261  /* Drop exact duplicates */
3262  if (equal(var, varinfo->var))
3263  return varinfos;
3264 
3265  /*
3266  * Drop known-equal vars, but only if they belong to different
3267  * relations (see comments for estimate_num_groups)
3268  */
3269  if (vardata->rel != varinfo->rel &&
3270  exprs_known_equal(root, var, varinfo->var))
3271  {
3272  if (varinfo->ndistinct <= ndistinct)
3273  {
3274  /* Keep older item, forget new one */
3275  return varinfos;
3276  }
3277  else
3278  {
3279  /* Delete the older item */
3280  varinfos = foreach_delete_current(varinfos, lc);
3281  }
3282  }
3283  }
3284 
3285  varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3286 
3287  varinfo->var = var;
3288  varinfo->rel = vardata->rel;
3289  varinfo->ndistinct = ndistinct;
3290  varinfos = lappend(varinfos, varinfo);
3291  return varinfos;
3292 }
3293 
3294 /*
3295  * estimate_num_groups - Estimate number of groups in a grouped query
3296  *
3297  * Given a query having a GROUP BY clause, estimate how many groups there
3298  * will be --- ie, the number of distinct combinations of the GROUP BY
3299  * expressions.
3300  *
3301  * This routine is also used to estimate the number of rows emitted by
3302  * a DISTINCT filtering step; that is an isomorphic problem. (Note:
3303  * actually, we only use it for DISTINCT when there's no grouping or
3304  * aggregation ahead of the DISTINCT.)
3305  *
3306  * Inputs:
3307  * root - the query
3308  * groupExprs - list of expressions being grouped by
3309  * input_rows - number of rows estimated to arrive at the group/unique
3310  * filter step
3311  * pgset - NULL, or a List** pointing to a grouping set to filter the
3312  * groupExprs against
3313  *
3314  * Given the lack of any cross-correlation statistics in the system, it's
3315  * impossible to do anything really trustworthy with GROUP BY conditions
3316  * involving multiple Vars. We should however avoid assuming the worst
3317  * case (all possible cross-product terms actually appear as groups) since
3318  * very often the grouped-by Vars are highly correlated. Our current approach
3319  * is as follows:
3320  * 1. Expressions yielding boolean are assumed to contribute two groups,
3321  * independently of their content, and are ignored in the subsequent
3322  * steps. This is mainly because tests like "col IS NULL" break the
3323  * heuristic used in step 2 especially badly.
3324  * 2. Reduce the given expressions to a list of unique Vars used. For
3325  * example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3326  * It is clearly correct not to count the same Var more than once.
3327  * It is also reasonable to treat f(x) the same as x: f() cannot
3328  * increase the number of distinct values (unless it is volatile,
3329  * which we consider unlikely for grouping), but it probably won't
3330  * reduce the number of distinct values much either.
3331  * As a special case, if a GROUP BY expression can be matched to an
3332  * expressional index for which we have statistics, then we treat the
3333  * whole expression as though it were just a Var.
3334  * 3. If the list contains Vars of different relations that are known equal
3335  * due to equivalence classes, then drop all but one of the Vars from each
3336  * known-equal set, keeping the one with smallest estimated # of values
3337  * (since the extra values of the others can't appear in joined rows).
3338  * Note the reason we only consider Vars of different relations is that
3339  * if we considered ones of the same rel, we'd be double-counting the
3340  * restriction selectivity of the equality in the next step.
3341  * 4. For Vars within a single source rel, we multiply together the numbers
3342  * of values, clamp to the number of rows in the rel (divided by 10 if
3343  * more than one Var), and then multiply by a factor based on the
3344  * selectivity of the restriction clauses for that rel. When there's
3345  * more than one Var, the initial product is probably too high (it's the
3346  * worst case) but clamping to a fraction of the rel's rows seems to be a
3347  * helpful heuristic for not letting the estimate get out of hand. (The
3348  * factor of 10 is derived from pre-Postgres-7.4 practice.) The factor
3349  * we multiply by to adjust for the restriction selectivity assumes that
3350  * the restriction clauses are independent of the grouping, which may not
3351  * be a valid assumption, but it's hard to do better.
3352  * 5. If there are Vars from multiple rels, we repeat step 4 for each such
3353  * rel, and multiply the results together.
3354  * Note that rels not containing grouped Vars are ignored completely, as are
3355  * join clauses. Such rels cannot increase the number of groups, and we
3356  * assume such clauses do not reduce the number either (somewhat bogus,
3357  * but we don't have the info to do better).
3358  */
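/*
 * Illustrative example (hypothetical stats) of steps 4 and 5: for
 * GROUP BY a, b with both columns from one relation of 1000 rows,
 * ndistinct(a) = 100 and ndistinct(b) = 50, the raw product is 5000.
 * Since more than one Var is involved it is clamped to 1000 / 10 = 100
 * (but never below the largest single-column ndistinct), and the result is
 * then scaled by the relation's restriction selectivity before being
 * combined with the estimates from any other relations.
 */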
3359 double
3360 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3361  List **pgset)
3362 {
3363  List *varinfos = NIL;
3364  double srf_multiplier = 1.0;
3365  double numdistinct;
3366  ListCell *l;
3367  int i;
3368 
3369  /*
3370  * We don't ever want to return an estimate of zero groups, as that tends
3371  * to lead to division-by-zero and other unpleasantness. The input_rows
3372  * estimate is usually already at least 1, but clamp it just in case it
3373  * isn't.
3374  */
3375  input_rows = clamp_row_est(input_rows);
3376 
3377  /*
3378  * If no grouping columns, there's exactly one group. (This can't happen
3379  * for normal cases with GROUP BY or DISTINCT, but it is possible for
3380  * corner cases with set operations.)
3381  */
3382  if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3383  return 1.0;
3384 
3385  /*
3386  * Count groups derived from boolean grouping expressions. For other
3387  * expressions, find the unique Vars used, treating an expression as a Var
3388  * if we can find stats for it. For each one, record the statistical
3389  * estimate of number of distinct values (total in its table, without
3390  * regard for filtering).
3391  */
3392  numdistinct = 1.0;
3393 
3394  i = 0;
3395  foreach(l, groupExprs)
3396  {
3397  Node *groupexpr = (Node *) lfirst(l);
3398  double this_srf_multiplier;
3399  VariableStatData vardata;
3400  List *varshere;
3401  ListCell *l2;
3402 
3403  /* is expression in this grouping set? */
3404  if (pgset && !list_member_int(*pgset, i++))
3405  continue;
3406 
3407  /*
3408  * Set-returning functions in grouping columns are a bit problematic.
3409  * The code below will effectively ignore their SRF nature and come up
3410  * with a numdistinct estimate as though they were scalar functions.
3411  * We compensate by scaling up the end result by the largest SRF
3412  * rowcount estimate. (This will be an overestimate if the SRF
3413  * produces multiple copies of any output value, but it seems best to
3414  * assume the SRF's outputs are distinct. In any case, it's probably
3415  * pointless to worry too much about this without much better
3416  * estimates for SRF output rowcounts than we have today.)
3417  */
3418  this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
3419  if (srf_multiplier < this_srf_multiplier)
3420  srf_multiplier = this_srf_multiplier;
3421 
3422  /* Short-circuit for expressions returning boolean */
3423  if (exprType(groupexpr) == BOOLOID)
3424  {
3425  numdistinct *= 2.0;
3426  continue;
3427  }
3428 
3429  /*
3430  * If examine_variable is able to deduce anything about the GROUP BY
3431  * expression, treat it as a single variable even if it's really more
3432  * complicated.
3433  */
3434  examine_variable(root, groupexpr, 0, &vardata);
3435  if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3436  {
3437  varinfos = add_unique_group_var(root, varinfos,
3438  groupexpr, &vardata);
3439  ReleaseVariableStats(vardata);
3440  continue;
3441  }
3442  ReleaseVariableStats(vardata);
3443 
3444  /*
3445  * Else pull out the component Vars. Handle PlaceHolderVars by
3446  * recursing into their arguments (effectively assuming that the
3447  * PlaceHolderVar doesn't change the number of groups, which boils
3448  * down to ignoring the possible addition of nulls to the result set).
3449  */
3450  varshere = pull_var_clause(groupexpr,
3451  PVC_RECURSE_AGGREGATES |
3452  PVC_RECURSE_WINDOWFUNCS |
3453  PVC_RECURSE_PLACEHOLDERS);
3454 
3455  /*
3456  * If we find any variable-free GROUP BY item, then either it is a
3457  * constant (and we can ignore it) or it contains a volatile function;
3458  * in the latter case we punt and assume that each input row will
3459  * yield a distinct group.
3460  */
3461  if (varshere == NIL)
3462  {
3463  if (contain_volatile_functions(groupexpr))
3464  return input_rows;
3465  continue;
3466  }
3467 
3468  /*
3469  * Else add variables to varinfos list
3470  */
3471  foreach(l2, varshere)
3472  {
3473  Node *var = (Node *) lfirst(l2);
3474 
3475  examine_variable(root, var, 0, &vardata);
3476  varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3477  ReleaseVariableStats(vardata);
3478  }
3479  }
3480 
3481  /*
3482  * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3483  * list.
3484  */
3485  if (varinfos == NIL)
3486  {
3487  /* Apply SRF multiplier as we would do in the long path */
3488  numdistinct *= srf_multiplier;
3489  /* Round off */
3490  numdistinct = ceil(numdistinct);
3491  /* Guard against out-of-range answers */
3492  if (numdistinct > input_rows)
3493  numdistinct = input_rows;
3494  if (numdistinct < 1.0)
3495  numdistinct = 1.0;
3496  return numdistinct;
3497  }
3498 
3499  /*
3500  * Group Vars by relation and estimate total numdistinct.
3501  *
3502  * For each iteration of the outer loop, we process the frontmost Var in
3503  * varinfos, plus all other Vars in the same relation. We remove these
3504  * Vars from the newvarinfos list for the next iteration. This is the
3505  * easiest way to group Vars of same rel together.
3506  */
3507  do
3508  {
3509  GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3510  RelOptInfo *rel = varinfo1->rel;
3511  double reldistinct = 1;
3512  double relmaxndistinct = reldistinct;
3513  int relvarcount = 0;
3514  List *newvarinfos = NIL;
3515  List *relvarinfos = NIL;
3516 
3517  /*
3518  * Split the list of varinfos in two - one for the current rel, one
3519  * for remaining Vars on other rels.
3520  */
3521  relvarinfos = lappend(relvarinfos, varinfo1);
3522  for_each_from(l, varinfos, 1)
3523  {
3524  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3525 
3526  if (varinfo2->rel == varinfo1->rel)
3527  {
3528  /* varinfos on current rel */
3529  relvarinfos = lappend(relvarinfos, varinfo2);
3530  }
3531  else
3532  {
3533  /* not time to process varinfo2 yet */
3534  newvarinfos = lappend(newvarinfos, varinfo2);
3535  }
3536  }
3537 
3538  /*
3539  * Get the numdistinct estimate for the Vars of this rel. We
3540  * iteratively search for multivariate n-distinct with maximum number
3541  * of vars; assuming that each var group is independent of the others,
3542  * we multiply them together. Any remaining relvarinfos after no more
3543  * multivariate matches are found are assumed independent too, so
3544  * their individual ndistinct estimates are multiplied also.
3545  *
3546  * While iterating, count how many separate numdistinct values we
3547  * apply. We apply a fudge factor below, but only if we multiplied
3548  * more than one such value.
3549  */
3550  while (relvarinfos)
3551  {
3552  double mvndistinct;
3553 
3554  if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3555  &mvndistinct))
3556  {
3557  reldistinct *= mvndistinct;
3558  if (relmaxndistinct < mvndistinct)
3559  relmaxndistinct = mvndistinct;
3560  relvarcount++;
3561  }
3562  else
3563  {
3564  foreach(l, relvarinfos)
3565  {
3566  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3567 
3568  reldistinct *= varinfo2->ndistinct;
3569  if (relmaxndistinct < varinfo2->ndistinct)
3570  relmaxndistinct = varinfo2->ndistinct;
3571  relvarcount++;
3572  }
3573 
3574  /* we're done with this relation */
3575  relvarinfos = NIL;
3576  }
3577  }
3578 
3579  /*
3580  * Sanity check --- don't divide by zero if empty relation.
3581  */
3582  Assert(IS_SIMPLE_REL(rel));
3583  if (rel->tuples > 0)
3584  {
3585  /*
3586  * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3587  * fudge factor is because the Vars are probably correlated but we
3588  * don't know by how much. We should never clamp to less than the
3589  * largest ndistinct value for any of the Vars, though, since
3590  * there will surely be at least that many groups.
3591  */
3592  double clamp = rel->tuples;
3593 
3594  if (relvarcount > 1)
3595  {
3596  clamp *= 0.1;
3597  if (clamp < relmaxndistinct)
3598  {
3599  clamp = relmaxndistinct;
3600  /* for sanity in case some ndistinct is too large: */
3601  if (clamp > rel->tuples)
3602  clamp = rel->tuples;
3603  }
3604  }
3605  if (reldistinct > clamp)
3606  reldistinct = clamp;
3607 
3608  /*
3609  * Update the estimate based on the restriction selectivity,
3610  * guarding against division by zero when reldistinct is zero.
3611  * Also skip this if we know that we are returning all rows.
3612  */
3613  if (reldistinct > 0 && rel->rows < rel->tuples)
3614  {
3615  /*
3616  * Given a table containing N rows with n distinct values in a
3617  * uniform distribution, if we select p rows at random then
3618  * the expected number of distinct values selected is
3619  *
3620  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3621  *
3622  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3623  *
3624  * See "Approximating block accesses in database
3625  * organizations", S. B. Yao, Communications of the ACM,
3626  * Volume 20 Issue 4, April 1977 Pages 260-261.
3627  *
3628  * Alternatively, re-arranging the terms from the factorials,
3629  * this may be written as
3630  *
3631  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3632  *
3633  * This form of the formula is more efficient to compute in
3634  * the common case where p is larger than N/n. Additionally,
3635  * as pointed out by Dell'Era, if i << N for all terms in the
3636  * product, it can be approximated by
3637  *
3638  * n * (1 - ((N-p)/N)^(N/n))
3639  *
3640  * See "Expected distinct values when selecting from a bag
3641  * without replacement", Alberto Dell'Era,
3642  * http://www.adellera.it/investigations/distinct_balls/.
3643  *
3644  * The condition i << N is equivalent to n >> 1, so this is a
3645  * good approximation when the number of distinct values in
3646  * the table is large. It turns out that this formula also
3647  * works well even when n is small.
3648  */
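 				/*
 				 * Rough illustration (numbers invented for this note, not
 				 * taken from any table): with rel->tuples = 10000,
 				 * rel->rows = 1000 and reldistinct = 2000, the factor is
 				 * 1 - ((10000-1000)/10000)^(10000/2000) = 1 - 0.9^5
 				 * = 0.40951, so the estimate drops from 2000 to about 819
 				 * distinct values expected among the selected rows.
 				 */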
3649  reldistinct *=
3650  (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3651  rel->tuples / reldistinct));
3652  }
3653  reldistinct = clamp_row_est(reldistinct);
3654 
3655  /*
3656  * Update estimate of total distinct groups.
3657  */
3658  numdistinct *= reldistinct;
3659  }
3660 
3661  varinfos = newvarinfos;
3662  } while (varinfos != NIL);
3663 
3664  /* Now we can account for the effects of any SRFs */
3665  numdistinct *= srf_multiplier;
3666 
3667  /* Round off */
3668  numdistinct = ceil(numdistinct);
3669 
3670  /* Guard against out-of-range answers */
3671  if (numdistinct > input_rows)
3672  numdistinct = input_rows;
3673  if (numdistinct < 1.0)
3674  numdistinct = 1.0;
3675 
3676  return numdistinct;
3677 }
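/*
 * End-to-end illustration of the arithmetic above (invented numbers): two
 * grouping columns from the same relation with ndistinct estimates 100 and
 * 50 give a raw product of 5000; with rel->tuples = 20000 the multi-column
 * clamp is 2000 (tuples/10), so reldistinct becomes 2000 before the
 * selectivity and SRF adjustments, and the final answer is further capped
 * at input_rows.
 */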
3678 
3679 /*
3680  * Estimate hash bucket statistics when the specified expression is used
3681  * as a hash key for the given number of buckets.
3682  *
3683  * This attempts to determine two values:
3684  *
3685  * 1. The frequency of the most common value of the expression (returns
3686  * zero into *mcv_freq if we can't get that).
3687  *
3688  * 2. The "bucketsize fraction", ie, average number of entries in a bucket
3689  * divided by total tuples in relation.
3690  *
3691  * XXX This is really pretty bogus since we're effectively assuming that the
3692  * distribution of hash keys will be the same after applying restriction
3693  * clauses as it was in the underlying relation. However, we are not nearly
3694  * smart enough to figure out how the restrict clauses might change the
3695  * distribution, so this will have to do for now.
3696  *
3697  * We are passed the number of buckets the executor will use for the given
3698  * input relation. If the data were perfectly distributed, with the same
3699  * number of tuples going into each available bucket, then the bucketsize
3700  * fraction would be 1/nbuckets. But this happy state of affairs will occur
3701  * only if (a) there are at least nbuckets distinct data values, and (b)
3702  * we have a not-too-skewed data distribution. Otherwise the buckets will
3703  * be nonuniformly occupied. If the other relation in the join has a key
3704  * distribution similar to this one's, then the most-loaded buckets are
3705  * exactly those that will be probed most often. Therefore, the "average"
3706  * bucket size for costing purposes should really be taken as something close
3707  * to the "worst case" bucket size. We try to estimate this by adjusting the
3708  * fraction if there are too few distinct data values, and then scaling up
3709  * by the ratio of the most common value's frequency to the average frequency.
3710  *
3711  * If no statistics are available, use a default estimate of 0.1. This will
3712  * discourage use of a hash rather strongly if the inner relation is large,
3713  * which is what we want. We do not want to hash unless we know that the
3714  * inner rel is well-dispersed (or the alternatives seem much worse).
3715  *
3716  * The caller should also check that the mcv_freq is not so large that the
3717  * most common value would by itself require an impractically large bucket.
3718  * In a hash join, the executor can split buckets if they get too big, but
3719  * obviously that doesn't help for a bucket that contains many duplicates of
3720  * the same value.
3721  */
3722 void
3723 estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets,
3724  Selectivity *mcv_freq,
3725  Selectivity *bucketsize_frac)
3726 {
3727  VariableStatData vardata;
3728  double estfract,
3729  ndistinct,
3730  stanullfrac,
3731  avgfreq;
3732  bool isdefault;
3733  AttStatsSlot sslot;
3734 
3735  examine_variable(root, hashkey, 0, &vardata);
3736 
3737  /* Look up the frequency of the most common value, if available */
3738  *mcv_freq = 0.0;
3739 
3740  if (HeapTupleIsValid(vardata.statsTuple))
3741  {
3742  if (get_attstatsslot(&sslot, vardata.statsTuple,
3743  STATISTIC_KIND_MCV, InvalidOid,
3744  ATTSTATSSLOT_NUMBERS))
3745  {
3746  /*
3747  * The first MCV stat is for the most common value.
3748  */
3749  if (sslot.nnumbers > 0)
3750  *mcv_freq = sslot.numbers[0];
3751  free_attstatsslot(&sslot);
3752  }
3753  }
3754 
3755  /* Get number of distinct values */
3756  ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3757 
3758  /*
3759  * If ndistinct isn't real, punt. We normally return 0.1, but if the
3760  * mcv_freq is known to be even higher than that, use it instead.
3761  */
3762  if (isdefault)
3763  {
3764  *bucketsize_frac = (Selectivity) Max(0.1, *mcv_freq);
3765  ReleaseVariableStats(vardata);
3766  return;
3767  }
3768 
3769  /* Get fraction that are null */
3770  if (HeapTupleIsValid(vardata.statsTuple))
3771  {
3772  Form_pg_statistic stats;
3773 
3774  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3775  stanullfrac = stats->stanullfrac;
3776  }
3777  else
3778  stanullfrac = 0.0;
3779 
3780  /* Compute avg freq of all distinct data values in raw relation */
3781  avgfreq = (1.0 - stanullfrac) / ndistinct;
3782 
3783  /*
3784  * Adjust ndistinct to account for restriction clauses. Observe we are
3785  * assuming that the data distribution is affected uniformly by the
3786  * restriction clauses!
3787  *
3788  * XXX Possibly better way, but much more expensive: multiply by
3789  * selectivity of rel's restriction clauses that mention the target Var.
3790  */
3791  if (vardata.rel && vardata.rel->tuples > 0)
3792  {
3793  ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3794  ndistinct = clamp_row_est(ndistinct);
3795  }
3796 
3797  /*
3798  * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3799  * number of buckets is less than the expected number of distinct values;
3800  * otherwise it is 1/ndistinct.
3801  */
3802  if (ndistinct > nbuckets)
3803  estfract = 1.0 / nbuckets;
3804  else
3805  estfract = 1.0 / ndistinct;
3806 
3807  /*
3808  * Adjust estimated bucketsize upward to account for skewed distribution.
3809  */
3810  if (avgfreq > 0.0 && *mcv_freq > avgfreq)
3811  estfract *= *mcv_freq / avgfreq;
3812 
3813  /*
3814  * Clamp bucketsize to sane range (the above adjustment could easily
3815  * produce an out-of-range result). We set the lower bound a little above
3816  * zero, since zero isn't a very sane result.
3817  */
3818  if (estfract < 1.0e-6)
3819  estfract = 1.0e-6;
3820  else if (estfract > 1.0)
3821  estfract = 1.0;
3822 
3823  *bucketsize_frac = (Selectivity) estfract;
3824 
3825  ReleaseVariableStats(vardata);
3826 }
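/*
 * Worked example for estimate_hash_bucket_stats (illustrative figures only,
 * not derived from any particular table): with nbuckets = 1024, an adjusted
 * ndistinct of 500, stanullfrac = 0 and mcv_freq = 0.02, avgfreq is
 * 1.0/500 = 0.002.  Since ndistinct < nbuckets, estfract starts at
 * 1/500 = 0.002, and the skew adjustment scales it by mcv_freq/avgfreq = 10,
 * giving a bucketsize fraction of 0.02 -- i.e. the most-loaded buckets are
 * expected to hold about 2% of the inner relation.
 */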
3827 
3828 /*
3829  * estimate_hashagg_tablesize
3830  * estimate the number of bytes that a hash aggregate hashtable will
3831  * require based on the agg_costs, path width and number of groups.
3832  *
3833  * We return the result as "double" to forestall any possible overflow
3834  * problem in the multiplication by dNumGroups.
3835  *
3836  * XXX this may be over-estimating the size now that hashagg knows to omit
3837  * unneeded columns from the hashtable. Also for mixed-mode grouping sets,
3838  * grouping columns not in the hashed set are counted here even though hashagg
3839  * won't store them. Is this a problem?
3840  */
3841 double
3842 estimate_hashagg_tablesize(PlannerInfo *root, Path *path,
3843  const AggClauseCosts *agg_costs, double dNumGroups)
3844 {
3845  Size hashentrysize;
3846 
3847  hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
3848  path->pathtarget->width,
3849  agg_costs->transitionSpace);
3850 
3851  /*
3852  * Note that this disregards the effect of fill-factor and growth policy
3853  * of the hash table. That's probably ok, given that the default
3854  * fill-factor is relatively high. It'd be hard to meaningfully factor in
3855  * "double-in-size" growth policies here.
3856  */
3857  return hashentrysize * dNumGroups;
3858 }
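/*
 * Illustrative sizing (hypothetical numbers): if hash_agg_entry_size()
 * reports a 64-byte entry for the given transition states and path width,
 * then 100,000 estimated groups yield 64 * 100000 = 6,400,000 bytes, i.e.
 * roughly 6.1 MB of hashtable, before any fill-factor overhead (which, as
 * noted above, is deliberately ignored).
 */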
3859 
3860 
3861 /*-------------------------------------------------------------------------
3862  *
3863  * Support routines
3864  *
3865  *-------------------------------------------------------------------------
3866  */
3867 
3868 /*
3869  * Find applicable ndistinct statistics for the given list of VarInfos (which
3870  * must all belong to the given rel), and update *ndistinct to the estimate of
3871  * the MVNDistinctItem that best matches. If a match is found, *varinfos is
3872  * updated to remove the matched varinfos from the list.
3873  *
3874  * Varinfos that aren't for simple Vars are ignored.
3875  *
3876  * Return true if we're able to find a match, false otherwise.
3877  */
3878 static bool
3879 estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
3880  List **varinfos, double *ndistinct)
3881 {
3882  ListCell *lc;
3883  Bitmapset *attnums = NULL;
3884  int nmatches;
3885  Oid statOid = InvalidOid;
3886  MVNDistinct *stats;
3887  Bitmapset *matched = NULL;
3888 
3889  /* bail out immediately if the table has no extended statistics */
3890  if (!rel->statlist)
3891  return false;
3892 
3893  /* Determine the attnums we're looking for */
3894  foreach(lc, *varinfos)
3895  {
3896  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3897  AttrNumber attnum;
3898 
3899  Assert(varinfo->rel == rel);
3900 
3901  if (!IsA(varinfo->var, Var))
3902  continue;
3903 
3904  attnum = ((Var *) varinfo->var)->varattno;
3905 
3906  if (!AttrNumberIsForUserDefinedAttr(attnum))
3907  continue;
3908 
3909  attnums = bms_add_member(attnums, attnum);
3910  }
3911 
3912  /* look for the ndistinct statistics matching the most vars */
3913  nmatches = 1; /* we require at least two matches */
3914  foreach(lc, rel->statlist)
3915  {
3916  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3917  Bitmapset *shared;
3918  int nshared;
3919 
3920  /* skip statistics of other kinds */
3921  if (info->kind != STATS_EXT_NDISTINCT)
3922  continue;
3923 
3924  /* compute attnums shared by the vars and the statistics object */
3925  shared = bms_intersect(info->keys, attnums);
3926  nshared = bms_num_members(shared);
3927 
3928  /*
3929  * Does this statistics object match more columns than the currently
3930  * best object? If so, use this one instead.
3931  *
3932  * XXX This should break ties using name of the object, or something
3933  * like that, to make the outcome stable.
3934  */
3935  if (nshared > nmatches)
3936  {
3937  statOid = info->statOid;
3938  nmatches = nshared;
3939  matched = shared;
3940  }
3941  }
3942 
3943  /* No match? */
3944  if (statOid == InvalidOid)
3945  return false;
3946  Assert(nmatches > 1 && matched != NULL);
3947 
3948  stats = statext_ndistinct_load(statOid);
3949 
3950  /*
3951  * If we have a match, search it for the specific item that matches (there
3952  * must be one), and construct the output values.
3953  */
3954  if (stats)
3955  {
3956  int i;
3957  List *newlist = NIL;
3958  MVNDistinctItem *item = NULL;
3959 
3960  /* Find the specific item that exactly matches the combination */
3961  for (i = 0; i < stats->nitems; i++)
3962  {
3963  MVNDistinctItem *tmpitem = &stats->items[i];
3964 
3965  if (bms_subset_compare(tmpitem->attrs, matched) == BMS_EQUAL)
3966  {
3967  item = tmpitem;
3968  break;
3969  }
3970  }
3971 
3972  /* make sure we found an item */
3973  if (!item)
3974  elog(ERROR, "corrupt MVNDistinct entry");
3975 
3976  /* Form the output varinfo list, keeping only unmatched ones */
3977  foreach(lc, *varinfos)
3978  {
3979  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3980  AttrNumber attnum;
3981 
3982  if (!IsA(varinfo->var, Var))
3983  {
3984  newlist = lappend(newlist, varinfo);
3985  continue;
3986  }
3987 
3988  attnum = ((Var *) varinfo->var)->varattno;
3989 
3990  if (!AttrNumberIsForUserDefinedAttr(attnum))
3991  continue;
3992 
3993  if (!bms_is_member(attnum, matched))
3994  newlist = lappend(newlist, varinfo);
3995  }
3996 
3997  *varinfos = newlist;
3998  *ndistinct = item->ndistinct;
3999  return true;
4000  }
4001 
4002  return false;
4003 }
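/*
 * For context, the extended statistics consulted here are the ones created
 * with commands along these lines (hypothetical table and column names):
 *
 *     CREATE STATISTICS s_city_zip (ndistinct) ON city, zip FROM addresses;
 *     ANALYZE addresses;
 *
 * After ANALYZE, a GROUP BY city, zip can use the stored n-distinct item
 * for (city, zip) instead of multiplying the per-column estimates.
 */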
4004 
4005 /*
4006  * convert_to_scalar
4007  * Convert non-NULL values of the indicated types to the comparison
4008  * scale needed by scalarineqsel().
4009  * Returns "true" if successful.
4010  *
4011  * XXX this routine is a hack: ideally we should look up the conversion
4012  * subroutines in pg_type.
4013  *
4014  * All numeric datatypes are simply converted to their equivalent
4015  * "double" values. (NUMERIC values that are outside the range of "double"
4016  * are clamped to +/- HUGE_VAL.)
4017  *
4018  * String datatypes are converted by convert_string_to_scalar(),
4019  * which is explained below. The reason why this routine deals with
4020  * three values at a time, not just one, is that we need it for strings.
4021  *
4022  * The bytea datatype is just enough different from strings that it has
4023  * to be treated separately.
4024  *
4025  * The several datatypes representing absolute times are all converted
4026  * to Timestamp, which is actually an int64, and then we promote that to
4027  * a double. Note this will give correct results even for the "special"
4028  * values of Timestamp, since those are chosen to compare correctly;
4029  * see timestamp_cmp.
4030  *
4031  * The several datatypes representing relative times (intervals) are all
4032  * converted to measurements expressed in seconds.
4033  */
4034 static bool
4035 convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue,
4036  Datum lobound, Datum hibound, Oid boundstypid,
4037  double *scaledlobound, double *scaledhibound)
4038 {
4039  bool failure = false;
4040 
4041  /*
4042  * Both the valuetypid and the boundstypid should exactly match the
4043  * declared input type(s) of the operator we are invoked for. However,
4044  * extensions might try to use scalarineqsel as estimator for operators
4045  * with input type(s) we don't handle here; in such cases, we want to
4046  * return false, not fail. In any case, we mustn't assume that valuetypid
4047  * and boundstypid are identical.
4048  *
4049  * XXX The histogram we are interpolating between points of could belong
4050  * to a column that's only binary-compatible with the declared type. In
4051  * essence we are assuming that the semantics of binary-compatible types
4052  * are enough alike that we can use a histogram generated with one type's
4053  * operators to estimate selectivity for the other's. This is outright
4054  * wrong in some cases --- in particular signed versus unsigned
4055  * interpretation could trip us up. But it's useful enough in the
4056  * majority of cases that we do it anyway. Should think about more
4057  * rigorous ways to do it.
4058  */
4059  switch (valuetypid)
4060  {
4061  /*
4062  * Built-in numeric types
4063  */
4064  case BOOLOID:
4065  case INT2OID:
4066  case INT4OID:
4067  case INT8OID:
4068  case FLOAT4OID:
4069  case FLOAT8OID:
4070  case NUMERICOID:
4071  case OIDOID:
4072  case REGPROCOID:
4073  case REGPROCEDUREOID:
4074  case REGOPEROID:
4075  case REGOPERATOROID:
4076  case REGCLASSOID:
4077  case REGTYPEOID:
4078  case REGCONFIGOID:
4079  case REGDICTIONARYOID:
4080  case REGROLEOID:
4081  case REGNAMESPACEOID:
4082  *scaledvalue = convert_numeric_to_scalar(value, valuetypid,
4083  &failure);
4084  *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
4085  &failure);
4086  *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid,
4087  &failure);
4088  return !failure;
4089 
4090  /*
4091  * Built-in string types
4092  */
4093  case CHAROID:
4094  case BPCHAROID:
4095  case VARCHAROID:
4096  case TEXTOID:
4097  case NAMEOID:
4098  {
4099  char *valstr = convert_string_datum(value, valuetypid,
4100  collid, &failure);
4101  char *lostr = convert_string_datum(lobound, boundstypid,
4102  collid, &failure);
4103  char *histr = convert_string_datum(hibound, boundstypid,
4104  collid, &failure);
4105 
4106  /*
4107  * Bail out if any of the values is not of string type. We
4108  * might leak converted strings for the other value(s), but
4109  * that's not worth troubling over.
4110  */
4111  if (failure)
4112  return false;
4113 
4114  convert_string_to_scalar(valstr, scaledvalue,
4115  lostr, scaledlobound,
4116  histr, scaledhibound);
4117  pfree(valstr);
4118  pfree(lostr);
4119  pfree(histr);
4120  return true;
4121  }
4122 
4123  /*
4124  * Built-in bytea type
4125  */
4126  case BYTEAOID:
4127  {
4128  /* We only support bytea vs bytea comparison */
4129  if (boundstypid != BYTEAOID)
4130  return false;
4131  convert_bytea_to_scalar(value, scaledvalue,
4132  lobound, scaledlobound,
4133  hibound, scaledhibound);
4134  return true;
4135  }
4136 
4137  /*
4138  * Built-in time types
4139  */
4140  case TIMESTAMPOID:
4141  case TIMESTAMPTZOID:
4142  case DATEOID:
4143  case INTERVALOID:
4144  case TIMEOID:
4145  case TIMETZOID:
4146  *scaledvalue = convert_timevalue_to_scalar(value, valuetypid,
4147  &failure);
4148  *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid,
4149  &failure);
4150  *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid,
4151  &failure);
4152  return !failure;
4153 
4154  /*
4155  * Built-in network types
4156  */
4157  case INETOID:
4158  case CIDROID:
4159  case MACADDROID:
4160  case MACADDR8OID:
4161  *scaledvalue = convert_network_to_scalar(value, valuetypid,
4162  &failure);
4163  *scaledlobound = convert_network_to_scalar(lobound, boundstypid,
4164  &failure);
4165  *scaledhibound = convert_network_to_scalar(hibound, boundstypid,
4166  &failure);
4167  return !failure;
4168  }
4169  /* Don't know how to convert */
4170  *scaledvalue = *scaledlobound = *scaledhibound = 0;
4171  return false;
4172 }
4173 
4174 /*
4175  * Do convert_to_scalar()'s work for any numeric data type.
4176  *
4177  * On failure (e.g., unsupported typid), set *failure to true;
4178  * otherwise, that variable is not changed.
4179  */
4180 static double
4181 convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
4182 {
4183  switch (typid)
4184  {
4185  case BOOLOID:
4186  return (double) DatumGetBool(value);
4187  case INT2OID:
4188  return (double) DatumGetInt16(value);
4189  case INT4OID:
4190  return (double) DatumGetInt32(value);
4191  case INT8OID:
4192  return (double) DatumGetInt64(value);
4193  case FLOAT4OID:
4194  return (double) DatumGetFloat4(value);
4195  case FLOAT8OID:
4196  return (double) DatumGetFloat8(value);
4197  case NUMERICOID:
4198  /* Note: out-of-range values will be clamped to +-HUGE_VAL */
4199  return (double)
4200  DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
4201  value));
4202  case OIDOID:
4203  case REGPROCOID:
4204  case REGPROCEDUREOID:
4205  case REGOPEROID:
4206  case REGOPERATOROID:
4207  case REGCLASSOID:
4208  case REGTYPEOID:
4209  case REGCONFIGOID:
4210  case REGDICTIONARYOID:
4211  case REGROLEOID:
4212  case REGNAMESPACEOID:
4213  /* we can treat OIDs as integers... */
4214  return (double) DatumGetObjectId(value);
4215  }
4216 
4217  *failure = true;
4218  return 0;
4219 }
4220 
4221 /*
4222  * Do convert_to_scalar()'s work for any character-string data type.
4223  *
4224  * String datatypes are converted to a scale that ranges from 0 to 1,
4225  * where we visualize the bytes of the string as fractional digits.
4226  *
4227  * We do not want the base to be 256, however, since that tends to
4228  * generate inflated selectivity estimates; few databases will have
4229  * occurrences of all 256 possible byte values at each position.
4230  * Instead, use the smallest and largest byte values seen in the bounds
4231  * as the estimated range for each byte, after some fudging to deal with
4232  * the fact that we probably aren't going to see the full range that way.
4233  *
4234  * An additional refinement is that we discard any common prefix of the
4235  * three strings before computing the scaled values. This allows us to
4236  * "zoom in" when we encounter a narrow data range. An example is a phone
4237  * number database where all the values begin with the same area code.
4238  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
4239  * so this is more likely to happen than you might think.)
4240  */
4241 static void
4242 convert_string_to_scalar(char *value,
4243  double *scaledvalue,
4244  char *lobound,
4245  double *scaledlobound,
4246  char *hibound,
4247  double *scaledhibound)
4248 {
4249  int rangelo,
4250  rangehi;
4251  char *sptr;
4252 
4253  rangelo = rangehi = (unsigned char) hibound[0];
4254  for (sptr = lobound; *sptr; sptr++)
4255  {
4256  if (rangelo > (unsigned char) *sptr)
4257  rangelo = (unsigned char) *sptr;
4258  if (rangehi < (unsigned char) *sptr)
4259  rangehi = (unsigned char) *sptr;
4260  }
4261  for (sptr = hibound; *sptr; sptr++)
4262  {
4263  if (rangelo > (unsigned char) *sptr)
4264  rangelo = (unsigned char) *sptr;
4265  if (rangehi < (unsigned char) *sptr)
4266  rangehi = (unsigned char) *sptr;
4267  }
4268  /* If range includes any upper-case ASCII chars, make it include all */
4269  if (rangelo <= 'Z' && rangehi >= 'A')
4270  {
4271  if (rangelo > 'A')
4272  rangelo = 'A';
4273  if (rangehi < 'Z')
4274  rangehi = 'Z';
4275  }
4276  /* Ditto lower-case */
4277  if (rangelo <= 'z' && rangehi >= 'a')
4278  {
4279  if (rangelo > 'a')
4280  rangelo = 'a';
4281  if (rangehi < 'z')
4282  rangehi = 'z';
4283  }
4284  /* Ditto digits */
4285  if (rangelo <= '9' && rangehi >= '0')
4286  {
4287  if (rangelo > '0')
4288  rangelo = '0';
4289  if (rangehi < '9')
4290  rangehi = '9';
4291  }
4292 
4293  /*
4294  * If range includes less than 10 chars, assume we have not got enough
4295  * data, and make it include regular ASCII set.
4296  */
4297  if (rangehi - rangelo < 9)
4298  {
4299  rangelo = ' ';
4300  rangehi = 127;
4301  }
4302 
4303  /*
4304  * Now strip any common prefix of the three strings.
4305  */
4306  while (*lobound)
4307  {
4308  if (*lobound != *hibound || *lobound != *value)
4309  break;
4310  lobound++, hibound++, value++;
4311  }
4312 
4313  /*
4314  * Now we can do the conversions.
4315  */
4316  *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4317  *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4318  *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4319 }
4320 
4321 static double
4322 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
4323 {
4324  int slen = strlen(value);
4325  double num,
4326  denom,
4327  base;
4328 
4329  if (slen <= 0)
4330  return 0.0; /* empty string has scalar value 0 */
4331 
4332  /*
4333  * There seems little point in considering more than a dozen bytes from
4334  * the string. Since base is at least 10, that will give us nominal
4335  * resolution of at least 12 decimal digits, which is surely far more
4336  * precision than this estimation technique has got anyway (especially in
4337  * non-C locales). Also, even with the maximum possible base of 256, this
4338  * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4339  * overflow on any known machine.
4340  */
4341  if (slen > 12)
4342  slen = 12;
4343 
4344  /* Convert initial characters to fraction */
4345  base = rangehi - rangelo + 1;
4346  num = 0.0;
4347  denom = base;
4348  while (slen-- > 0)
4349  {
4350  int ch = (unsigned char) *value++;
4351 
4352  if (ch < rangelo)
4353  ch = rangelo - 1;
4354  else if (ch > rangehi)
4355  ch = rangehi + 1;
4356  num += ((double) (ch - rangelo)) / denom;
4357  denom *= base;
4358  }
4359 
4360  return num;
4361 }
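/*
 * Small worked example (made up for illustration): with rangelo = 'a' (97)
 * and rangehi = 'z' (122), base is 26 and the string "ba" maps to
 * (('b'-'a')/26) + (('a'-'a')/676) = 1/26 ~= 0.0385, while "m" maps to
 * ('m'-'a')/26 = 12/26 ~= 0.4615 -- so strings land on [0,1) in roughly the
 * same order in which they compare.
 */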
4362 
4363 /*
4364  * Convert a string-type Datum into a palloc'd, null-terminated string.
4365  *
4366  * On failure (e.g., unsupported typid), set *failure to true;
4367  * otherwise, that variable is not changed. (We'll return NULL on failure.)
4368  *
4369  * When using a non-C locale, we must pass the string through strxfrm()
4370  * before continuing, so as to generate correct locale-specific results.
4371  */
4372 static char *
4373 convert_string_datum(Datum value, Oid typid, Oid collid, bool *failure)
4374 {
4375  char *val;
4376 
4377  switch (typid)
4378  {
4379  case CHAROID:
4380  val = (char *) palloc(2);
4381  val[0] = DatumGetChar(value);
4382  val[1] = '\0';
4383  break;
4384  case BPCHAROID:
4385  case VARCHAROID:
4386  case TEXTOID:
4387  val = TextDatumGetCString(value);
4388  break;
4389  case NAMEOID:
4390  {
4391  NameData *nm = (NameData *) DatumGetPointer(value);
4392 
4393  val = pstrdup(NameStr(*nm));
4394  break;
4395  }
4396  default:
4397  *failure = true;
4398  return NULL;
4399  }
4400 
4401  if (!lc_collate_is_c(collid))
4402  {
4403  char *xfrmstr;
4404  size_t xfrmlen;
4405  size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4406 
4407  /*
4408  * XXX: We could guess at a suitable output buffer size and only call
4409  * strxfrm twice if our guess is too small.
4410  *
4411  * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
4412  * bogus data or set an error. This is not really a problem unless it
4413  * crashes since it will only give an estimation error and nothing
4414  * fatal.
4415  */
4416  xfrmlen = strxfrm(NULL, val, 0);
4417 #ifdef WIN32
4418 
4419  /*
4420  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4421  * of trying to allocate this much memory (and fail), just return the
4422  * original string unmodified as if we were in the C locale.
4423  */
4424  if (xfrmlen == INT_MAX)
4425  return val;
4426 #endif
4427  xfrmstr = (char *) palloc(xfrmlen + 1);
4428  xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4429 
4430  /*
4431  * Some systems (e.g., glibc) can return a smaller value from the
4432  * second call than the first; thus the Assert must be <= not ==.
4433  */
4434  Assert(xfrmlen2 <= xfrmlen);
4435  pfree(val);
4436  val = xfrmstr;
4437  }
4438 
4439  return val;
4440 }
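/*
 * Sketch of the effect of the strxfrm step (locale-dependent, so only
 * indicative): the C standard guarantees that strcmp() on two strxfrm'd
 * strings gives the same ordering as strcoll() on the originals, which is
 * what lets the purely byte-wise convert_string_to_scalar() above respect
 * the collation.
 */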
4441 
4442 /*
4443  * Do convert_to_scalar()'s work for any bytea data type.
4444  *
4445  * Very similar to convert_string_to_scalar except we can't assume
4446  * null-termination and therefore pass explicit lengths around.
4447  *
4448  * Also, assumptions about likely "normal" ranges of characters have been
4449  * removed - a data range of 0..255 is always used, for now. (Perhaps
4450  * someday we will add information about actual byte data range to
4451  * pg_statistic.)
4452  */
4453 static void
4454 convert_bytea_to_scalar(Datum value,
4455  double *scaledvalue,
4456  Datum lobound,
4457  double *scaledlobound,
4458  Datum hibound,
4459  double *scaledhibound)
4460 {
4461  bytea *valuep = DatumGetByteaPP(value);
4462  bytea *loboundp = DatumGetByteaPP(lobound);
4463  bytea *hiboundp = DatumGetByteaPP(hibound);
4464  int rangelo,
4465  rangehi,
4466  valuelen = VARSIZE_ANY_EXHDR(valuep),
4467  loboundlen = VARSIZE_ANY_EXHDR(loboundp),
4468  hiboundlen = VARSIZE_ANY_EXHDR(hiboundp),
4469  i,
4470  minlen;
4471  unsigned char *valstr = (unsigned char *) VARDATA_ANY(valuep);
4472  unsigned char *lostr = (unsigned char *) VARDATA_ANY(loboundp);
4473  unsigned char *histr = (unsigned char *) VARDATA_ANY(hiboundp);
4474 
4475  /*
4476  * Assume bytea data is uniformly distributed across all byte values.
4477  */
4478  rangelo = 0;
4479  rangehi = 255;
4480 
4481  /*
4482  * Now strip any common prefix of the three strings.
4483  */
4484  minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4485  for (i = 0; i < minlen; i++)
4486  {
4487  if (*lostr != *histr || *lostr != *valstr)
4488  break;
4489  lostr++, histr++, valstr++;
4490  loboundlen--, hiboundlen--, valuelen--;
4491  }
4492 
4493  /*
4494  * Now we can do the conversions.
4495  */
4496  *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4497  *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4498  *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4499 }
4500 
4501 static double
4502 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4503  int rangelo, int rangehi)
4504 {
4505  double num,
4506  denom,
4507  base;
4508 
4509  if (valuelen <= 0)
4510  return 0.0; /* empty string has scalar value 0 */
4511 
4512  /*
4513  * Since base is 256, need not consider more than about 10 chars (even
4514  * this many seems like overkill)
4515  */
4516  if (valuelen > 10)
4517  valuelen = 10;
4518 
4519  /* Convert initial characters to fraction */
4520  base = rangehi - rangelo + 1;
4521  num = 0.0;
4522  denom = base;
4523  while (valuelen-- > 0)
4524  {
4525  int ch = *value++;
4526 
4527  if (ch < rangelo)
4528  ch = rangelo - 1;
4529  else if (ch > rangehi)
4530  ch = rangehi + 1;
4531  num += ((double) (ch - rangelo)) / denom;
4532  denom *= base;
4533  }
4534 
4535  return num;
4536 }
4537 
4538 /*
4539  * Do convert_to_scalar()'s work for any timevalue data type.
4540  *
4541  * On failure (e.g., unsupported typid), set *failure to true;
4542  * otherwise, that variable is not changed.
4543  */
4544 static double
4545 convert_timevalue_to_scalar(Datum value, Oid typid, bool *failure)
4546 {
4547  switch (typid)
4548  {
4549  case TIMESTAMPOID:
4550  return DatumGetTimestamp(value);
4551  case TIMESTAMPTZOID:
4552  return DatumGetTimestampTz(value);
4553  case DATEOID:
4554  return date2timestamp_no_overflow(DatumGetDateADT(value));
4555  case INTERVALOID:
4556  {
4557  Interval   *interval = DatumGetIntervalP(value);
4558 
4559  /*
4560  * Convert the month part of Interval to days using assumed
4561  * average month length of 365.25/12.0 days. Not too
4562  * accurate, but plenty good enough for our purposes.
4563  */
4564  return interval->time + interval->day * (double) USECS_PER_DAY +
4565  interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4566  }
4567  case TIMEOID:
4568  return DatumGetTimeADT(value);
4569  case TIMETZOID:
4570  {
4571  TimeTzADT *timetz = DatumGetTimeTzADTP(value);
4572 
4573  /* use GMT-equivalent time */
4574  return (double) (timetz->time + (timetz->zone * 1000000.0));
4575  }
4576  }
4577 
4578  *failure = true;
4579  return 0;
4580 }
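/*
 * Example of the interval scaling used above (illustrative values): an
 * interval of 1 month 2 days 03:00:00 becomes
 * 3*3600*1e6 + 2*USECS_PER_DAY + 1*(365.25/12.0)*USECS_PER_DAY
 * microseconds, i.e. the month is charged as ~30.44 days -- accurate enough
 * for histogram interpolation even though real months vary in length.
 */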
4581 
4582 
4583 /*
4584  * get_restriction_variable
4585  * Examine the args of a restriction clause to see if it's of the
4586  * form (variable op pseudoconstant) or (pseudoconstant op variable),
4587  * where "variable" could be either a Var or an expression in vars of a
4588  * single relation. If so, extract information about the variable,
4589  * and also indicate which side it was on and the other argument.
4590  *
4591  * Inputs:
4592  * root: the planner info
4593  * args: clause argument list
4594  * varRelid: see specs for restriction selectivity functions
4595  *
4596  * Outputs: (these are valid only if true is returned)
4597  * *vardata: gets information about variable (see examine_variable)
4598  * *other: gets other clause argument, aggressively reduced to a constant
4599  * *varonleft: set true if variable is on the left, false if on the right
4600  *
4601  * Returns true if a variable is identified, otherwise false.
4602  *
4603  * Note: if there are Vars on both sides of the clause, we must fail, because
4604  * callers are expecting that the other side will act like a pseudoconstant.
4605  */
4606 bool
4607 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4608  VariableStatData *vardata, Node **other,
4609  bool *varonleft)
4610 {
4611  Node *left,
4612  *right;
4613  VariableStatData rdata;
4614 
4615  /* Fail if not a binary opclause (probably shouldn't happen) */
4616  if (list_length(args) != 2)
4617  return false;
4618 
4619  left = (Node *) linitial(args);
4620  right = (Node *) lsecond(args);
4621 
4622  /*
4623  * Examine both sides. Note that when varRelid is nonzero, Vars of other
4624  * relations will be treated as pseudoconstants.
4625  */
4626  examine_variable(root, left, varRelid, vardata);
4627  examine_variable(root, right, varRelid, &rdata);
4628 
4629  /*
4630  * If one side is a variable and the other not, we win.
4631  */
4632  if (vardata->rel && rdata.rel == NULL)
4633  {
4634  *varonleft = true;
4635  *other = estimate_expression_value(root, rdata.var);
4636  /* Assume we need no ReleaseVariableStats(rdata) here */
4637  return true;
4638  }
4639 
4640  if (vardata->rel == NULL && rdata.rel)
4641  {
4642  *varonleft = false;
4643  *other = estimate_expression_value(root, vardata->var);
4644  /* Assume we need no ReleaseVariableStats(*vardata) here */
4645  *vardata = rdata;
4646  return true;
4647  }
4648 
4649  /* Oops, clause has wrong structure (probably var op var) */
4650  ReleaseVariableStats(*vardata);
4651  ReleaseVariableStats(rdata);
4652 
4653  return false;
4654 }
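/*
 * Typical inputs this routine accepts (hypothetical clauses): for
 * "t.x < 42" it reports the Var t.x with *varonleft = true and the Const 42
 * as *other; for "now() > t.created_at" the roles are reversed and now() is
 * reduced to a constant by estimate_expression_value(); for "t.x = t.y"
 * (Vars on both sides) it returns false, as noted above.
 */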
4655 
4656 /*
4657  * get_join_variables
4658  * Apply examine_variable() to each side of a join clause.
4659  * Also, attempt to identify whether the join clause has the same
4660  * or reversed sense compared to the SpecialJoinInfo.
4661  *
4662  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4663  * or "reversed" if it is "rhs_var OP lhs_var". In complicated cases
4664  * where we can't tell for sure, we default to assuming it's normal.
4665  */
4666 void
4667 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4668  VariableStatData *vardata1, VariableStatData *vardata2,
4669  bool *join_is_reversed)
4670 {
4671  Node *left,
4672  *right;
4673 
4674  if (list_length(args) != 2)
4675  elog(ERROR, "join operator should take two arguments");
4676 
4677  left = (Node *) linitial(args);
4678  right = (Node *) lsecond(args);
4679 
4680  examine_variable(root, left, 0, vardata1);
4681  examine_variable(root, right, 0, vardata2);
4682 
4683  if (vardata1->rel &&
4684  bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4685  *join_is_reversed = true; /* var1 is on RHS */
4686  else if (vardata2->rel &&
4687  bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4688  *join_is_reversed = true; /* var2 is on LHS */
4689  else
4690  *join_is_reversed = false;
4691 }
4692 
4693 /*
4694  * examine_variable
4695  * Try to look up statistical data about an expression.
4696  * Fill in a VariableStatData struct to describe the expression.
4697  *
4698  * Inputs:
4699  * root: the planner info
4700  * node: the expression tree to examine
4701  * varRelid: see specs for restriction selectivity functions
4702  *
4703  * Outputs: *vardata is filled as follows:
4704  * var: the input expression (with any binary relabeling stripped, if
4705  * it is or contains a variable; but otherwise the type is preserved)
4706  * rel: RelOptInfo for relation containing variable; NULL if expression
4707  * contains no Vars (NOTE this could point to a RelOptInfo of a
4708  * subquery, not one in the current query).
4709  * statsTuple: the pg_statistic entry for the variable, if one exists;
4710  * otherwise NULL.
4711  * freefunc: pointer to a function to release statsTuple with.
4712  * vartype: exposed type of the expression; this should always match
4713  * the declared input type of the operator we are estimating for.
4714  * atttype, atttypmod: actual type/typmod of the "var" expression. This is
4715  * commonly the same as the exposed type of the variable argument,
4716  * but can be different in binary-compatible-type cases.
4717  * isunique: true if we were able to match the var to a unique index or a
4718  * single-column DISTINCT clause, implying its values are unique for
4719  * this query. (Caution: this should be trusted for statistical
4720  * purposes only, since we do not check indimmediate nor verify that
4721  * the exact same definition of equality applies.)
4722  * acl_ok: true if current user has permission to read the column(s)
4723  * underlying the pg_statistic entry. This is consulted by
4724  * statistic_proc_security_check().
4725  *
4726  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4727  */
4728 void
4729 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4730  VariableStatData *vardata)
4731 {
4732  Node *basenode;
4733  Relids varnos;
4734  RelOptInfo *onerel;
4735 
4736  /* Make sure we don't return dangling pointers in vardata */
4737  MemSet(vardata, 0, sizeof(VariableStatData));
4738 
4739  /* Save the exposed type of the expression */
4740  vardata->vartype = exprType(node);
4741 
4742  /* Look inside any binary-compatible relabeling */
4743 
4744  if (IsA(node, RelabelType))
4745  basenode = (Node *) ((RelabelType *) node)->arg;
4746  else
4747  basenode = node;
4748 
4749  /* Fast path for a simple Var */
4750 
4751  if (IsA(basenode, Var) &&
4752  (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4753  {
4754  Var *var = (Var *) basenode;
4755 
4756  /* Set up result fields other than the stats tuple */
4757  vardata->var = basenode; /* return Var without relabeling */
4758  vardata->rel = find_base_rel(root, var->varno);
4759  vardata->atttype = var->vartype;
4760  vardata->atttypmod = var->vartypmod;
4761  vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4762 
4763  /* Try to locate some stats */
4764  examine_simple_variable(root, var, vardata);
4765 
4766  return;
4767  }
4768 
4769  /*
4770  * Okay, it's a more complicated expression. Determine variable
4771  * membership. Note that when varRelid isn't zero, only vars of that
4772  * relation are considered "real" vars.
4773  */
4774  varnos = pull_varnos(basenode);
4775 
4776  onerel = NULL;
4777 
4778  switch (bms_membership(varnos))
4779  {
4780  case BMS_EMPTY_SET:
4781  /* No Vars at all ... must be pseudo-constant clause */
4782  break;
4783  case BMS_SINGLETON:
4784  if (varRelid == 0 || bms_is_member(varRelid, varnos))
4785  {
4786  onerel = find_base_rel(root,
4787  (varRelid ? varRelid : bms_singleton_member(varnos)));
4788  vardata->rel = onerel;
4789  node = basenode; /* strip any relabeling */
4790  }
4791  /* else treat it as a constant */
4792  break;
4793  case BMS_MULTIPLE:
4794  if (varRelid == 0)
4795  {
4796  /* treat it as a variable of a join relation */
4797  vardata->rel = find_join_rel(root, varnos);
4798  node = basenode; /* strip any relabeling */
4799  }
4800  else if (bms_is_member(varRelid, varnos))
4801  {
4802  /* ignore the vars belonging to other relations */
4803  vardata->rel = find_base_rel(root, varRelid);
4804  node = basenode; /* strip any relabeling */
4805  /* note: no point in expressional-index search here */
4806  }
4807  /* else treat it as a constant */
4808  break;
4809  }
4810 
4811  bms_free(varnos);
4812 
4813  vardata->var = node;
4814  vardata->atttype = exprType(node);
4815  vardata->atttypmod = exprTypmod(node);
4816 
4817  if (onerel)
4818  {
4819  /*
4820  * We have an expression in vars of a single relation. Try to match
4821  * it to expressional index columns, in hopes of finding some
4822  * statistics.
4823  *
4824  * Note that we consider all index columns including INCLUDE columns,
4825  * since there could be stats for such columns. But the test for
4826  * uniqueness needs to be warier.
4827  *
4828  * XXX it's conceivable that there are multiple matches with different
4829  * index opfamilies; if so, we need to pick one that matches the
4830  * operator we are estimating for. FIXME later.
4831  */
4832  ListCell *ilist;
4833 
4834  foreach(ilist, onerel->indexlist)
4835  {
4836  IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4837  ListCell *indexpr_item;
4838  int pos;
4839 
4840  indexpr_item = list_head(index->indexprs);
4841  if (indexpr_item == NULL)
4842  continue; /* no expressions here... */
4843 
4844  for (pos = 0; pos < index->ncolumns; pos++)
4845  {
4846  if (index->indexkeys[pos] == 0)
4847  {
4848  Node *indexkey;
4849 
4850  if (indexpr_item == NULL)
4851  elog(ERROR, "too few entries in indexprs list");
4852  indexkey = (Node *) lfirst(indexpr_item);
4853  if (indexkey && IsA(indexkey, RelabelType))
4854  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4855  if (equal(node, indexkey))
4856  {
4857  /*
4858  * Found a match ... is it a unique index? Tests here
4859  * should match has_unique_index().
4860  */
4861  if (index->unique &&
4862  index->nkeycolumns == 1 &&
4863  pos == 0 &&
4864  (index->indpred == NIL || index->predOK))
4865  vardata->isunique = true;
4866 
4867  /*
4868  * Has it got stats? We only consider stats for
4869  * non-partial indexes, since partial indexes probably
4870  * don't reflect whole-relation statistics; the above
4871  * check for uniqueness is the only info we take from
4872  * a partial index.
4873  *
4874  * An index stats hook, however, must make its own
4875  * decisions about what to do with partial indexes.
4876  */
4877  if (get_index_stats_hook &&
4878  (*get_index_stats_hook) (root, index->indexoid,
4879  pos + 1, vardata))
4880  {
4881  /*
4882  * The hook took control of acquiring a stats
4883  * tuple. If it did supply a tuple, it'd better
4884  * have supplied a freefunc.
4885  */
4886  if (HeapTupleIsValid(vardata->statsTuple) &&
4887  !vardata->freefunc)
4888  elog(ERROR, "no function provided to release variable stats with");
4889  }
4890  else if (index->indpred == NIL)
4891  {
4892  vardata->statsTuple =
4893  SearchSysCache3(STATRELATTINH,
4894  ObjectIdGetDatum(index->indexoid),
4895  Int16GetDatum(pos + 1),
4896  BoolGetDatum(false));
4897  vardata->freefunc = ReleaseSysCache;
4898 
4899  if (HeapTupleIsValid(vardata->statsTuple))
4900  {
4901  /* Get index's table for permission check */
4902  RangeTblEntry *rte;
4903  Oid userid;
4904 
4905  rte = planner_rt_fetch(index->rel->relid, root);
4906  Assert(rte->rtekind == RTE_RELATION);
4907 
4908  /*
4909  * Use checkAsUser if it's set, in case we're
4910  * accessing the table via a view.
4911  */
4912  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
4913 
4914  /*
4915  * For simplicity, we insist on the whole
4916  * table being selectable, rather than trying
4917  * to identify which column(s) the index
4918  * depends on. Also require all rows to be
4919  * selectable --- there must be no
4920  * securityQuals from security barrier views
4921  * or RLS policies.
4922  */
4923  vardata->acl_ok =
4924  rte->securityQuals == NIL &&
4925  (pg_class_aclcheck(rte->relid, userid,
4926  ACL_SELECT) == ACLCHECK_OK);
4927 
4928  /*
4929  * If the user doesn't have permissions to
4930  * access an inheritance child relation, check
4931  * the permissions of the table actually
4932  * mentioned in the query, since most likely
4933  * the user does have that permission. Note
4934  * that whole-table select privilege on the
4935  * parent doesn't quite guarantee that the
4936  * user could read all columns of the child.
4937  * But in practice it's unlikely that any
4938  * interesting security violation could result
4939  * from allowing access to the expression
4940  * index's stats, so we allow it anyway. See
4941  * similar code in examine_simple_variable()
4942  * for additional comments.
4943  */
4944  if (!vardata->acl_ok &&
4945  root->append_rel_array != NULL)
4946  {
4947  AppendRelInfo *appinfo;
4948  Index varno = index->rel->relid;
4949 
4950  appinfo = root->append_rel_array[varno];
4951  while (appinfo &&
4952  planner_rt_fetch(appinfo->parent_relid,
4953  root)->rtekind == RTE_RELATION)
4954  {
4955  varno = appinfo->parent_relid;
4956  appinfo = root->append_rel_array[varno];
4957  }
4958  if (varno != index->rel->relid)
4959  {
4960  /* Repeat access check on this rel */
4961  rte = planner_rt_fetch(varno, root);
4962  Assert(rte->rtekind == RTE_RELATION);
4963 
4964  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
4965 
4966  vardata->acl_ok =
4967  rte->securityQuals == NIL &&
4968  (pg_class_aclcheck(rte->relid,
4969  userid,
4970  ACL_SELECT) == ACLCHECK_OK);
4971  }
4972  }
4973  }
4974  else
4975  {
4976  /* suppress leakproofness checks later */
4977  vardata->acl_ok = true;
4978  }
4979  }
4980  if (vardata->statsTuple)
4981  break;
4982  }
4983  indexpr_item = lnext(index->indexprs, indexpr_item);
4984  }
4985  }
4986  if (vardata->statsTuple)
4987  break;
4988  }
4989  }
4990 }
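/*
 * Concrete (hypothetical) case for the expression-index matching above:
 * given "CREATE INDEX ON people ((lower(name)))" and a later ANALYZE, a
 * clause such as lower(name) = 'smith' finds the index column's
 * pg_statistic row here, so eqsel() can use its MCV list instead of
 * falling back to DEFAULT_EQ_SEL.
 */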
4991 
4992 /*
4993  * examine_simple_variable
4994  * Handle a simple Var for examine_variable
4995  *
4996  * This is split out as a subroutine so that we can recurse to deal with
4997  * Vars referencing subqueries.
4998  *
4999  * We already filled in all the fields of *vardata except for the stats tuple.
5000  */
5001 static void
5002 examine_simple_variable(PlannerInfo *root, Var *var,
5003  VariableStatData *vardata)
5004 {
5005  RangeTblEntry *rte = root->simple_rte_array[var->varno];
5006 
5007  Assert(IsA(rte, RangeTblEntry));
5008 
5009  if (get_relation_stats_hook &&
5010  (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
5011  {
5012  /*
5013  * The hook took control of acquiring a stats tuple. If it did supply
5014  * a tuple, it'd better have supplied a freefunc.
5015  */
5016  if (HeapTupleIsValid(vardata->statsTuple) &&
5017  !vardata->freefunc)
5018  elog(ERROR, "no function provided to release variable stats with");
5019  }
5020  else if (rte->rtekind == RTE_RELATION)
5021  {
5022  /*
5023  * Plain table or parent of an inheritance appendrel, so look up the
5024  * column in pg_statistic
5025  */
5026  vardata->statsTuple = SearchSysCache3(STATRELATTINH,
5027  ObjectIdGetDatum(rte->relid),
5028  Int16GetDatum(var->varattno),
5029  BoolGetDatum(rte->inh));
5030  vardata->freefunc = ReleaseSysCache;
5031 
5032  if (HeapTupleIsValid(vardata->statsTuple))
5033  {
5034  Oid userid;
5035 
5036  /*
5037  * Check if user has permission to read this column. We require
5038  * all rows to be accessible, so there must be no securityQuals
5039  * from security barrier views or RLS policies. Use checkAsUser
5040  * if it's set, in case we're accessing the table via a view.
5041  */
5042  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5043 
5044  vardata->acl_ok =
5045  rte->securityQuals == NIL &&
5046  ((pg_class_aclcheck(rte->relid, userid,
5047  ACL_SELECT) == ACLCHECK_OK) ||
5048  (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
5049  ACL_SELECT) == ACLCHECK_OK));
5050 
5051  /*
5052  * If the user doesn't have permissions to access an inheritance
5053  * child relation or specifically this attribute, check the
5054  * permissions of the table/column actually mentioned in the
5055  * query, since most likely the user does have that permission
5056  * (else the query will fail at runtime), and if the user can read
5057  * the column there then he can get the values of the child table
5058  * too. To do that, we must find out which of the root parent's
5059  * attributes the child relation's attribute corresponds to.
5060  */
5061  if (!vardata->acl_ok && var->varattno > 0 &&
5062  root->append_rel_array != NULL)
5063  {
5064  AppendRelInfo *appinfo;
5065  Index varno = var->varno;
5066  int varattno = var->varattno;
5067  bool found = false;
5068 
5069  appinfo = root->append_rel_array[varno];
5070 
5071  /*
5072  * Partitions are mapped to their immediate parent, not the
5073  * root parent, so must be ready to walk up multiple
5074  * AppendRelInfos. But stop if we hit a parent that is not
5075  * RTE_RELATION --- that's a flattened UNION ALL subquery, not
5076  * an inheritance parent.
5077  */
5078  while (appinfo &&
5079  planner_rt_fetch(appinfo->parent_relid,
5080  root)->rtekind == RTE_RELATION)
5081  {
5082  int parent_varattno;
5083 
5084  found = false;
5085  if (varattno <= 0 || varattno > appinfo->num_child_cols)
5086  break; /* safety check */
5087  parent_varattno = appinfo->parent_colnos[varattno - 1];
5088  if (parent_varattno == 0)
5089  break; /* Var is local to child */
5090 
5091  varno = appinfo->parent_relid;
5092  varattno = parent_varattno;
5093  found = true;
5094 
5095  /* If the parent is itself a child, continue up. */
5096  appinfo = root->append_rel_array[varno];
5097  }
5098 
5099  /*
5100  * In rare cases, the Var may be local to the child table, in
5101  * which case, we've got to live with having no access to this
5102  * column's stats.
5103  */
5104  if (!found)
5105  return;
5106 
5107  /* Repeat the access check on this parent rel & column */
5108  rte = planner_rt_fetch(varno, root);
5109  Assert(rte->rtekind == RTE_RELATION);
5110 
5111  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5112 
5113  vardata->acl_ok =
5114  rte->securityQuals == NIL &&
5115  ((pg_class_aclcheck(rte->relid, userid,
5116  ACL_SELECT) == ACLCHECK_OK) ||
5117  (pg_attribute_aclcheck(rte->relid, varattno, userid,
5118  ACL_SELECT) == ACLCHECK_OK));
5119  }
5120  }
5121  else
5122  {
5123  /* suppress any possible leakproofness checks later */
5124  vardata->acl_ok = true;
5125  }
5126  }
5127  else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
5128  {
5129  /*
5130  * Plain subquery (not one that was converted to an appendrel).
5131  */
5132  Query *subquery = rte->subquery;
5133  RelOptInfo *rel;
5134  TargetEntry *ste;
5135 
5136  /*
5137  * Punt if it's a whole-row var rather than a plain column reference.
5138  */
5139  if (var->varattno == InvalidAttrNumber)
5140  return;
5141 
5142  /*
5143  * Punt if subquery uses set operations or GROUP BY, as these will
5144  * mash underlying columns' stats beyond recognition. (Set ops are
5145  * particularly nasty; if we forged ahead, we would return stats
5146  * relevant to only the leftmost subselect...) DISTINCT is also
5147  * problematic, but we check that later because there is a possibility
5148  * of learning something even with it.
5149  */
5150  if (subquery->setOperations ||
5151  subquery->groupClause)
5152  return;
5153 
5154  /*
5155  * OK, fetch RelOptInfo for subquery. Note that we don't change the
5156  * rel returned in vardata, since caller expects it to be a rel of the
5157  * caller's query level. Because we might already be recursing, we
5158  * can't use that rel pointer either, but have to look up the Var's
5159  * rel afresh.
5160  */
5161  rel = find_base_rel(root, var->varno);
5162 
5163  /* If the subquery hasn't been planned yet, we have to punt */
5164  if (rel->subroot == NULL)
5165  return;
5166  Assert(IsA(rel->subroot, PlannerInfo));
5167 
5168  /*
5169  * Switch our attention to the subquery as mangled by the planner. It
5170  * was okay to look at the pre-planning version for the tests above,
5171  * but now we need a Var that will refer to the subroot's live
5172  * RelOptInfos. For instance, if any subquery pullup happened during
5173  * planning, Vars in the targetlist might have gotten replaced, and we
5174  * need to see the replacement expressions.
5175  */
5176  subquery = rel->subroot->parse;
5177  Assert(IsA(subquery, Query));
5178 
5179  /* Get the subquery output expression referenced by the upper Var */
5180  ste = get_tle_by_resno(subquery->targetList, var->varattno);
5181  if (ste == NULL || ste->resjunk)
5182  elog(ERROR, "subquery %s does not have attribute %d",
5183  rte->eref->aliasname, var->varattno);
5184  var = (Var *) ste->expr;
5185 
5186  /*
5187  * If subquery uses DISTINCT, we can't make use of any stats for the
5188  * variable ... but, if it's the only DISTINCT column, we are entitled
5189  * to consider it unique. We do the test this way so that it works
5190  * for cases involving DISTINCT ON.
5191  */
5192  if (subquery->distinctClause)
5193  {
5194  if (list_length(subquery->distinctClause) == 1 &&
5195  targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
5196  vardata->isunique = true;
5197  /* cannot go further */
5198  return;
5199  }
5200 
5201  /*
5202  * If the sub-query originated from a view with the security_barrier
5203  * attribute, we must not look at the variable's statistics, though it
5204  * seems all right to notice the existence of a DISTINCT clause. So
5205  * stop here.
5206  *
5207  * This is probably a harsher restriction than necessary; it's
5208  * certainly OK for the selectivity estimator (which is a C function,
5209  * and therefore omnipotent anyway) to look at the statistics. But
5210  * many selectivity estimators will happily *invoke the operator
5211  * function* to try to work out a good estimate - and that's not OK.
5212  * So for now, don't dig down for stats.
5213  */
5214  if (rte->security_barrier)
5215  return;
5216 
5217  /* Can only handle a simple Var of subquery's query level */
5218  if (var && IsA(var, Var) &&
5219  var->varlevelsup == 0)
5220  {
5221  /*
5222  * OK, recurse into the subquery. Note that the original setting
5223  * of vardata->isunique (which will surely be false) is left
5224  * unchanged in this situation. That's what we want, since even
5225  * if the underlying column is unique, the subquery may have
5226  * joined to other tables in a way that creates duplicates.
5227  */
5228  examine_simple_variable(rel->subroot, var, vardata);
5229  }
5230  }
5231  else
5232  {
5233  /*
5234  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
5235  * won't see RTE_JOIN here because join alias Vars have already been
5236  * flattened.) There's not much we can do with function outputs, but
5237  * maybe someday try to be smarter about VALUES and/or CTEs.
5238  */
5239  }
5240 }
5241 
5242 /*
5243  * Check whether it is permitted to call func_oid passing some of the
5244  * pg_statistic data in vardata. We allow this either if the user has SELECT
5245  * privileges on the table or column underlying the pg_statistic data or if
5246  * the function is marked leak-proof.
5247  */
5248 bool
5249 statistic_proc_security_check(VariableStatData *vardata, Oid func_oid)
5250 {
5251  if (vardata->acl_ok)
5252  return true;
5253 
5254  if (!OidIsValid(func_oid))
5255  return false;
5256 
5257  if (get_func_leakproof(func_oid))
5258  return true;
5259 
5260  ereport(DEBUG2,
5261  (errmsg_internal("not using statistics because function \"%s\" is not leak-proof",
5262  get_func_name(func_oid))));
5263  return false;
5264 }
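/*
 * Illustrative note: in practice this means that when vardata->acl_ok is
 * false (the user cannot read the underlying table or column), estimators
 * may still apply functions that pg_proc marks as leakproof (many built-in
 * comparison functions, for instance) to the sampled MCV and histogram
 * datums, but anything not provably leak-free is refused, since calling it
 * could expose those datums, e.g. through error messages.
 */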
5265 
5266 /*
5267  * get_variable_numdistinct
5268  * Estimate the number of distinct values of a variable.
5269  *
5270  * vardata: results of examine_variable
5271  * *isdefault: set to true if the result is a default rather than based on
5272  * anything meaningful.
5273  *
5274  * NB: be careful to produce a positive integral result, since callers may
5275  * compare the result to exact integer counts, or might divide by it.
5276  */
5277 double
5278 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
5279 {
5280  double stadistinct;
5281  double stanullfrac = 0.0;
5282  double ntuples;
5283 
5284  *isdefault = false;
5285 
5286  /*
5287  * Determine the stadistinct value to use. There are cases where we can
5288  * get an estimate even without a pg_statistic entry, or can get a better
5289  * value than is in pg_statistic. Grab stanullfrac too if we can find it
5290  * (otherwise, assume no nulls, for lack of any better idea).
5291  */
5292  if (HeapTupleIsValid(vardata->statsTuple))
5293  {
5294  /* Use the pg_statistic entry */
5295  Form_pg_statistic stats;
5296 
5297  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
5298  stadistinct = stats->stadistinct;
5299  stanullfrac = stats->stanullfrac;
5300  }
5301  else if (vardata->vartype == BOOLOID)
5302  {
5303  /*
5304  * Special-case boolean columns: presumably, two distinct values.
5305  *
5306  * Are there any other datatypes we should wire in special estimates
5307  * for?
5308  */
5309  stadistinct = 2.0;
5310  }
5311  else if (vardata->rel && vardata->rel->rtekind == RTE_VALUES)
5312  {
5313  /*
5314  * If the Var represents a column of a VALUES RTE, assume it's unique.
5315  * This could of course be very wrong, but it should tend to be true
5316  * in well-written queries. We could consider examining the VALUES'
5317  * contents to get some real statistics; but that only works if the
5318  * entries are all constants, and it would be pretty expensive anyway.
5319  */
5320  stadistinct = -1.0; /* unique (and all non null) */
5321  }
5322  else
5323  {
5324  /*
5325  * We don't keep statistics for system columns, but in some cases we
5326  * can infer distinctness anyway.
5327  */
5328  if (vardata->var && IsA(vardata->var, Var))
5329  {
5330  switch (((Var *) vardata->var)->varattno)
5331  {
5332  case SelfItemPointerAttributeNumber:
5333  stadistinct = -1.0; /* unique (and all non null) */
5334  break;
5335  case TableOidAttributeNumber:
5336  stadistinct = 1.0; /* only 1 value */
5337  break;
5338  default:
5339  stadistinct = 0.0; /* means "unknown" */
5340  break;
5341  }
5342  }
5343  else
5344  stadistinct = 0.0; /* means "unknown" */
5345 
5346  /*
5347  * XXX consider using estimate_num_groups on expressions?
5348  */
5349  }
5350 
5351  /*
5352  * If there is a unique index or DISTINCT clause for the variable, assume
5353  * it is unique no matter what pg_statistic says; the statistics could be
5354  * out of date, or we might have found a partial unique index that proves
5355  * the var is unique for this query. However, we'd better still believe
5356  * the null-fraction statistic.
5357  */
5358  if (vardata->isunique)
5359  stadistinct = -1.0 * (1.0 - stanullfrac);
5360 
5361  /*
5362  * If we had an absolute estimate, use that.
5363  */
5364  if (stadistinct > 0.0)
5365  return clamp_row_est(stadistinct);
5366 
5367  /*
5368  * Otherwise we need to get the relation size; punt if not available.
5369  */
5370  if (vardata->rel == NULL)
5371  {
5372  *isdefault = true;
5373  return DEFAULT_NUM_DISTINCT;
5374  }
5375  ntuples = vardata->rel->tuples;
5376  if (ntuples <= 0.0)
5377  {
5378  *isdefault = true;
5379  return DEFAULT_NUM_DISTINCT;
5380  }
5381 
5382  /*
5383  * If we had a relative estimate, use that.
5384  */
5385  if (stadistinct < 0.0)
5386  return clamp_row_est(-stadistinct * ntuples);
5387 
5388  /*
5389  * With no data, estimate ndistinct = ntuples if the table is small, else
5390  * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5391  * that the behavior isn't discontinuous.
5392  */
5393  if (ntuples < DEFAULT_NUM_DISTINCT)
5394  return clamp_row_est(ntuples);
5395 
5396  *isdefault = true;
5397  return DEFAULT_NUM_DISTINCT;
5398 }
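/*
 * Worked example (illustrative): if pg_statistic has stadistinct = -0.25 and
 * stanullfrac = 0 for the column, and vardata->rel->tuples = 20000, there is
 * no absolute (positive) estimate, so the relative branch applies and the
 * result is clamp_row_est(0.25 * 20000) = 5000, with *isdefault = false.
 * With no usable stats at all, a 150-tuple table would yield 150 (since 150
 * is below DEFAULT_NUM_DISTINCT = 200), while a larger table falls back to
 * DEFAULT_NUM_DISTINCT with *isdefault set to true.
 */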
5399 
5400 /*
5401  * get_variable_range
5402  * Estimate the minimum and maximum value of the specified variable.
5403  * If successful, store values in *min and *max, and return true.
5404  * If no data available, return false.
5405  *
5406  * sortop is the "<" comparison operator to use. This should generally
5407  * be "<" not ">", as only the former is likely to be found in pg_statistic.
5408  * The collation must be specified too.
5409  */
5410 static bool
5411 get_variable_range(PlannerInfo *root, VariableStatData *vardata,
5412  Oid sortop, Oid collation,
5413  Datum *min, Datum *max)
5414 {
5415  Datum tmin = 0;
5416  Datum tmax = 0;
5417  bool have_data = false;
5418  int16 typLen;
5419  bool typByVal;
5420  Oid opfuncoid;
5421  FmgrInfo opproc;
5422  AttStatsSlot sslot;
5423 
5424  /*
5425  * XXX It's very tempting to try to use the actual column min and max, if
5426  * we can get them relatively-cheaply with an index probe. However, since
5427  * this function is called many times during join planning, that could
5428  * have unpleasant effects on planning speed. Need more investigation
5429  * before enabling this.
5430  */
5431 #ifdef NOT_USED
5432  if (get_actual_variable_range(root, vardata, sortop, collation, min, max))
5433  return true;
5434 #endif
5435 
5436  if (!HeapTupleIsValid(vardata->statsTuple))
5437  {
5438  /* no stats available, so default result */
5439  return false;
5440  }
5441 
5442  /*
5443  * If we can't apply the sortop to the stats data, just fail. In
5444  * principle, if there's a histogram and no MCVs, we could return the
5445  * histogram endpoints without ever applying the sortop ... but it's
5446  * probably not worth trying, because whatever the caller wants to do with
5447  * the endpoints would likely fail the security check too.
5448  */
5449  if (!statistic_proc_security_check(vardata,
5450  (opfuncoid = get_opcode(sortop))))
5451  return false;
5452 
5453  opproc.fn_oid = InvalidOid; /* mark this as not looked up yet */
5454 
5455  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5456 
5457  /*
5458  * If there is a histogram with the ordering we want, grab the first and
5459  * last values.
5460  */
5461  if (get_attstatsslot(&sslot, vardata->statsTuple,
5462  STATISTIC_KIND_HISTOGRAM, sortop,
5463  ATTSTATSSLOT_VALUES))
5464  {
5465  if (sslot.stacoll == collation && sslot.nvalues > 0)
5466  {
5467  tmin = datumCopy(sslot.values[0], typByVal, typLen);
5468  tmax = datumCopy(sslot.values[sslot.nvalues - 1], typByVal, typLen);
5469  have_data = true;
5470  }
5471  free_attstatsslot(&sslot);
5472  }
5473 
5474  /*
5475  * Otherwise, if there is a histogram with some other ordering, scan it
5476  * and get the min and max values according to the ordering we want. This
5477  * of course may not find values that are really extremal according to our
5478  * ordering, but it beats ignoring available data.
5479  */
5480  if (!have_data &&
5481  get_attstatsslot(&sslot, vardata->statsTuple,
5482  STATISTIC_KIND_HISTOGRAM, InvalidOid,
5483  ATTSTATSSLOT_VALUES))
5484  {
5485  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5486  collation, typLen, typByVal,
5487  &tmin, &tmax, &have_data);
5488  free_attstatsslot(&sslot);
5489  }
5490 
5491  /*
5492  * If we have most-common-values info, look for extreme MCVs. This is
5493  * needed even if we also have a histogram, since the histogram excludes
5494  * the MCVs.
5495  */
5496  if (get_attstatsslot(&sslot, vardata->statsTuple,
5497  STATISTIC_KIND_MCV, InvalidOid,
5498  ATTSTATSSLOT_VALUES))
5499  {
5500  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5501  collation, typLen, typByVal,
5502  &tmin, &tmax, &have_data);
5503  free_attstatsslot(&sslot);
5504  }
5505 
5506  *min = tmin;
5507  *max = tmax;
5508  return have_data;
5509 }
5510 
5511 /*
5512  * get_stats_slot_range: scan sslot for min/max values
5513  *
5514  * Subroutine for get_variable_range: update min/max/have_data according
5515  * to what we find in the statistics array.
5516  */
5517 static void
5518 get_stats_slot_range(AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc,
5519  Oid collation, int16 typLen, bool typByVal,
5520  Datum *min, Datum *max, bool *p_have_data)
5521 {
5522  Datum tmin = *min;
5523  Datum tmax = *max;
5524  bool have_data = *p_have_data;
5525  bool found_tmin = false;
5526  bool found_tmax = false;
5527 
5528  /* Look up the comparison function, if we didn't already do so */
5529  if (opproc->fn_oid != opfuncoid)
5530  fmgr_info(opfuncoid, opproc);
5531 
5532  /* Scan all the slot's values */
5533  for (int i = 0; i < sslot->nvalues; i++)
5534  {
5535  if (!have_data)
5536  {
5537  tmin = tmax = sslot->values[i];
5538  found_tmin = found_tmax = true;
5539  *p_have_data = have_data = true;
5540  continue;
5541  }
5542  if (DatumGetBool(FunctionCall2Coll(opproc,
5543  collation,
5544  sslot->values[i], tmin)))
5545  {
5546  tmin = sslot->values[i];
5547  found_tmin = true;
5548  }
5549  if (DatumGetBool(FunctionCall2Coll(opproc,
5550  collation,
5551  tmax, sslot->values[i])))
5552  {
5553  tmax = sslot->values[i];
5554  found_tmax = true;
5555  }
5556  }
5557 
5558  /*
5559  * Copy the slot's values, if we found new extreme values.
5560  */
5561  if (found_tmin)
5562  *min = datumCopy(tmin, typByVal, typLen);
5563  if (found_tmax)
5564  *max = datumCopy(tmax, typByVal, typLen);
5565 }
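/*
 * Worked example (illustrative): given slot values {7, 2, 9} and opproc
 * resolved to the datatype's "<" function, the first value seeds
 * tmin = tmax = 7; then 2 < 7 replaces tmin with 2, and tmax < 9 replaces
 * tmax with 9. Only the finally chosen datums are datumCopy'd, so
 * pass-by-reference values remain valid after the caller frees the stats
 * slot.
 */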
5566 
5567 
5568 /*
5569  * get_actual_variable_range
5570  * Attempt to identify the current *actual* minimum and/or maximum
5571  * of the specified variable, by looking for a suitable btree index
5572  * and fetching its low and/or high values.
5573  * If successful, store values in *min and *max, and return true.
5574  * (Either pointer can be NULL if that endpoint isn't needed.)
5575  * If no data available, return false.
5576  *
5577  * sortop is the "<" comparison operator to use.
5578  * collation is the required collation.
5579  */
5580 static bool
5581 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
5582  Oid sortop, Oid collation,
5583  Datum *min, Datum *max)
5584 {
5585  bool have_data = false;
5586  RelOptInfo *rel = vardata->rel;
5587  RangeTblEntry *rte;
5588  ListCell *lc;
5589 
5590  /* No hope if no relation or it doesn't have indexes */
5591  if (rel == NULL || rel->indexlist == NIL)
5592  return false;
5593  /* If it has indexes it must be a plain relation */
5594  rte = root->simple_rte_array[rel->relid];
5595  Assert(rte->rtekind == RTE_RELATION);
5596 
5597  /* Search through the indexes to see if any match our problem */
5598  foreach(lc, rel->indexlist)
5599  {
5600  IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5601  ScanDirection indexscandir;
5602 
5603  /* Ignore non-btree indexes */
5604  if (index->relam != BTREE_AM_OID)
5605  continue;
5606 
5607  /*
5608  * Ignore partial indexes --- we only want stats that cover the entire
5609  * relation.
5610  */
5611  if (index->indpred != NIL)
5612  continue;
5613 
5614  /*
5615  * The index list might include hypothetical indexes inserted by a
5616  * get_relation_info hook --- don't try to access them.
5617  */
5618  if (index->hypothetical)
5619  continue;
5620 
5621  /*
5622  * The first index column must match the desired variable, sortop, and
5623  * collation --- but we can use a descending-order index.
5624  */
5625  if (collation != index->indexcollations[0])
5626  continue; /* test first 'cause it's cheapest */
5627  if (!match_index_to_operand(vardata->var, 0, index))
5628  continue;
5629  switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
5630  {
5631  case BTLessStrategyNumber:
5632  if (index->reverse_sort[0])
5633  indexscandir = BackwardScanDirection;
5634  else
5635  indexscandir = ForwardScanDirection;
5636  break;
5637  case BTGreaterStrategyNumber:
5638  if (index->reverse_sort[0])
5639  indexscandir = ForwardScanDirection;
5640  else
5641  indexscandir = BackwardScanDirection;
5642  break;
5643  default:
5644  /* index doesn't match the sortop */
5645  continue;
5646  }
5647 
5648  /*
5649  * Found a suitable index to extract data from. Set up some data that
5650  * can be used by both invocations of get_actual_variable_endpoint.
5651  */
5652  {
5653  MemoryContext tmpcontext;
5654  MemoryContext oldcontext;
5655  Relation heapRel;
5656  Relation indexRel;
5657  TupleTableSlot *slot;
5658  int16 typLen;
5659  bool typByVal;
5660  ScanKeyData scankeys[1];
5661 
5662  /* Make sure any cruft gets recycled when we're done */
5663  tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
5664  "get_actual_variable_range workspace",
5665  ALLOCSET_DEFAULT_SIZES);
5666  oldcontext = MemoryContextSwitchTo(tmpcontext);
5667 
5668  /*
5669  * Open the table and index so we can read from them. We should
5670  * already have some type of lock on each.
5671  */
5672  heapRel = table_open(rte->relid, NoLock);
5673  indexRel = index_open(index->indexoid, NoLock);
5674 
5675  /* build some stuff needed for indexscan execution */
5676  slot = table_slot_create(heapRel, NULL);
5677  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5678 
5679  /* set up an IS NOT NULL scan key so that we ignore nulls */
5680  ScanKeyEntryInitialize(&scankeys[0],
5681  SK_ISNULL | SK_SEARCHNOTNULL,
5682  1, /* index col to scan */
5683  InvalidStrategy, /* no strategy */
5684  InvalidOid, /* no strategy subtype */
5685  InvalidOid, /* no collation */
5686  InvalidOid, /* no reg proc for this */
5687  (Datum) 0); /* constant */
5688 
5689  /* If min is requested ... */
5690  if (min)
5691  {
5692  have_data = get_actual_variable_endpoint(heapRel,
5693  indexRel,
5694  indexscandir,
5695  scankeys,
5696  typLen,
5697  typByVal,
5698  slot,
5699  oldcontext,
5700  min);
5701  }
5702  else
5703  {
5704  /* If min not requested, assume index is nonempty */
5705  have_data = true;
5706  }
5707 
5708  /* If max is requested, and we didn't find the index is empty */
5709  if (max && have_data)
5710  {
5711  /* scan in the opposite direction; all else is the same */
5712  have_data = get_actual_variable_endpoint(heapRel,
5713  indexRel,
5714  -indexscandir,
5715  scankeys,
5716  typLen,
5717  typByVal,
5718  slot,
5719  oldcontext,
5720  max);
5721  }
5722 
5723  /* Clean everything up */
5724  ExecDropSingleTupleTableSlot(slot);
5725 
5726  index_close(indexRel, NoLock);
5727  table_close(heapRel, NoLock);
5728 
5729  MemoryContextSwitchTo(oldcontext);
5730  MemoryContextDelete(tmpcontext);
5731 
5732  /* And we're done */
5733  break;
5734  }
5735  }
5736 
5737  return have_data;
5738 }
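/*
 * Illustrative example: with an index created as "CREATE INDEX ON t (x DESC)"
 * and sortop being x's default "<" operator, get_op_opfamily_strategy returns
 * BTLessStrategyNumber; since reverse_sort[0] is true for the DESC column,
 * indexscandir is set to BackwardScanDirection, so the minimum is read from
 * the physical end of the index and the maximum (scanned with -indexscandir)
 * from its start.
 */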
5739 
5740 /*
5741  * Get one endpoint datum (min or max depending on indexscandir) from the
5742  * specified index. Return true if successful, false if index is empty.
5743  * On success, endpoint value is stored to *endpointDatum (and copied into
5744  * outercontext).
5745  *
5746  * scankeys is a 1-element scankey array set up to reject nulls.
5747  * typLen/typByVal describe the datatype of the index's first column.
5748  * tableslot is a slot suitable to hold table tuples, in case we need
5749  * to probe the heap.
5750  * (We could compute these values locally, but that would mean computing them
5751  * twice when get_actual_variable_range needs both the min and the max.)
5752  */
5753 static bool
5754 get_actual_variable_endpoint(Relation heapRel,
5755  Relation indexRel,
5756  ScanDirection indexscandir,
5757  ScanKey scankeys,
5758  int16 typLen,
5759  bool typByVal,
5760  TupleTableSlot *tableslot,
5761  MemoryContext outercontext,
5762  Datum *endpointDatum)
5763 {
5764  bool have_data = false;
5765  SnapshotData SnapshotNonVacuumable;
5766  IndexScanDesc index_scan;
5767  Buffer vmbuffer = InvalidBuffer;
5768  ItemPointer tid;
5769  Datum values[INDEX_MAX_KEYS];
5770  bool isnull[INDEX_MAX_KEYS];
5771  MemoryContext oldcontext;
5772 
5773  /*
5774  * We use the index-only-scan machinery for this. With mostly-static
5775  * tables that's a win because it avoids a heap visit. It's also a win
5776  * for dynamic data, but the reason is less obvious; read on for details.
5777  *
5778  * In principle, we should scan the index with our current active
5779  * snapshot, which is the best approximation we've got to what the query
5780  * will see when executed. But that won't be exact if a new snap is taken
5781  * before running the query, and it can be very expensive if a lot of
5782  * recently-dead or uncommitted rows exist at the beginning or end of the
5783  * index (because we'll laboriously fetch each one and reject it).
5784  * Instead, we use SnapshotNonVacuumable. That will accept recently-dead
5785  * and uncommitted rows as well as normal visible rows. On the other
5786  * hand, it will reject known-dead rows, and thus not give a bogus answer
5787  * when the extreme value has been deleted (unless the deletion was quite
5788  * recent); that case motivates not using SnapshotAny here.
5789  *
5790  * A crucial point here is that SnapshotNonVacuumable, with
5791  * GlobalVisTestFor(heapRel) as horizon, yields the inverse of the
5792  * condition that the indexscan will use to decide that index entries are
5793  * killable (see heap_hot_search_buffer()). Therefore, if the snapshot
5794  * rejects a tuple (or more precisely, all tuples of a HOT chain) and we
5795  * have to continue scanning past it, we know that the indexscan will mark
5796  * that index entry killed. That means that the next
5797  * get_actual_variable_endpoint() call will not have to re-consider that
5798  * index entry. In this way we avoid repetitive work when this function
5799  * is used a lot during planning.
5800  *
5801  * But using SnapshotNonVacuumable creates a hazard of its own. In a
5802  * recently-created index, some index entries may point at "broken" HOT
5803  * chains in which not all the tuple versions contain data matching the
5804  * index entry. The live tuple version(s) certainly do match the index,
5805  * but SnapshotNonVacuumable can accept recently-dead tuple versions that
5806  * don't match. Hence, if we took data from the selected heap tuple, we
5807  * might get a bogus answer that's not close to the index extremal value,
5808  * or could even be NULL. We avoid this hazard because we take the data
5809  * from the index entry not the heap.
5810  */
5811  InitNonVacuumableSnapshot(SnapshotNonVacuumable,
5812  GlobalVisTestFor(heapRel));
5813 
5814  index_scan = index_beginscan(heapRel, indexRel,
5815  &SnapshotNonVacuumable,
5816  1, 0);
5817  /* Set it up for index-only scan */
5818  index_scan->xs_want_itup = true;
5819  index_rescan(index_scan, scankeys, 1, NULL, 0);
5820 
5821  /* Fetch first/next tuple in specified direction */
5822  while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
5823  {
5824  if (!VM_ALL_VISIBLE(heapRel,
5825  ItemPointerGetBlockNumber(tid),
5826  &vmbuffer))
5827  {
5828  /* Rats, we have to visit the heap to check visibility */
5829  if (!index_fetch_heap(index_scan, tableslot))
5830  continue; /* no visible tuple, try next index entry */
5831 
5832  /* We don't actually need the heap tuple for anything */
5833  ExecClearTuple(tableslot);
5834 
5835  /*
5836  * We don't care whether there's more than one visible tuple in
5837  * the HOT chain; if any are visible, that's good enough.
5838  */
5839  }
5840 
5841  /*
5842  * We expect that btree will return data in IndexTuple not HeapTuple
5843  * format. It's not lossy either.
5844  */
5845  if (!index_scan->xs_itup)
5846  elog(ERROR, "no data returned for index-only scan");
5847  if (index_scan->xs_recheck)
5848  elog(ERROR, "unexpected recheck indication from btree");
5849 
5850  /* OK to deconstruct the index tuple */
5851  index_deform_tuple(index_scan->xs_itup,
5852  index_scan->xs_itupdesc,
5853  values, isnull);
5854 
5855  /* Shouldn't have got a null, but be careful */
5856  if (isnull[0])
5857  elog(ERROR, "found unexpected null value in index \"%s\"",
5858  RelationGetRelationName(indexRel));
5859 
5860  /* Copy the index column value out to caller's context */
5861  oldcontext = MemoryContextSwitchTo(outercontext);
5862  *endpointDatum = datumCopy(values[0], typByVal, typLen);
5863  MemoryContextSwitchTo(oldcontext);
5864  have_data = true;
5865  break;
5866  }
5867 
5868  if (vmbuffer != InvalidBuffer)
5869  ReleaseBuffer(vmbuffer);
5870  index_endscan(index_scan);
5871 
5872  return have_data;
5873 }
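/*
 * Illustrative note: the VM_ALL_VISIBLE test above mirrors what an
 * index-only scan does, so on a recently vacuumed, mostly-static table these
 * endpoint probes normally touch only the index and the visibility map and
 * never visit the heap.
 */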
5874 
5875 /*
5876  * find_join_input_rel
5877  * Look up the input relation for a join.
5878  *
5879  * We assume that the input relation's RelOptInfo must have been constructed
5880  * already.
5881  */
5882 static RelOptInfo *
5883 find_join_input_rel(PlannerInfo *root, Relids relids)
5884 {
5885  RelOptInfo *rel = NULL;
5886 
5887  switch (bms_membership(relids))
5888  {
5889  case BMS_EMPTY_SET:
5890  /* should not happen */
5891  break;
5892  case BMS_SINGLETON:
5893  rel = find_base_rel(root, bms_singleton_member(relids));
5894  break;
5895  case BMS_MULTIPLE:
5896  rel = find_join_rel(root, relids);
5897  break;
5898  }
5899 
5900  if (rel == NULL)
5901  elog(ERROR, "could not find RelOptInfo for given relids");
5902 
5903  return rel;
5904 }
5905 
5906 
5907 /*-------------------------------------------------------------------------
5908  *
5909  * Index cost estimation functions
5910  *
5911  *-------------------------------------------------------------------------
5912  */
5913 
5914 /*
5915  * Extract the actual indexquals (as RestrictInfos) from an IndexClause list
5916  */
5917 List *
5918 get_quals_from_indexclauses(List *indexclauses)
5919 {
5920  List *result = NIL;
5921  ListCell *lc;
5922 
5923  foreach(lc, indexclauses)
5924  {
5925  IndexClause *iclause = lfirst_node(IndexClause, lc);
5926  ListCell *lc2;
5927 
5928  foreach(lc2, iclause->indexquals)
5929  {
5930  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
5931 
5932  result = lappend(result, rinfo);
5933  }
5934  }
5935  return result;
5936 }
5937 
5938 /*
5939  * Compute the total evaluation cost of the comparison operands in a list
5940  * of index qual expressions. Since we know these will be evaluated just
5941  * once per scan, there's no need to distinguish startup from per-row cost.
5942  *
5943  * This can be used either on the result of get_quals_from_indexclauses(),
5944  * or directly on an indexorderbys list. In both cases, we expect that the
5945  * index key expression is on the left side of binary clauses.
5946  */
5947 Cost
5948 index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
5949 {
5950  Cost qual_arg_cost = 0;
5951  ListCell *lc;
5952 
5953  foreach(lc, indexquals)
5954  {
5955  Expr *clause = (Expr *) lfirst(lc);
5956  Node *other_operand;
5957  QualCost index_qual_cost;
5958 
5959  /*
5960  * Index quals will have RestrictInfos, indexorderbys won't. Look
5961  * through RestrictInfo if present.
5962  */
5963  if (IsA(clause, RestrictInfo))
5964  clause = ((RestrictInfo *) clause)->clause;
5965 
5966  if (IsA(clause, OpExpr))
5967  {
5968  OpExpr *op = (OpExpr *) clause;
5969 
5970  other_operand = (Node *) lsecond(op->args);
5971  }
5972  else if (IsA(clause, RowCompareExpr))
5973  {
5974  RowCompareExpr *rc = (RowCompareExpr *) clause;
5975 
5976  other_operand = (Node *) rc->rargs;
5977  }
5978  else if (IsA(clause, ScalarArrayOpExpr))
5979  {
5980  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
5981 
5982  other_operand = (Node *) lsecond(saop->args);
5983  }
5984  else if (IsA(clause, NullTest))
5985  {
5986  other_operand = NULL;
5987  }
5988  else
5989  {
5990  elog(ERROR, "unsupported indexqual type: %d",
5991  (int) nodeTag(clause));
5992  other_operand = NULL; /* keep compiler quiet */
5993  }
5994 
5995  cost_qual_eval_node(&index_qual_cost, other_operand, root);
5996  qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
5997  }
5998  return qual_arg_cost;
5999 }
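/*
 * Example (illustrative): for an indexqual such as "x < $1 + 10" the index
 * key x is the left operand and the "other operand" is the expression
 * "$1 + 10"; its startup and per-tuple costs from cost_qual_eval_node are
 * summed and charged once, reflecting that such expressions are reduced to
 * runtime scan keys a single time per index scan.
 */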
6000 
6001 void
6002 genericcostestimate(PlannerInfo *root,
6003  IndexPath *path,
6004  double loop_count,
6005  GenericCosts *costs)
6006 {
6007  IndexOptInfo *index = path->indexinfo;
6008  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
6009  List *indexOrderBys = path->indexorderbys;
6010  Cost indexStartupCost;
6011  Cost indexTotalCost;
6012  Selectivity indexSelectivity;
6013  double indexCorrelation;
6014  double numIndexPages;
6015  double numIndexTuples;
6016  double spc_random_page_cost;
6017  double num_sa_scans;
6018  double num_outer_scans;
6019  double num_scans;
6020  double qual_op_cost;
6021  double qual_arg_cost;
6022  List *selectivityQuals;
6023  ListCell *l;
6024 
6025  /*
6026  * If the index is partial, AND the index predicate with the explicitly
6027  * given indexquals to produce a more accurate idea of the index
6028  * selectivity.
6029  */
6030  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
6031 
6032  /*
6033  * Check for ScalarArrayOpExpr index quals, and estimate the number of
6034  * index scans that will be performed.
6035  */
6036  num_sa_scans = 1;
6037  foreach(l, indexQuals)
6038  {
6039  RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6040 
6041  if (IsA(rinfo->clause, ScalarArrayOpExpr))
6042  {
6043  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6044  int alength = estimate_array_length(lsecond(saop->args));
6045 
6046  if (alength > 1)
6047  num_sa_scans *= alength;
6048  }
6049  }
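 /*
  * Illustration: a qual like "x = ANY ('{1,2,3}'::int4[])" has an estimated
  * array length of 3, making num_sa_scans = 3; two such quals with lengths
  * 3 and 4 would give num_sa_scans = 12, i.e. one index descent per
  * combination of array elements.
  */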
6050 
6051  /* Estimate the fraction of main-table tuples that will be visited */
6052  indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6053  index->rel->relid,
6054  JOIN_INNER,
6055  NULL);
6056 
6057  /*
6058  * If caller didn't give us an estimate, estimate the number of index
6059  * tuples that will be visited. We do it in this rather peculiar-looking
6060  * way in order to get the right answer for partial indexes.
6061  */
6062  numIndexTuples = costs->numIndexTuples;
6063  if (numIndexTuples <= 0.0)
6064  {
6065  numIndexTuples = indexSelectivity * index->rel->tuples;
6066 
6067  /*
6068  * The above calculation counts all the tuples visited across all
6069  * scans induced by ScalarArrayOpExpr nodes. We want to consider the
6070  * average per-indexscan number, so adjust. This is a handy place to
6071  * round to integer, too. (If caller supplied tuple estimate, it's
6072  * responsible for handling these considerations.)
6073  */
6074  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6075  }
6076 
6077  /*
6078  * We can bound the number of tuples by the index size in any case. Also,
6079  * always estimate at least one tuple is touched, even when
6080  * indexSelectivity estimate is tiny.
6081  */
6082  if (numIndexTuples > index->tuples)
6083  numIndexTuples = index->tuples;
6084  if (numIndexTuples < 1.0)
6085  numIndexTuples = 1.0;
6086 
6087  /*
6088  * Estimate the number of index pages that will be retrieved.
6089  *
6090  * We use the simplistic method of taking a pro-rata fraction of the total
6091  * number of index pages. In effect, this counts only leaf pages and not
6092  * any overhead such as index metapage or upper tree levels.
6093  *
6094  * In practice access to upper index levels is often nearly free because
6095  * those tend to stay in cache under load; moreover, the cost involved is
6096  * highly dependent on index type. We therefore ignore such costs here
6097  * and leave it to the caller to add a suitable charge if needed.
6098  */
6099  if (index->pages > 1 && index->tuples > 1)
6100  numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6101  else
6102  numIndexPages = 1.0;
6103 
6104  /* fetch estimated page cost for tablespace containing index */
6105  get_tablespace_page_costs(index->reltablespace,
6106  &spc_random_page_cost,
6107  NULL);
6108 
6109  /*
6110  * Now compute the disk access costs.
6111  *
6112  * The above calculations are all per-index-scan. However, if we are in a
6113  * nestloop inner scan, we can expect the scan to be repeated (with
6114  * different search keys) for each row of the outer relation. Likewise,
6115  * ScalarArrayOpExpr quals result in multiple index scans. This creates
6116  * the potential for cache effects to reduce the number of disk page
6117  * fetches needed. We want to estimate the average per-scan I/O cost in
6118  * the presence of caching.
6119  *
6120  * We use the Mackert-Lohman formula (see costsize.c for details) to
6121  * estimate the total number of page fetches that occur. While this
6122  * wasn't what it was designed for, it seems a reasonable model anyway.
6123  * Note that we are counting pages not tuples anymore, so we take N = T =
6124  * index size, as if there were one "tuple" per page.
6125  */
6126  num_outer_scans = loop_count;
6127  num_scans = num_sa_scans * num_outer_scans;
6128 
6129  if (num_scans > 1)
6130  {
6131  double pages_fetched;
6132 
6133  /* total page fetches ignoring cache effects */
6134  pages_fetched = numIndexPages * num_scans;
6135 
6136  /* use Mackert and Lohman formula to adjust for cache effects */
6137  pages_fetched = index_pages_fetched(pages_fetched,
6138  index->pages,
6139  (double) index->pages,
6140  root);
6141 
6142  /*
6143  * Now compute the total disk access cost, and then report a pro-rated
6144  * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
6145  * since that's internal to the indexscan.)
6146  */
6147  indexTotalCost = (pages_fetched * spc_random_page_cost)
6148  / num_outer_scans;
6149  }
6150  else
6151  {
6152  /*
6153  * For a single index scan, we just charge spc_random_page_cost per
6154  * page touched.
6155  */
6156  indexTotalCost = numIndexPages * spc_random_page_cost;
6157  }
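 /*
  * Worked example (illustrative): with numIndexPages = 10, num_sa_scans = 1
  * and loop_count = 100, the raw total is 1000 page fetches; the
  * Mackert-Lohman adjustment discounts the repeats expected to hit cache,
  * and the result is divided by the 100 outer scans so that the returned
  * figure is an average per-repetition charge. A single scan simply costs
  * numIndexPages * spc_random_page_cost.
  */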
6158 
6159  /*
6160  * CPU cost: any complex expressions in the indexquals will need to be
6161  * evaluated once at the start of the scan to reduce them to runtime keys
6162  * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
6163  * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6164  * indexqual operator. Because we have numIndexTuples as a per-scan
6165  * number, we have to multiply by num_sa_scans to get the correct result
6166  * for ScalarArrayOpExpr cases. Similarly add in costs for any index
6167  * ORDER BY expressions.
6168  *
6169  * Note: this neglects the possible costs of rechecking lossy operators.
6170  * Detecting that that might be needed seems more expensive than it's
6171  * worth, though, considering all the other inaccuracies here ...
6172  */
6173  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals) +
6174  index_other_operands_eval_cost(root, indexOrderBys);
6175  qual_op_cost = cpu_operator_cost *
6176  (list_length(indexQuals) + list_length(indexOrderBys));
6177 
6178  indexStartupCost = qual_arg_cost;
6179  indexTotalCost += qual_arg_cost;
6180  indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6181 
6182  /*
6183  * Generic assumption about index correlation: there isn't any.
6184  */
6185  indexCorrelation = 0.0;
6186 
6187  /*
6188  * Return everything to caller.
6189  */
6190  costs->indexStartupCost = indexStartupCost;
6191  costs->indexTotalCost = indexTotalCost;