PostgreSQL Source Code  git master
selfuncs.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * selfuncs.c
4  * Selectivity functions and index cost estimation functions for
5  * standard operators and index access methods.
6  *
7  * Selectivity routines are registered in the pg_operator catalog
8  * in the "oprrest" and "oprjoin" attributes.
9  *
10  * Index cost functions are located via the index AM's API struct,
11  * which is obtained from the handler function registered in pg_am.
12  *
13  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
14  * Portions Copyright (c) 1994, Regents of the University of California
15  *
16  *
17  * IDENTIFICATION
18  * src/backend/utils/adt/selfuncs.c
19  *
20  *-------------------------------------------------------------------------
21  */
22 
23 /*----------
24  * Operator selectivity estimation functions are called to estimate the
25  * selectivity of WHERE clauses whose top-level operator is their operator.
26  * We divide the problem into two cases:
27  * Restriction clause estimation: the clause involves vars of just
28  * one relation.
29  * Join clause estimation: the clause involves vars of multiple rels.
30  * Join selectivity estimation is far more difficult and usually less accurate
31  * than restriction estimation.
32  *
33  * When dealing with the inner scan of a nestloop join, we consider the
34  * join's joinclauses as restriction clauses for the inner relation, and
35  * treat vars of the outer relation as parameters (a/k/a constants of unknown
36  * values). So, restriction estimators need to be able to accept an argument
37  * telling which relation is to be treated as the variable.
38  *
39  * The call convention for a restriction estimator (oprrest function) is
40  *
41  * Selectivity oprrest (PlannerInfo *root,
42  * Oid operator,
43  * List *args,
44  * int varRelid);
45  *
46  * root: general information about the query (rtable and RelOptInfo lists
47  * are particularly important for the estimator).
48  * operator: OID of the specific operator in question.
49  * args: argument list from the operator clause.
50  * varRelid: if not zero, the relid (rtable index) of the relation to
51  * be treated as the variable relation. May be zero if the args list
52  * is known to contain vars of only one relation.
53  *
54  * This is represented at the SQL level (in pg_proc) as
55  *
56  * float8 oprrest (internal, oid, internal, int4);
57  *
58  * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59  * of the relation that are expected to produce a TRUE result for the
60  * given operator.
61  *
62  * The call convention for a join estimator (oprjoin function) is similar
63  * except that varRelid is not needed, and instead join information is
64  * supplied:
65  *
66  * Selectivity oprjoin (PlannerInfo *root,
67  * Oid operator,
68  * List *args,
69  * JoinType jointype,
70  * SpecialJoinInfo *sjinfo);
71  *
72  * float8 oprjoin (internal, oid, internal, int2, internal);
73  *
74  * (Before Postgres 8.4, join estimators had only the first four of these
75  * parameters. That signature is still allowed, but deprecated.) The
76  * relationship between jointype and sjinfo is explained in the comments for
77  * clause_selectivity() --- the short version is that jointype is usually
78  * best ignored in favor of examining sjinfo.
79  *
80  * Join selectivity for regular inner and outer joins is defined as the
81  * fraction (0 to 1) of the cross product of the relations that is expected
82  * to produce a TRUE result for the given operator. For both semi and anti
83  * joins, however, the selectivity is defined as the fraction of the left-hand
84  * side relation's rows that are expected to have a match (ie, at least one
85  * row with a TRUE result) in the right-hand side.
86  *
87  * For both oprrest and oprjoin functions, the operator's input collation OID
88  * (if any) is passed using the standard fmgr mechanism, so that the estimator
89  * function can fetch it with PG_GET_COLLATION(). Note, however, that all
90  * statistics in pg_statistic are currently built using the relevant column's
91  * collation.
92  *----------
93  */
94 
95 #include "postgres.h"
96 
97 #include <ctype.h>
98 #include <math.h>
99 
100 #include "access/brin.h"
101 #include "access/brin_page.h"
102 #include "access/gin.h"
103 #include "access/table.h"
104 #include "access/tableam.h"
105 #include "access/visibilitymap.h"
106 #include "catalog/pg_am.h"
107 #include "catalog/pg_collation.h"
108 #include "catalog/pg_operator.h"
109 #include "catalog/pg_statistic.h"
111 #include "executor/nodeAgg.h"
112 #include "miscadmin.h"
113 #include "nodes/makefuncs.h"
114 #include "nodes/nodeFuncs.h"
115 #include "optimizer/clauses.h"
116 #include "optimizer/cost.h"
117 #include "optimizer/optimizer.h"
118 #include "optimizer/pathnode.h"
119 #include "optimizer/paths.h"
120 #include "optimizer/plancat.h"
121 #include "parser/parse_clause.h"
122 #include "parser/parsetree.h"
123 #include "statistics/statistics.h"
124 #include "storage/bufmgr.h"
125 #include "utils/acl.h"
126 #include "utils/builtins.h"
127 #include "utils/date.h"
128 #include "utils/datum.h"
129 #include "utils/fmgroids.h"
130 #include "utils/index_selfuncs.h"
131 #include "utils/lsyscache.h"
132 #include "utils/memutils.h"
133 #include "utils/pg_locale.h"
134 #include "utils/rel.h"
135 #include "utils/selfuncs.h"
136 #include "utils/snapmgr.h"
137 #include "utils/spccache.h"
138 #include "utils/syscache.h"
139 #include "utils/timestamp.h"
140 #include "utils/typcache.h"
141 
142 
143 /* Hooks for plugins to get control when we ask for stats */
146 
147 static double eqsel_internal(PG_FUNCTION_ARGS, bool negate);
148 static double eqjoinsel_inner(Oid opfuncoid, Oid collation,
149  VariableStatData *vardata1, VariableStatData *vardata2,
150  double nd1, double nd2,
151  bool isdefault1, bool isdefault2,
152  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
153  Form_pg_statistic stats1, Form_pg_statistic stats2,
154  bool have_mcvs1, bool have_mcvs2);
155 static double eqjoinsel_semi(Oid opfuncoid, Oid collation,
156  VariableStatData *vardata1, VariableStatData *vardata2,
157  double nd1, double nd2,
158  bool isdefault1, bool isdefault2,
159  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
160  Form_pg_statistic stats1, Form_pg_statistic stats2,
161  bool have_mcvs1, bool have_mcvs2,
162  RelOptInfo *inner_rel);
164  RelOptInfo *rel, List **varinfos, double *ndistinct);
165 static bool convert_to_scalar(Datum value, Oid valuetypid, Oid collid,
166  double *scaledvalue,
167  Datum lobound, Datum hibound, Oid boundstypid,
168  double *scaledlobound, double *scaledhibound);
169 static double convert_numeric_to_scalar(Datum value, Oid typid, bool *failure);
170 static void convert_string_to_scalar(char *value,
171  double *scaledvalue,
172  char *lobound,
173  double *scaledlobound,
174  char *hibound,
175  double *scaledhibound);
177  double *scaledvalue,
178  Datum lobound,
179  double *scaledlobound,
180  Datum hibound,
181  double *scaledhibound);
182 static double convert_one_string_to_scalar(char *value,
183  int rangelo, int rangehi);
184 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
185  int rangelo, int rangehi);
186 static char *convert_string_datum(Datum value, Oid typid, Oid collid,
187  bool *failure);
188 static double convert_timevalue_to_scalar(Datum value, Oid typid,
189  bool *failure);
190 static void examine_simple_variable(PlannerInfo *root, Var *var,
191  VariableStatData *vardata);
192 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
193  Oid sortop, Oid collation,
194  Datum *min, Datum *max);
195 static void get_stats_slot_range(AttStatsSlot *sslot,
196  Oid opfuncoid, FmgrInfo *opproc,
197  Oid collation, int16 typLen, bool typByVal,
198  Datum *min, Datum *max, bool *p_have_data);
199 static bool get_actual_variable_range(PlannerInfo *root,
200  VariableStatData *vardata,
201  Oid sortop, Oid collation,
202  Datum *min, Datum *max);
203 static bool get_actual_variable_endpoint(Relation heapRel,
204  Relation indexRel,
205  ScanDirection indexscandir,
206  ScanKey scankeys,
207  int16 typLen,
208  bool typByVal,
209  TupleTableSlot *tableslot,
210  MemoryContext outercontext,
211  Datum *endpointDatum);
212 static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
213 
214 
215 /*
216  * eqsel - Selectivity of "=" for any data types.
217  *
218  * Note: this routine is also used to estimate selectivity for some
219  * operators that are not "=" but have comparable selectivity behavior,
220  * such as "~=" (geometric approximate-match). Even for "=", we must
221  * keep in mind that the left and right datatypes may differ.
222  */
223 Datum
225 {
226  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
227 }
228 
229 /*
230  * Common code for eqsel() and neqsel()
231  */
232 static double
234 {
236  Oid operator = PG_GETARG_OID(1);
237  List *args = (List *) PG_GETARG_POINTER(2);
238  int varRelid = PG_GETARG_INT32(3);
239  Oid collation = PG_GET_COLLATION();
240  VariableStatData vardata;
241  Node *other;
242  bool varonleft;
243  double selec;
244 
245  /*
246  * When asked about <>, we do the estimation using the corresponding =
247  * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
248  */
249  if (negate)
250  {
251  operator = get_negator(operator);
252  if (!OidIsValid(operator))
253  {
254  /* Use default selectivity (should we raise an error instead?) */
255  return 1.0 - DEFAULT_EQ_SEL;
256  }
257  }
258 
259  /*
260  * If expression is not variable = something or something = variable, then
261  * punt and return a default estimate.
262  */
263  if (!get_restriction_variable(root, args, varRelid,
264  &vardata, &other, &varonleft))
265  return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;
266 
267  /*
268  * We can do a lot better if the something is a constant. (Note: the
269  * Const might result from estimation rather than being a simple constant
270  * in the query.)
271  */
272  if (IsA(other, Const))
273  selec = var_eq_const(&vardata, operator, collation,
274  ((Const *) other)->constvalue,
275  ((Const *) other)->constisnull,
276  varonleft, negate);
277  else
278  selec = var_eq_non_const(&vardata, operator, collation, other,
279  varonleft, negate);
280 
281  ReleaseVariableStats(vardata);
282 
283  return selec;
284 }
285 
286 /*
287  * var_eq_const --- eqsel for var = const case
288  *
289  * This is exported so that some other estimation functions can use it.
290  */
291 double
292 var_eq_const(VariableStatData *vardata, Oid operator, Oid collation,
293  Datum constval, bool constisnull,
294  bool varonleft, bool negate)
295 {
296  double selec;
297  double nullfrac = 0.0;
298  bool isdefault;
299  Oid opfuncoid;
300 
301  /*
302  * If the constant is NULL, assume operator is strict and return zero, ie,
303  * operator will never return TRUE. (It's zero even for a negator op.)
304  */
305  if (constisnull)
306  return 0.0;
307 
308  /*
309  * Grab the nullfrac for use below. Note we allow use of nullfrac
310  * regardless of security check.
311  */
312  if (HeapTupleIsValid(vardata->statsTuple))
313  {
314  Form_pg_statistic stats;
315 
316  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
317  nullfrac = stats->stanullfrac;
318  }
319 
320  /*
321  * If we matched the var to a unique index or DISTINCT clause, assume
322  * there is exactly one match regardless of anything else. (This is
323  * slightly bogus, since the index or clause's equality operator might be
324  * different from ours, but it's much more likely to be right than
325  * ignoring the information.)
326  */
327  if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
328  {
329  selec = 1.0 / vardata->rel->tuples;
330  }
331  else if (HeapTupleIsValid(vardata->statsTuple) &&
333  (opfuncoid = get_opcode(operator))))
334  {
335  AttStatsSlot sslot;
336  bool match = false;
337  int i;
338 
339  /*
340  * Is the constant "=" to any of the column's most common values?
341  * (Although the given operator may not really be "=", we will assume
342  * that seeing whether it returns TRUE is an appropriate test. If you
343  * don't like this, maybe you shouldn't be using eqsel for your
344  * operator...)
345  */
346  if (get_attstatsslot(&sslot, vardata->statsTuple,
347  STATISTIC_KIND_MCV, InvalidOid,
349  {
350  LOCAL_FCINFO(fcinfo, 2);
351  FmgrInfo eqproc;
352 
353  fmgr_info(opfuncoid, &eqproc);
354 
355  /*
356  * Save a few cycles by setting up the fcinfo struct just once.
357  * Using FunctionCallInvoke directly also avoids failure if the
358  * eqproc returns NULL, though really equality functions should
359  * never do that.
360  */
361  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
362  NULL, NULL);
363  fcinfo->args[0].isnull = false;
364  fcinfo->args[1].isnull = false;
365  /* be careful to apply operator right way 'round */
366  if (varonleft)
367  fcinfo->args[1].value = constval;
368  else
369  fcinfo->args[0].value = constval;
370 
371  for (i = 0; i < sslot.nvalues; i++)
372  {
373  Datum fresult;
374 
375  if (varonleft)
376  fcinfo->args[0].value = sslot.values[i];
377  else
378  fcinfo->args[1].value = sslot.values[i];
379  fcinfo->isnull = false;
380  fresult = FunctionCallInvoke(fcinfo);
381  if (!fcinfo->isnull && DatumGetBool(fresult))
382  {
383  match = true;
384  break;
385  }
386  }
387  }
388  else
389  {
390  /* no most-common-value info available */
391  i = 0; /* keep compiler quiet */
392  }
393 
394  if (match)
395  {
396  /*
397  * Constant is "=" to this common value. We know selectivity
398  * exactly (or as exactly as ANALYZE could calculate it, anyway).
399  */
400  selec = sslot.numbers[i];
401  }
402  else
403  {
404  /*
405  * Comparison is against a constant that is neither NULL nor any
406  * of the common values. Its selectivity cannot be more than
407  * this:
408  */
409  double sumcommon = 0.0;
410  double otherdistinct;
411 
412  for (i = 0; i < sslot.nnumbers; i++)
413  sumcommon += sslot.numbers[i];
414  selec = 1.0 - sumcommon - nullfrac;
415  CLAMP_PROBABILITY(selec);
416 
417  /*
418  * and in fact it's probably a good deal less. We approximate that
419  * all the not-common values share this remaining fraction
420  * equally, so we divide by the number of other distinct values.
421  */
422  otherdistinct = get_variable_numdistinct(vardata, &isdefault) -
423  sslot.nnumbers;
424  if (otherdistinct > 1)
425  selec /= otherdistinct;
426 
427  /*
428  * Another cross-check: selectivity shouldn't be estimated as more
429  * than the least common "most common value".
430  */
431  if (sslot.nnumbers > 0 && selec > sslot.numbers[sslot.nnumbers - 1])
432  selec = sslot.numbers[sslot.nnumbers - 1];
433  }
434 
435  free_attstatsslot(&sslot);
436  }
437  else
438  {
439  /*
440  * No ANALYZE stats available, so make a guess using estimated number
441  * of distinct values and assuming they are equally common. (The guess
442  * is unlikely to be very good, but we do know a few special cases.)
443  */
444  selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
445  }
446 
447  /* now adjust if we wanted <> rather than = */
448  if (negate)
449  selec = 1.0 - selec - nullfrac;
450 
451  /* result should be in range, but make sure... */
452  CLAMP_PROBABILITY(selec);
453 
454  return selec;
455 }
456 
457 /*
458  * var_eq_non_const --- eqsel for var = something-other-than-const case
459  *
460  * This is exported so that some other estimation functions can use it.
461  */
462 double
463 var_eq_non_const(VariableStatData *vardata, Oid operator, Oid collation,
464  Node *other,
465  bool varonleft, bool negate)
466 {
467  double selec;
468  double nullfrac = 0.0;
469  bool isdefault;
470 
471  /*
472  * Grab the nullfrac for use below.
473  */
474  if (HeapTupleIsValid(vardata->statsTuple))
475  {
476  Form_pg_statistic stats;
477 
478  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
479  nullfrac = stats->stanullfrac;
480  }
481 
482  /*
483  * If we matched the var to a unique index or DISTINCT clause, assume
484  * there is exactly one match regardless of anything else. (This is
485  * slightly bogus, since the index or clause's equality operator might be
486  * different from ours, but it's much more likely to be right than
487  * ignoring the information.)
488  */
489  if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
490  {
491  selec = 1.0 / vardata->rel->tuples;
492  }
493  else if (HeapTupleIsValid(vardata->statsTuple))
494  {
495  double ndistinct;
496  AttStatsSlot sslot;
497 
498  /*
499  * Search is for a value that we do not know a priori, but we will
500  * assume it is not NULL. Estimate the selectivity as non-null
501  * fraction divided by number of distinct values, so that we get a
502  * result averaged over all possible values whether common or
503  * uncommon. (Essentially, we are assuming that the not-yet-known
504  * comparison value is equally likely to be any of the possible
505  * values, regardless of their frequency in the table. Is that a good
506  * idea?)
507  */
508  selec = 1.0 - nullfrac;
509  ndistinct = get_variable_numdistinct(vardata, &isdefault);
510  if (ndistinct > 1)
511  selec /= ndistinct;
512 
513  /*
514  * Cross-check: selectivity should never be estimated as more than the
515  * most common value's.
516  */
517  if (get_attstatsslot(&sslot, vardata->statsTuple,
518  STATISTIC_KIND_MCV, InvalidOid,
520  {
521  if (sslot.nnumbers > 0 && selec > sslot.numbers[0])
522  selec = sslot.numbers[0];
523  free_attstatsslot(&sslot);
524  }
525  }
526  else
527  {
528  /*
529  * No ANALYZE stats available, so make a guess using estimated number
530  * of distinct values and assuming they are equally common. (The guess
531  * is unlikely to be very good, but we do know a few special cases.)
532  */
533  selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
534  }
535 
536  /* now adjust if we wanted <> rather than = */
537  if (negate)
538  selec = 1.0 - selec - nullfrac;
539 
540  /* result should be in range, but make sure... */
541  CLAMP_PROBABILITY(selec);
542 
543  return selec;
544 }
545 
546 /*
547  * neqsel - Selectivity of "!=" for any data types.
548  *
549  * This routine is also used for some operators that are not "!="
550  * but have comparable selectivity behavior. See above comments
551  * for eqsel().
552  */
553 Datum
555 {
556  PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, true));
557 }
558 
559 /*
560  * scalarineqsel - Selectivity of "<", "<=", ">", ">=" for scalars.
561  *
562  * This is the guts of scalarltsel/scalarlesel/scalargtsel/scalargesel.
563  * The isgt and iseq flags distinguish which of the four cases apply.
564  *
565  * The caller has commuted the clause, if necessary, so that we can treat
566  * the variable as being on the left. The caller must also make sure that
567  * the other side of the clause is a non-null Const, and dissect that into
568  * a value and datatype. (This definition simplifies some callers that
569  * want to estimate against a computed value instead of a Const node.)
570  *
571  * This routine works for any datatype (or pair of datatypes) known to
572  * convert_to_scalar(). If it is applied to some other datatype,
573  * it will return an approximate estimate based on assuming that the constant
574  * value falls in the middle of the bin identified by binary search.
575  */
576 static double
577 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt, bool iseq,
578  Oid collation,
579  VariableStatData *vardata, Datum constval, Oid consttype)
580 {
581  Form_pg_statistic stats;
582  FmgrInfo opproc;
583  double mcv_selec,
584  hist_selec,
585  sumcommon;
586  double selec;
587 
588  if (!HeapTupleIsValid(vardata->statsTuple))
589  {
590  /*
591  * No stats are available. Typically this means we have to fall back
592  * on the default estimate; but if the variable is CTID then we can
593  * make an estimate based on comparing the constant to the table size.
594  */
595  if (vardata->var && IsA(vardata->var, Var) &&
596  ((Var *) vardata->var)->varattno == SelfItemPointerAttributeNumber)
597  {
598  ItemPointer itemptr;
599  double block;
600  double density;
601 
602  /*
603  * If the relation's empty, we're going to include all of it.
604  * (This is mostly to avoid divide-by-zero below.)
605  */
606  if (vardata->rel->pages == 0)
607  return 1.0;
608 
609  itemptr = (ItemPointer) DatumGetPointer(constval);
610  block = ItemPointerGetBlockNumberNoCheck(itemptr);
611 
612  /*
613  * Determine the average number of tuples per page (density).
614  *
615  * Since the last page will, on average, be only half full, we can
616  * estimate it to have half as many tuples as earlier pages. So
617  * give it half the weight of a regular page.
618  */
619  density = vardata->rel->tuples / (vardata->rel->pages - 0.5);
620 
621  /* If target is the last page, use half the density. */
622  if (block >= vardata->rel->pages - 1)
623  density *= 0.5;
624 
625  /*
626  * Using the average tuples per page, calculate how far into the
627  * page the itemptr is likely to be and adjust block accordingly,
628  * by adding that fraction of a whole block (but never more than a
629  * whole block, no matter how high the itemptr's offset is). Here
630  * we are ignoring the possibility of dead-tuple line pointers,
631  * which is fairly bogus, but we lack the info to do better.
632  */
633  if (density > 0.0)
634  {
636 
637  block += Min(offset / density, 1.0);
638  }
639 
640  /*
641  * Convert relative block number to selectivity. Again, the last
642  * page has only half weight.
643  */
644  selec = block / (vardata->rel->pages - 0.5);
645 
646  /*
647  * The calculation so far gave us a selectivity for the "<=" case.
648  * We'll have one less tuple for "<" and one additional tuple for
649  * ">=", the latter of which we'll reverse the selectivity for
650  * below, so we can simply subtract one tuple for both cases. The
651  * cases that need this adjustment can be identified by iseq being
652  * equal to isgt.
653  */
654  if (iseq == isgt && vardata->rel->tuples >= 1.0)
655  selec -= (1.0 / vardata->rel->tuples);
656 
657  /* Finally, reverse the selectivity for the ">", ">=" cases. */
658  if (isgt)
659  selec = 1.0 - selec;
660 
661  CLAMP_PROBABILITY(selec);
662  return selec;
663  }
664 
665  /* no stats available, so default result */
666  return DEFAULT_INEQ_SEL;
667  }
668  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
669 
670  fmgr_info(get_opcode(operator), &opproc);
671 
672  /*
673  * If we have most-common-values info, add up the fractions of the MCV
674  * entries that satisfy MCV OP CONST. These fractions contribute directly
675  * to the result selectivity. Also add up the total fraction represented
676  * by MCV entries.
677  */
678  mcv_selec = mcv_selectivity(vardata, &opproc, collation, constval, true,
679  &sumcommon);
680 
681  /*
682  * If there is a histogram, determine which bin the constant falls in, and
683  * compute the resulting contribution to selectivity.
684  */
685  hist_selec = ineq_histogram_selectivity(root, vardata,
686  operator, &opproc, isgt, iseq,
687  collation,
688  constval, consttype);
689 
690  /*
691  * Now merge the results from the MCV and histogram calculations,
692  * realizing that the histogram covers only the non-null values that are
693  * not listed in MCV.
694  */
695  selec = 1.0 - stats->stanullfrac - sumcommon;
696 
697  if (hist_selec >= 0.0)
698  selec *= hist_selec;
699  else
700  {
701  /*
702  * If no histogram but there are values not accounted for by MCV,
703  * arbitrarily assume half of them will match.
704  */
705  selec *= 0.5;
706  }
707 
708  selec += mcv_selec;
709 
710  /* result should be in range, but make sure... */
711  CLAMP_PROBABILITY(selec);
712 
713  return selec;
714 }
715 
716 /*
717  * mcv_selectivity - Examine the MCV list for selectivity estimates
718  *
719  * Determine the fraction of the variable's MCV population that satisfies
720  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft. Also
721  * compute the fraction of the total column population represented by the MCV
722  * list. This code will work for any boolean-returning predicate operator.
723  *
724  * The function result is the MCV selectivity, and the fraction of the
725  * total population is returned into *sumcommonp. Zeroes are returned
726  * if there is no MCV list.
727  */
728 double
729 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation,
730  Datum constval, bool varonleft,
731  double *sumcommonp)
732 {
733  double mcv_selec,
734  sumcommon;
735  AttStatsSlot sslot;
736  int i;
737 
738  mcv_selec = 0.0;
739  sumcommon = 0.0;
740 
741  if (HeapTupleIsValid(vardata->statsTuple) &&
742  statistic_proc_security_check(vardata, opproc->fn_oid) &&
743  get_attstatsslot(&sslot, vardata->statsTuple,
744  STATISTIC_KIND_MCV, InvalidOid,
746  {
747  LOCAL_FCINFO(fcinfo, 2);
748 
749  /*
750  * We invoke the opproc "by hand" so that we won't fail on NULL
751  * results. Such cases won't arise for normal comparison functions,
752  * but generic_restriction_selectivity could perhaps be used with
753  * operators that can return NULL. A small side benefit is to not
754  * need to re-initialize the fcinfo struct from scratch each time.
755  */
756  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
757  NULL, NULL);
758  fcinfo->args[0].isnull = false;
759  fcinfo->args[1].isnull = false;
760  /* be careful to apply operator right way 'round */
761  if (varonleft)
762  fcinfo->args[1].value = constval;
763  else
764  fcinfo->args[0].value = constval;
765 
766  for (i = 0; i < sslot.nvalues; i++)
767  {
768  Datum fresult;
769 
770  if (varonleft)
771  fcinfo->args[0].value = sslot.values[i];
772  else
773  fcinfo->args[1].value = sslot.values[i];
774  fcinfo->isnull = false;
775  fresult = FunctionCallInvoke(fcinfo);
776  if (!fcinfo->isnull && DatumGetBool(fresult))
777  mcv_selec += sslot.numbers[i];
778  sumcommon += sslot.numbers[i];
779  }
780  free_attstatsslot(&sslot);
781  }
782 
783  *sumcommonp = sumcommon;
784  return mcv_selec;
785 }
786 
787 /*
788  * histogram_selectivity - Examine the histogram for selectivity estimates
789  *
790  * Determine the fraction of the variable's histogram entries that satisfy
791  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
792  *
793  * This code will work for any boolean-returning predicate operator, whether
794  * or not it has anything to do with the histogram sort operator. We are
795  * essentially using the histogram just as a representative sample. However,
796  * small histograms are unlikely to be all that representative, so the caller
797  * should be prepared to fall back on some other estimation approach when the
798  * histogram is missing or very small. It may also be prudent to combine this
799  * approach with another one when the histogram is small.
800  *
801  * If the actual histogram size is not at least min_hist_size, we won't bother
802  * to do the calculation at all. Also, if the n_skip parameter is > 0, we
803  * ignore the first and last n_skip histogram elements, on the grounds that
804  * they are outliers and hence not very representative. Typical values for
805  * these parameters are 10 and 1.
806  *
807  * The function result is the selectivity, or -1 if there is no histogram
808  * or it's smaller than min_hist_size.
809  *
810  * The output parameter *hist_size receives the actual histogram size,
811  * or zero if no histogram. Callers may use this number to decide how
812  * much faith to put in the function result.
813  *
814  * Note that the result disregards both the most-common-values (if any) and
815  * null entries. The caller is expected to combine this result with
816  * statistics for those portions of the column population. It may also be
817  * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
818  */
819 double
821  FmgrInfo *opproc, Oid collation,
822  Datum constval, bool varonleft,
823  int min_hist_size, int n_skip,
824  int *hist_size)
825 {
826  double result;
827  AttStatsSlot sslot;
828 
829  /* check sanity of parameters */
830  Assert(n_skip >= 0);
831  Assert(min_hist_size > 2 * n_skip);
832 
833  if (HeapTupleIsValid(vardata->statsTuple) &&
834  statistic_proc_security_check(vardata, opproc->fn_oid) &&
835  get_attstatsslot(&sslot, vardata->statsTuple,
836  STATISTIC_KIND_HISTOGRAM, InvalidOid,
838  {
839  *hist_size = sslot.nvalues;
840  if (sslot.nvalues >= min_hist_size)
841  {
842  LOCAL_FCINFO(fcinfo, 2);
843  int nmatch = 0;
844  int i;
845 
846  /*
847  * We invoke the opproc "by hand" so that we won't fail on NULL
848  * results. Such cases won't arise for normal comparison
849  * functions, but generic_restriction_selectivity could perhaps be
850  * used with operators that can return NULL. A small side benefit
851  * is to not need to re-initialize the fcinfo struct from scratch
852  * each time.
853  */
854  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
855  NULL, NULL);
856  fcinfo->args[0].isnull = false;
857  fcinfo->args[1].isnull = false;
858  /* be careful to apply operator right way 'round */
859  if (varonleft)
860  fcinfo->args[1].value = constval;
861  else
862  fcinfo->args[0].value = constval;
863 
864  for (i = n_skip; i < sslot.nvalues - n_skip; i++)
865  {
866  Datum fresult;
867 
868  if (varonleft)
869  fcinfo->args[0].value = sslot.values[i];
870  else
871  fcinfo->args[1].value = sslot.values[i];
872  fcinfo->isnull = false;
873  fresult = FunctionCallInvoke(fcinfo);
874  if (!fcinfo->isnull && DatumGetBool(fresult))
875  nmatch++;
876  }
877  result = ((double) nmatch) / ((double) (sslot.nvalues - 2 * n_skip));
878  }
879  else
880  result = -1;
881  free_attstatsslot(&sslot);
882  }
883  else
884  {
885  *hist_size = 0;
886  result = -1;
887  }
888 
889  return result;
890 }
891 
892 /*
893  * generic_restriction_selectivity - Selectivity for almost anything
894  *
895  * This function estimates selectivity for operators that we don't have any
896  * special knowledge about, but are on data types that we collect standard
897  * MCV and/or histogram statistics for. (Additional assumptions are that
898  * the operator is strict and immutable, or at least stable.)
899  *
900  * If we have "VAR OP CONST" or "CONST OP VAR", selectivity is estimated by
901  * applying the operator to each element of the column's MCV and/or histogram
902  * stats, and merging the results using the assumption that the histogram is
903  * a reasonable random sample of the column's non-MCV population. Note that
904  * if the operator's semantics are related to the histogram ordering, this
905  * might not be such a great assumption; other functions such as
906  * scalarineqsel() are probably a better match in such cases.
907  *
908  * Otherwise, fall back to the default selectivity provided by the caller.
909  */
910 double
912  List *args, int varRelid,
913  double default_selectivity)
914 {
915  double selec;
916  VariableStatData vardata;
917  Node *other;
918  bool varonleft;
919 
920  /*
921  * If expression is not variable OP something or something OP variable,
922  * then punt and return the default estimate.
923  */
924  if (!get_restriction_variable(root, args, varRelid,
925  &vardata, &other, &varonleft))
926  return default_selectivity;
927 
928  /*
929  * If the something is a NULL constant, assume operator is strict and
930  * return zero, ie, operator will never return TRUE.
931  */
932  if (IsA(other, Const) &&
933  ((Const *) other)->constisnull)
934  {
935  ReleaseVariableStats(vardata);
936  return 0.0;
937  }
938 
939  if (IsA(other, Const))
940  {
941  /* Variable is being compared to a known non-null constant */
942  Datum constval = ((Const *) other)->constvalue;
943  FmgrInfo opproc;
944  double mcvsum;
945  double mcvsel;
946  double nullfrac;
947  int hist_size;
948 
949  fmgr_info(get_opcode(oproid), &opproc);
950 
951  /*
952  * Calculate the selectivity for the column's most common values.
953  */
954  mcvsel = mcv_selectivity(&vardata, &opproc, collation,
955  constval, varonleft,
956  &mcvsum);
957 
958  /*
959  * If the histogram is large enough, see what fraction of it matches
960  * the query, and assume that's representative of the non-MCV
961  * population. Otherwise use the default selectivity for the non-MCV
962  * population.
963  */
964  selec = histogram_selectivity(&vardata, &opproc, collation,
965  constval, varonleft,
966  10, 1, &hist_size);
967  if (selec < 0)
968  {
969  /* Nope, fall back on default */
970  selec = default_selectivity;
971  }
972  else if (hist_size < 100)
973  {
974  /*
975  * For histogram sizes from 10 to 100, we combine the histogram
976  * and default selectivities, putting increasingly more trust in
977  * the histogram for larger sizes.
978  */
979  double hist_weight = hist_size / 100.0;
980 
981  selec = selec * hist_weight +
982  default_selectivity * (1.0 - hist_weight);
983  }
984 
985  /* In any case, don't believe extremely small or large estimates. */
986  if (selec < 0.0001)
987  selec = 0.0001;
988  else if (selec > 0.9999)
989  selec = 0.9999;
990 
991  /* Don't forget to account for nulls. */
992  if (HeapTupleIsValid(vardata.statsTuple))
993  nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
994  else
995  nullfrac = 0.0;
996 
997  /*
998  * Now merge the results from the MCV and histogram calculations,
999  * realizing that the histogram covers only the non-null values that
1000  * are not listed in MCV.
1001  */
1002  selec *= 1.0 - nullfrac - mcvsum;
1003  selec += mcvsel;
1004  }
1005  else
1006  {
1007  /* Comparison value is not constant, so we can't do anything */
1008  selec = default_selectivity;
1009  }
1010 
1011  ReleaseVariableStats(vardata);
1012 
1013  /* result should be in range, but make sure... */
1014  CLAMP_PROBABILITY(selec);
1015 
1016  return selec;
1017 }
1018 
1019 /*
1020  * ineq_histogram_selectivity - Examine the histogram for scalarineqsel
1021  *
1022  * Determine the fraction of the variable's histogram population that
1023  * satisfies the inequality condition, ie, VAR < (or <=, >, >=) CONST.
1024  * The isgt and iseq flags distinguish which of the four cases apply.
1025  *
1026  * While opproc could be looked up from the operator OID, common callers
1027  * also need to call it separately, so we make the caller pass both.
1028  *
1029  * Returns -1 if there is no histogram (valid results will always be >= 0).
1030  *
1031  * Note that the result disregards both the most-common-values (if any) and
1032  * null entries. The caller is expected to combine this result with
1033  * statistics for those portions of the column population.
1034  *
1035  * This is exported so that some other estimation functions can use it.
1036  */
1037 double
1039  VariableStatData *vardata,
1040  Oid opoid, FmgrInfo *opproc, bool isgt, bool iseq,
1041  Oid collation,
1042  Datum constval, Oid consttype)
1043 {
1044  double hist_selec;
1045  AttStatsSlot sslot;
1046 
1047  hist_selec = -1.0;
1048 
1049  /*
1050  * Someday, ANALYZE might store more than one histogram per rel/att,
1051  * corresponding to more than one possible sort ordering defined for the
1052  * column type. Right now, we know there is only one, so just grab it and
1053  * see if it matches the query.
1054  *
1055  * Note that we can't use opoid as search argument; the staop appearing in
1056  * pg_statistic will be for the relevant '<' operator, but what we have
1057  * might be some other inequality operator such as '>='. (Even if opoid
1058  * is a '<' operator, it could be cross-type.) Hence we must use
1059  * comparison_ops_are_compatible() to see if the operators match.
1060  */
1061  if (HeapTupleIsValid(vardata->statsTuple) &&
1062  statistic_proc_security_check(vardata, opproc->fn_oid) &&
1063  get_attstatsslot(&sslot, vardata->statsTuple,
1064  STATISTIC_KIND_HISTOGRAM, InvalidOid,
1066  {
1067  if (sslot.nvalues > 1 &&
1068  sslot.stacoll == collation &&
1069  comparison_ops_are_compatible(sslot.staop, opoid))
1070  {
1071  /*
1072  * Use binary search to find the desired location, namely the
1073  * right end of the histogram bin containing the comparison value,
1074  * which is the leftmost entry for which the comparison operator
1075  * succeeds (if isgt) or fails (if !isgt).
1076  *
1077  * In this loop, we pay no attention to whether the operator iseq
1078  * or not; that detail will be mopped up below. (We cannot tell,
1079  * anyway, whether the operator thinks the values are equal.)
1080  *
1081  * If the binary search accesses the first or last histogram
1082  * entry, we try to replace that endpoint with the true column min
1083  * or max as found by get_actual_variable_range(). This
1084  * ameliorates misestimates when the min or max is moving as a
1085  * result of changes since the last ANALYZE. Note that this could
1086  * result in effectively including MCVs into the histogram that
1087  * weren't there before, but we don't try to correct for that.
1088  */
1089  double histfrac;
1090  int lobound = 0; /* first possible slot to search */
1091  int hibound = sslot.nvalues; /* last+1 slot to search */
1092  bool have_end = false;
1093 
1094  /*
1095  * If there are only two histogram entries, we'll want up-to-date
1096  * values for both. (If there are more than two, we need at most
1097  * one of them to be updated, so we deal with that within the
1098  * loop.)
1099  */
1100  if (sslot.nvalues == 2)
1101  have_end = get_actual_variable_range(root,
1102  vardata,
1103  sslot.staop,
1104  collation,
1105  &sslot.values[0],
1106  &sslot.values[1]);
1107 
1108  while (lobound < hibound)
1109  {
1110  int probe = (lobound + hibound) / 2;
1111  bool ltcmp;
1112 
1113  /*
1114  * If we find ourselves about to compare to the first or last
1115  * histogram entry, first try to replace it with the actual
1116  * current min or max (unless we already did so above).
1117  */
1118  if (probe == 0 && sslot.nvalues > 2)
1119  have_end = get_actual_variable_range(root,
1120  vardata,
1121  sslot.staop,
1122  collation,
1123  &sslot.values[0],
1124  NULL);
1125  else if (probe == sslot.nvalues - 1 && sslot.nvalues > 2)
1126  have_end = get_actual_variable_range(root,
1127  vardata,
1128  sslot.staop,
1129  collation,
1130  NULL,
1131  &sslot.values[probe]);
1132 
1133  ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
1134  collation,
1135  sslot.values[probe],
1136  constval));
1137  if (isgt)
1138  ltcmp = !ltcmp;
1139  if (ltcmp)
1140  lobound = probe + 1;
1141  else
1142  hibound = probe;
1143  }
1144 
1145  if (lobound <= 0)
1146  {
1147  /*
1148  * Constant is below lower histogram boundary. More
1149  * precisely, we have found that no entry in the histogram
1150  * satisfies the inequality clause (if !isgt) or they all do
1151  * (if isgt). We estimate that that's true of the entire
1152  * table, so set histfrac to 0.0 (which we'll flip to 1.0
1153  * below, if isgt).
1154  */
1155  histfrac = 0.0;
1156  }
1157  else if (lobound >= sslot.nvalues)
1158  {
1159  /*
1160  * Inverse case: constant is above upper histogram boundary.
1161  */
1162  histfrac = 1.0;
1163  }
1164  else
1165  {
1166  /* We have values[i-1] <= constant <= values[i]. */
1167  int i = lobound;
1168  double eq_selec = 0;
1169  double val,
1170  high,
1171  low;
1172  double binfrac;
1173 
1174  /*
1175  * In the cases where we'll need it below, obtain an estimate
1176  * of the selectivity of "x = constval". We use a calculation
1177  * similar to what var_eq_const() does for a non-MCV constant,
1178  * ie, estimate that all distinct non-MCV values occur equally
1179  * often. But multiplication by "1.0 - sumcommon - nullfrac"
1180  * will be done by our caller, so we shouldn't do that here.
1181  * Therefore we can't try to clamp the estimate by reference
1182  * to the least common MCV; the result would be too small.
1183  *
1184  * Note: since this is effectively assuming that constval
1185  * isn't an MCV, it's logically dubious if constval in fact is
1186  * one. But we have to apply *some* correction for equality,
1187  * and anyway we cannot tell if constval is an MCV, since we
1188  * don't have a suitable equality operator at hand.
1189  */
1190  if (i == 1 || isgt == iseq)
1191  {
1192  double otherdistinct;
1193  bool isdefault;
1194  AttStatsSlot mcvslot;
1195 
1196  /* Get estimated number of distinct values */
1197  otherdistinct = get_variable_numdistinct(vardata,
1198  &isdefault);
1199 
1200  /* Subtract off the number of known MCVs */
1201  if (get_attstatsslot(&mcvslot, vardata->statsTuple,
1202  STATISTIC_KIND_MCV, InvalidOid,
1204  {
1205  otherdistinct -= mcvslot.nnumbers;
1206  free_attstatsslot(&mcvslot);
1207  }
1208 
1209  /* If result doesn't seem sane, leave eq_selec at 0 */
1210  if (otherdistinct > 1)
1211  eq_selec = 1.0 / otherdistinct;
1212  }
1213 
1214  /*
1215  * Convert the constant and the two nearest bin boundary
1216  * values to a uniform comparison scale, and do a linear
1217  * interpolation within this bin.
1218  */
1219  if (convert_to_scalar(constval, consttype, collation,
1220  &val,
1221  sslot.values[i - 1], sslot.values[i],
1222  vardata->vartype,
1223  &low, &high))
1224  {
1225  if (high <= low)
1226  {
1227  /* cope if bin boundaries appear identical */
1228  binfrac = 0.5;
1229  }
1230  else if (val <= low)
1231  binfrac = 0.0;
1232  else if (val >= high)
1233  binfrac = 1.0;
1234  else
1235  {
1236  binfrac = (val - low) / (high - low);
1237 
1238  /*
1239  * Watch out for the possibility that we got a NaN or
1240  * Infinity from the division. This can happen
1241  * despite the previous checks, if for example "low"
1242  * is -Infinity.
1243  */
1244  if (isnan(binfrac) ||
1245  binfrac < 0.0 || binfrac > 1.0)
1246  binfrac = 0.5;
1247  }
1248  }
1249  else
1250  {
1251  /*
1252  * Ideally we'd produce an error here, on the grounds that
1253  * the given operator shouldn't have scalarXXsel
1254  * registered as its selectivity func unless we can deal
1255  * with its operand types. But currently, all manner of
1256  * stuff is invoking scalarXXsel, so give a default
1257  * estimate until that can be fixed.
1258  */
1259  binfrac = 0.5;
1260  }
1261 
1262  /*
1263  * Now, compute the overall selectivity across the values
1264  * represented by the histogram. We have i-1 full bins and
1265  * binfrac partial bin below the constant.
1266  */
1267  histfrac = (double) (i - 1) + binfrac;
1268  histfrac /= (double) (sslot.nvalues - 1);
1269 
1270  /*
1271  * At this point, histfrac is an estimate of the fraction of
1272  * the population represented by the histogram that satisfies
1273  * "x <= constval". Somewhat remarkably, this statement is
1274  * true regardless of which operator we were doing the probes
1275  * with, so long as convert_to_scalar() delivers reasonable
1276  * results. If the probe constant is equal to some histogram
1277  * entry, we would have considered the bin to the left of that
1278  * entry if probing with "<" or ">=", or the bin to the right
1279  * if probing with "<=" or ">"; but binfrac would have come
1280  * out as 1.0 in the first case and 0.0 in the second, leading
1281  * to the same histfrac in either case. For probe constants
1282  * between histogram entries, we find the same bin and get the
1283  * same estimate with any operator.
1284  *
1285  * The fact that the estimate corresponds to "x <= constval"
1286  * and not "x < constval" is because of the way that ANALYZE
1287  * constructs the histogram: each entry is, effectively, the
1288  * rightmost value in its sample bucket. So selectivity
1289  * values that are exact multiples of 1/(histogram_size-1)
1290  * should be understood as estimates including a histogram
1291  * entry plus everything to its left.
1292  *
1293  * However, that breaks down for the first histogram entry,
1294  * which necessarily is the leftmost value in its sample
1295  * bucket. That means the first histogram bin is slightly
1296  * narrower than the rest, by an amount equal to eq_selec.
1297  * Another way to say that is that we want "x <= leftmost" to
1298  * be estimated as eq_selec not zero. So, if we're dealing
1299  * with the first bin (i==1), rescale to make that true while
1300  * adjusting the rest of that bin linearly.
1301  */
1302  if (i == 1)
1303  histfrac += eq_selec * (1.0 - binfrac);
1304 
1305  /*
1306  * "x <= constval" is good if we want an estimate for "<=" or
1307  * ">", but if we are estimating for "<" or ">=", we now need
1308  * to decrease the estimate by eq_selec.
1309  */
1310  if (isgt == iseq)
1311  histfrac -= eq_selec;
1312  }
1313 
1314  /*
1315  * Now the estimate is finished for "<" and "<=" cases. If we are
1316  * estimating for ">" or ">=", flip it.
1317  */
1318  hist_selec = isgt ? (1.0 - histfrac) : histfrac;
1319 
1320  /*
1321  * The histogram boundaries are only approximate to begin with,
1322  * and may well be out of date anyway. Therefore, don't believe
1323  * extremely small or large selectivity estimates --- unless we
1324  * got actual current endpoint values from the table, in which
1325  * case just do the usual sanity clamp. Somewhat arbitrarily, we
1326  * set the cutoff for other cases at a hundredth of the histogram
1327  * resolution.
1328  */
1329  if (have_end)
1330  CLAMP_PROBABILITY(hist_selec);
1331  else
1332  {
1333  double cutoff = 0.01 / (double) (sslot.nvalues - 1);
1334 
1335  if (hist_selec < cutoff)
1336  hist_selec = cutoff;
1337  else if (hist_selec > 1.0 - cutoff)
1338  hist_selec = 1.0 - cutoff;
1339  }
1340  }
1341  else if (sslot.nvalues > 1)
1342  {
1343  /*
1344  * If we get here, we have a histogram but it's not sorted the way
1345  * we want. Do a brute-force search to see how many of the
1346  * entries satisfy the comparison condition, and take that
1347  * fraction as our estimate. (This is identical to the inner loop
1348  * of histogram_selectivity; maybe share code?)
1349  */
1350  LOCAL_FCINFO(fcinfo, 2);
1351  int nmatch = 0;
1352 
1353  InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
1354  NULL, NULL);
1355  fcinfo->args[0].isnull = false;
1356  fcinfo->args[1].isnull = false;
1357  fcinfo->args[1].value = constval;
1358  for (int i = 0; i < sslot.nvalues; i++)
1359  {
1360  Datum fresult;
1361 
1362  fcinfo->args[0].value = sslot.values[i];
1363  fcinfo->isnull = false;
1364  fresult = FunctionCallInvoke(fcinfo);
1365  if (!fcinfo->isnull && DatumGetBool(fresult))
1366  nmatch++;
1367  }
1368  hist_selec = ((double) nmatch) / ((double) sslot.nvalues);
1369 
1370  /*
1371  * As above, clamp to a hundredth of the histogram resolution.
1372  * This case is surely even less trustworthy than the normal one,
1373  * so we shouldn't believe exact 0 or 1 selectivity. (Maybe the
1374  * clamp should be more restrictive in this case?)
1375  */
1376  {
1377  double cutoff = 0.01 / (double) (sslot.nvalues - 1);
1378 
1379  if (hist_selec < cutoff)
1380  hist_selec = cutoff;
1381  else if (hist_selec > 1.0 - cutoff)
1382  hist_selec = 1.0 - cutoff;
1383  }
1384  }
1385 
1386  free_attstatsslot(&sslot);
1387  }
1388 
1389  return hist_selec;
1390 }
1391 
1392 /*
1393  * Common wrapper function for the selectivity estimators that simply
1394  * invoke scalarineqsel().
1395  */
1396 static Datum
1398 {
1399  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1400  Oid operator = PG_GETARG_OID(1);
1401  List *args = (List *) PG_GETARG_POINTER(2);
1402  int varRelid = PG_GETARG_INT32(3);
1403  Oid collation = PG_GET_COLLATION();
1404  VariableStatData vardata;
1405  Node *other;
1406  bool varonleft;
1407  Datum constval;
1408  Oid consttype;
1409  double selec;
1410 
1411  /*
1412  * If expression is not variable op something or something op variable,
1413  * then punt and return a default estimate.
1414  */
1415  if (!get_restriction_variable(root, args, varRelid,
1416  &vardata, &other, &varonleft))
1418 
1419  /*
1420  * Can't do anything useful if the something is not a constant, either.
1421  */
1422  if (!IsA(other, Const))
1423  {
1424  ReleaseVariableStats(vardata);
1426  }
1427 
1428  /*
1429  * If the constant is NULL, assume operator is strict and return zero, ie,
1430  * operator will never return TRUE.
1431  */
1432  if (((Const *) other)->constisnull)
1433  {
1434  ReleaseVariableStats(vardata);
1435  PG_RETURN_FLOAT8(0.0);
1436  }
1437  constval = ((Const *) other)->constvalue;
1438  consttype = ((Const *) other)->consttype;
1439 
1440  /*
1441  * Force the var to be on the left to simplify logic in scalarineqsel.
1442  */
1443  if (!varonleft)
1444  {
1445  operator = get_commutator(operator);
1446  if (!operator)
1447  {
1448  /* Use default selectivity (should we raise an error instead?) */
1449  ReleaseVariableStats(vardata);
1451  }
1452  isgt = !isgt;
1453  }
1454 
1455  /* The rest of the work is done by scalarineqsel(). */
1456  selec = scalarineqsel(root, operator, isgt, iseq, collation,
1457  &vardata, constval, consttype);
1458 
1459  ReleaseVariableStats(vardata);
1460 
1461  PG_RETURN_FLOAT8((float8) selec);
1462 }
1463 
1464 /*
1465  * scalarltsel - Selectivity of "<" for scalars.
1466  */
1467 Datum
1469 {
1470  return scalarineqsel_wrapper(fcinfo, false, false);
1471 }
1472 
1473 /*
1474  * scalarlesel - Selectivity of "<=" for scalars.
1475  */
1476 Datum
1478 {
1479  return scalarineqsel_wrapper(fcinfo, false, true);
1480 }
1481 
1482 /*
1483  * scalargtsel - Selectivity of ">" for scalars.
1484  */
1485 Datum
1487 {
1488  return scalarineqsel_wrapper(fcinfo, true, false);
1489 }
1490 
1491 /*
1492  * scalargesel - Selectivity of ">=" for scalars.
1493  */
1494 Datum
1496 {
1497  return scalarineqsel_wrapper(fcinfo, true, true);
1498 }
1499 
1500 /*
1501  * boolvarsel - Selectivity of Boolean variable.
1502  *
1503  * This can actually be called on any boolean-valued expression. If it
1504  * involves only Vars of the specified relation, and if there are statistics
1505  * about the Var or expression (the latter is possible if it's indexed) then
1506  * we'll produce a real estimate; otherwise it's just a default.
1507  */
1509 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1510 {
1511  VariableStatData vardata;
1512  double selec;
1513 
1514  examine_variable(root, arg, varRelid, &vardata);
1515  if (HeapTupleIsValid(vardata.statsTuple))
1516  {
1517  /*
1518  * A boolean variable V is equivalent to the clause V = 't', so we
1519  * compute the selectivity as if that is what we have.
1520  */
1521  selec = var_eq_const(&vardata, BooleanEqualOperator, InvalidOid,
1522  BoolGetDatum(true), false, true, false);
1523  }
1524  else
1525  {
1526  /* Otherwise, the default estimate is 0.5 */
1527  selec = 0.5;
1528  }
1529  ReleaseVariableStats(vardata);
1530  return selec;
1531 }
1532 
1533 /*
1534  * booltestsel - Selectivity of BooleanTest Node.
1535  */
1538  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1539 {
1540  VariableStatData vardata;
1541  double selec;
1542 
1543  examine_variable(root, arg, varRelid, &vardata);
1544 
1545  if (HeapTupleIsValid(vardata.statsTuple))
1546  {
1547  Form_pg_statistic stats;
1548  double freq_null;
1549  AttStatsSlot sslot;
1550 
1551  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1552  freq_null = stats->stanullfrac;
1553 
1554  if (get_attstatsslot(&sslot, vardata.statsTuple,
1555  STATISTIC_KIND_MCV, InvalidOid,
1557  && sslot.nnumbers > 0)
1558  {
1559  double freq_true;
1560  double freq_false;
1561 
1562  /*
1563  * Get first MCV frequency and derive frequency for true.
1564  */
1565  if (DatumGetBool(sslot.values[0]))
1566  freq_true = sslot.numbers[0];
1567  else
1568  freq_true = 1.0 - sslot.numbers[0] - freq_null;
1569 
1570  /*
1571  * Next derive frequency for false. Then use these as appropriate
1572  * to derive frequency for each case.
1573  */
1574  freq_false = 1.0 - freq_true - freq_null;
1575 
1576  switch (booltesttype)
1577  {
1578  case IS_UNKNOWN:
1579  /* select only NULL values */
1580  selec = freq_null;
1581  break;
1582  case IS_NOT_UNKNOWN:
1583  /* select non-NULL values */
1584  selec = 1.0 - freq_null;
1585  break;
1586  case IS_TRUE:
1587  /* select only TRUE values */
1588  selec = freq_true;
1589  break;
1590  case IS_NOT_TRUE:
1591  /* select non-TRUE values */
1592  selec = 1.0 - freq_true;
1593  break;
1594  case IS_FALSE:
1595  /* select only FALSE values */
1596  selec = freq_false;
1597  break;
1598  case IS_NOT_FALSE:
1599  /* select non-FALSE values */
1600  selec = 1.0 - freq_false;
1601  break;
1602  default:
1603  elog(ERROR, "unrecognized booltesttype: %d",
1604  (int) booltesttype);
1605  selec = 0.0; /* Keep compiler quiet */
1606  break;
1607  }
1608 
1609  free_attstatsslot(&sslot);
1610  }
1611  else
1612  {
1613  /*
1614  * No most-common-value info available. Still have null fraction
1615  * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1616  * for null fraction and assume a 50-50 split of TRUE and FALSE.
1617  */
1618  switch (booltesttype)
1619  {
1620  case IS_UNKNOWN:
1621  /* select only NULL values */
1622  selec = freq_null;
1623  break;
1624  case IS_NOT_UNKNOWN:
1625  /* select non-NULL values */
1626  selec = 1.0 - freq_null;
1627  break;
1628  case IS_TRUE:
1629  case IS_FALSE:
1630  /* Assume we select half of the non-NULL values */
1631  selec = (1.0 - freq_null) / 2.0;
1632  break;
1633  case IS_NOT_TRUE:
1634  case IS_NOT_FALSE:
1635  /* Assume we select NULLs plus half of the non-NULLs */
1636  /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1637  selec = (freq_null + 1.0) / 2.0;
1638  break;
1639  default:
1640  elog(ERROR, "unrecognized booltesttype: %d",
1641  (int) booltesttype);
1642  selec = 0.0; /* Keep compiler quiet */
1643  break;
1644  }
1645  }
1646  }
1647  else
1648  {
1649  /*
1650  * If we can't get variable statistics for the argument, perhaps
1651  * clause_selectivity can do something with it. We ignore the
1652  * possibility of a NULL value when using clause_selectivity, and just
1653  * assume the value is either TRUE or FALSE.
1654  */
1655  switch (booltesttype)
1656  {
1657  case IS_UNKNOWN:
1658  selec = DEFAULT_UNK_SEL;
1659  break;
1660  case IS_NOT_UNKNOWN:
1661  selec = DEFAULT_NOT_UNK_SEL;
1662  break;
1663  case IS_TRUE:
1664  case IS_NOT_FALSE:
1665  selec = (double) clause_selectivity(root, arg,
1666  varRelid,
1667  jointype, sjinfo);
1668  break;
1669  case IS_FALSE:
1670  case IS_NOT_TRUE:
1671  selec = 1.0 - (double) clause_selectivity(root, arg,
1672  varRelid,
1673  jointype, sjinfo);
1674  break;
1675  default:
1676  elog(ERROR, "unrecognized booltesttype: %d",
1677  (int) booltesttype);
1678  selec = 0.0; /* Keep compiler quiet */
1679  break;
1680  }
1681  }
1682 
1683  ReleaseVariableStats(vardata);
1684 
1685  /* result should be in range, but make sure... */
1686  CLAMP_PROBABILITY(selec);
1687 
1688  return (Selectivity) selec;
1689 }
1690 
1691 /*
1692  * nulltestsel - Selectivity of NullTest Node.
1693  */
1696  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1697 {
1698  VariableStatData vardata;
1699  double selec;
1700 
1701  examine_variable(root, arg, varRelid, &vardata);
1702 
1703  if (HeapTupleIsValid(vardata.statsTuple))
1704  {
1705  Form_pg_statistic stats;
1706  double freq_null;
1707 
1708  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1709  freq_null = stats->stanullfrac;
1710 
1711  switch (nulltesttype)
1712  {
1713  case IS_NULL:
1714 
1715  /*
1716  * Use freq_null directly.
1717  */
1718  selec = freq_null;
1719  break;
1720  case IS_NOT_NULL:
1721 
1722  /*
1723  * Select not unknown (not null) values. Calculate from
1724  * freq_null.
1725  */
1726  selec = 1.0 - freq_null;
1727  break;
1728  default:
1729  elog(ERROR, "unrecognized nulltesttype: %d",
1730  (int) nulltesttype);
1731  return (Selectivity) 0; /* keep compiler quiet */
1732  }
1733  }
1734  else if (vardata.var && IsA(vardata.var, Var) &&
1735  ((Var *) vardata.var)->varattno < 0)
1736  {
1737  /*
1738  * There are no stats for system columns, but we know they are never
1739  * NULL.
1740  */
1741  selec = (nulltesttype == IS_NULL) ? 0.0 : 1.0;
1742  }
1743  else
1744  {
1745  /*
1746  * No ANALYZE stats available, so make a guess
1747  */
1748  switch (nulltesttype)
1749  {
1750  case IS_NULL:
1751  selec = DEFAULT_UNK_SEL;
1752  break;
1753  case IS_NOT_NULL:
1754  selec = DEFAULT_NOT_UNK_SEL;
1755  break;
1756  default:
1757  elog(ERROR, "unrecognized nulltesttype: %d",
1758  (int) nulltesttype);
1759  return (Selectivity) 0; /* keep compiler quiet */
1760  }
1761  }
1762 
1763  ReleaseVariableStats(vardata);
1764 
1765  /* result should be in range, but make sure... */
1766  CLAMP_PROBABILITY(selec);
1767 
1768  return (Selectivity) selec;
1769 }
1770 
1771 /*
1772  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1773  *
1774  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1775  * but it seems possible that RelabelType might show up. Also, the planner
1776  * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1777  * so we need to be ready to deal with more than one level.
1778  */
1779 static Node *
1781 {
1782  for (;;)
1783  {
1784  if (node && IsA(node, ArrayCoerceExpr))
1785  {
1786  ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
1787 
1788  /*
1789  * If the per-element expression is just a RelabelType on top of
1790  * CaseTestExpr, then we know it's a binary-compatible relabeling.
1791  */
1792  if (IsA(acoerce->elemexpr, RelabelType) &&
1793  IsA(((RelabelType *) acoerce->elemexpr)->arg, CaseTestExpr))
1794  node = (Node *) acoerce->arg;
1795  else
1796  break;
1797  }
1798  else if (node && IsA(node, RelabelType))
1799  {
1800  /* We don't really expect this case, but may as well cope */
1801  node = (Node *) ((RelabelType *) node)->arg;
1802  }
1803  else
1804  break;
1805  }
1806  return node;
1807 }
1808 
1809 /*
1810  * scalararraysel - Selectivity of ScalarArrayOpExpr Node.
1811  */
1814  ScalarArrayOpExpr *clause,
1815  bool is_join_clause,
1816  int varRelid,
1817  JoinType jointype,
1818  SpecialJoinInfo *sjinfo)
1819 {
1820  Oid operator = clause->opno;
1821  bool useOr = clause->useOr;
1822  bool isEquality = false;
1823  bool isInequality = false;
1824  Node *leftop;
1825  Node *rightop;
1826  Oid nominal_element_type;
1827  Oid nominal_element_collation;
1828  TypeCacheEntry *typentry;
1829  RegProcedure oprsel;
1830  FmgrInfo oprselproc;
1831  Selectivity s1;
1832  Selectivity s1disjoint;
1833 
1834  /* First, deconstruct the expression */
1835  Assert(list_length(clause->args) == 2);
1836  leftop = (Node *) linitial(clause->args);
1837  rightop = (Node *) lsecond(clause->args);
1838 
1839  /* aggressively reduce both sides to constants */
1840  leftop = estimate_expression_value(root, leftop);
1841  rightop = estimate_expression_value(root, rightop);
1842 
1843  /* get nominal (after relabeling) element type of rightop */
1844  nominal_element_type = get_base_element_type(exprType(rightop));
1845  if (!OidIsValid(nominal_element_type))
1846  return (Selectivity) 0.5; /* probably shouldn't happen */
1847  /* get nominal collation, too, for generating constants */
1848  nominal_element_collation = exprCollation(rightop);
1849 
1850  /* look through any binary-compatible relabeling of rightop */
1851  rightop = strip_array_coercion(rightop);
1852 
1853  /*
1854  * Detect whether the operator is the default equality or inequality
1855  * operator of the array element type.
1856  */
1857  typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1858  if (OidIsValid(typentry->eq_opr))
1859  {
1860  if (operator == typentry->eq_opr)
1861  isEquality = true;
1862  else if (get_negator(operator) == typentry->eq_opr)
1863  isInequality = true;
1864  }
1865 
1866  /*
1867  * If it is equality or inequality, we might be able to estimate this as a
1868  * form of array containment; for instance "const = ANY(column)" can be
1869  * treated as "ARRAY[const] <@ column". scalararraysel_containment tries
1870  * that, and returns the selectivity estimate if successful, or -1 if not.
1871  */
1872  if ((isEquality || isInequality) && !is_join_clause)
1873  {
1874  s1 = scalararraysel_containment(root, leftop, rightop,
1875  nominal_element_type,
1876  isEquality, useOr, varRelid);
1877  if (s1 >= 0.0)
1878  return s1;
1879  }
1880 
1881  /*
1882  * Look up the underlying operator's selectivity estimator. Punt if it
1883  * hasn't got one.
1884  */
1885  if (is_join_clause)
1886  oprsel = get_oprjoin(operator);
1887  else
1888  oprsel = get_oprrest(operator);
1889  if (!oprsel)
1890  return (Selectivity) 0.5;
1891  fmgr_info(oprsel, &oprselproc);
1892 
1893  /*
1894  * In the array-containment check above, we must only believe that an
1895  * operator is equality or inequality if it is the default btree equality
1896  * operator (or its negator) for the element type, since those are the
1897  * operators that array containment will use. But in what follows, we can
1898  * be a little laxer, and also believe that any operators using eqsel() or
1899  * neqsel() as selectivity estimator act like equality or inequality.
1900  */
1901  if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1902  isEquality = true;
1903  else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1904  isInequality = true;
1905 
1906  /*
1907  * We consider three cases:
1908  *
1909  * 1. rightop is an Array constant: deconstruct the array, apply the
1910  * operator's selectivity function for each array element, and merge the
1911  * results in the same way that clausesel.c does for AND/OR combinations.
1912  *
1913  * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1914  * function for each element of the ARRAY[] construct, and merge.
1915  *
1916  * 3. otherwise, make a guess ...
1917  */
1918  if (rightop && IsA(rightop, Const))
1919  {
1920  Datum arraydatum = ((Const *) rightop)->constvalue;
1921  bool arrayisnull = ((Const *) rightop)->constisnull;
1922  ArrayType *arrayval;
1923  int16 elmlen;
1924  bool elmbyval;
1925  char elmalign;
1926  int num_elems;
1927  Datum *elem_values;
1928  bool *elem_nulls;
1929  int i;
1930 
1931  if (arrayisnull) /* qual can't succeed if null array */
1932  return (Selectivity) 0.0;
1933  arrayval = DatumGetArrayTypeP(arraydatum);
1935  &elmlen, &elmbyval, &elmalign);
1936  deconstruct_array(arrayval,
1937  ARR_ELEMTYPE(arrayval),
1938  elmlen, elmbyval, elmalign,
1939  &elem_values, &elem_nulls, &num_elems);
1940 
1941  /*
1942  * For generic operators, we assume the probability of success is
1943  * independent for each array element. But for "= ANY" or "<> ALL",
1944  * if the array elements are distinct (which'd typically be the case)
1945  * then the probabilities are disjoint, and we should just sum them.
1946  *
1947  * If we were being really tense we would try to confirm that the
1948  * elements are all distinct, but that would be expensive and it
1949  * doesn't seem to be worth the cycles; it would amount to penalizing
1950  * well-written queries in favor of poorly-written ones. However, we
1951  * do protect ourselves a little bit by checking whether the
1952  * disjointness assumption leads to an impossible (out of range)
1953  * probability; if so, we fall back to the normal calculation.
1954  */
1955  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1956 
1957  for (i = 0; i < num_elems; i++)
1958  {
1959  List *args;
1960  Selectivity s2;
1961 
1962  args = list_make2(leftop,
1963  makeConst(nominal_element_type,
1964  -1,
1965  nominal_element_collation,
1966  elmlen,
1967  elem_values[i],
1968  elem_nulls[i],
1969  elmbyval));
1970  if (is_join_clause)
1971  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1972  clause->inputcollid,
1973  PointerGetDatum(root),
1974  ObjectIdGetDatum(operator),
1975  PointerGetDatum(args),
1976  Int16GetDatum(jointype),
1977  PointerGetDatum(sjinfo)));
1978  else
1979  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1980  clause->inputcollid,
1981  PointerGetDatum(root),
1982  ObjectIdGetDatum(operator),
1983  PointerGetDatum(args),
1984  Int32GetDatum(varRelid)));
1985 
1986  if (useOr)
1987  {
1988  s1 = s1 + s2 - s1 * s2;
1989  if (isEquality)
1990  s1disjoint += s2;
1991  }
1992  else
1993  {
1994  s1 = s1 * s2;
1995  if (isInequality)
1996  s1disjoint += s2 - 1.0;
1997  }
1998  }
1999 
2000  /* accept disjoint-probability estimate if in range */
2001  if ((useOr ? isEquality : isInequality) &&
2002  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2003  s1 = s1disjoint;
2004  }
2005  else if (rightop && IsA(rightop, ArrayExpr) &&
2006  !((ArrayExpr *) rightop)->multidims)
2007  {
2008  ArrayExpr *arrayexpr = (ArrayExpr *) rightop;
2009  int16 elmlen;
2010  bool elmbyval;
2011  ListCell *l;
2012 
2013  get_typlenbyval(arrayexpr->element_typeid,
2014  &elmlen, &elmbyval);
2015 
2016  /*
2017  * We use the assumption of disjoint probabilities here too, although
2018  * the odds of equal array elements are rather higher if the elements
2019  * are not all constants (which they won't be, else constant folding
2020  * would have reduced the ArrayExpr to a Const). In this path it's
2021  * critical to have the sanity check on the s1disjoint estimate.
2022  */
2023  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
2024 
2025  foreach(l, arrayexpr->elements)
2026  {
2027  Node *elem = (Node *) lfirst(l);
2028  List *args;
2029  Selectivity s2;
2030 
2031  /*
2032  * Theoretically, if elem isn't of nominal_element_type we should
2033  * insert a RelabelType, but it seems unlikely that any operator
2034  * estimation function would really care ...
2035  */
2036  args = list_make2(leftop, elem);
2037  if (is_join_clause)
2038  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2039  clause->inputcollid,
2040  PointerGetDatum(root),
2041  ObjectIdGetDatum(operator),
2042  PointerGetDatum(args),
2043  Int16GetDatum(jointype),
2044  PointerGetDatum(sjinfo)));
2045  else
2046  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2047  clause->inputcollid,
2048  PointerGetDatum(root),
2049  ObjectIdGetDatum(operator),
2050  PointerGetDatum(args),
2051  Int32GetDatum(varRelid)));
2052 
2053  if (useOr)
2054  {
2055  s1 = s1 + s2 - s1 * s2;
2056  if (isEquality)
2057  s1disjoint += s2;
2058  }
2059  else
2060  {
2061  s1 = s1 * s2;
2062  if (isInequality)
2063  s1disjoint += s2 - 1.0;
2064  }
2065  }
2066 
2067  /* accept disjoint-probability estimate if in range */
2068  if ((useOr ? isEquality : isInequality) &&
2069  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2070  s1 = s1disjoint;
2071  }
2072  else
2073  {
2074  CaseTestExpr *dummyexpr;
2075  List *args;
2076  Selectivity s2;
2077  int i;
2078 
2079  /*
2080  * We need a dummy rightop to pass to the operator selectivity
2081  * routine. It can be pretty much anything that doesn't look like a
2082  * constant; CaseTestExpr is a convenient choice.
2083  */
2084  dummyexpr = makeNode(CaseTestExpr);
2085  dummyexpr->typeId = nominal_element_type;
2086  dummyexpr->typeMod = -1;
2087  dummyexpr->collation = clause->inputcollid;
2088  args = list_make2(leftop, dummyexpr);
2089  if (is_join_clause)
2090  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2091  clause->inputcollid,
2092  PointerGetDatum(root),
2093  ObjectIdGetDatum(operator),
2094  PointerGetDatum(args),
2095  Int16GetDatum(jointype),
2096  PointerGetDatum(sjinfo)));
2097  else
2098  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2099  clause->inputcollid,
2100  PointerGetDatum(root),
2101  ObjectIdGetDatum(operator),
2102  PointerGetDatum(args),
2103  Int32GetDatum(varRelid)));
2104  s1 = useOr ? 0.0 : 1.0;
2105 
2106  /*
2107  * Arbitrarily assume 10 elements in the eventual array value (see
2108  * also estimate_array_length). We don't risk an assumption of
2109  * disjoint probabilities here.
2110  */
2111  for (i = 0; i < 10; i++)
2112  {
2113  if (useOr)
2114  s1 = s1 + s2 - s1 * s2;
2115  else
2116  s1 = s1 * s2;
2117  }
2118  }
2119 
2120  /* result should be in range, but make sure... */
2121  CLAMP_PROBABILITY(s1);
2122 
2123  return s1;
2124 }
2125 
2126 /*
2127  * Estimate number of elements in the array yielded by an expression.
2128  *
2129  * It's important that this agree with scalararraysel.
2130  */
2131 int
2133 {
2134  /* look through any binary-compatible relabeling of arrayexpr */
2135  arrayexpr = strip_array_coercion(arrayexpr);
2136 
2137  if (arrayexpr && IsA(arrayexpr, Const))
2138  {
2139  Datum arraydatum = ((Const *) arrayexpr)->constvalue;
2140  bool arrayisnull = ((Const *) arrayexpr)->constisnull;
2141  ArrayType *arrayval;
2142 
2143  if (arrayisnull)
2144  return 0;
2145  arrayval = DatumGetArrayTypeP(arraydatum);
2146  return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2147  }
2148  else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2149  !((ArrayExpr *) arrayexpr)->multidims)
2150  {
2151  return list_length(((ArrayExpr *) arrayexpr)->elements);
2152  }
2153  else
2154  {
2155  /* default guess --- see also scalararraysel */
2156  return 10;
2157  }
2158 }
2159 
2160 /*
2161  * rowcomparesel - Selectivity of RowCompareExpr Node.
2162  *
2163  * We estimate RowCompare selectivity by considering just the first (high
2164  * order) columns, which makes it equivalent to an ordinary OpExpr. While
2165  * this estimate could be refined by considering additional columns, it
2166  * seems unlikely that we could do a lot better without multi-column
2167  * statistics.
2168  */
2171  RowCompareExpr *clause,
2172  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2173 {
2174  Selectivity s1;
2175  Oid opno = linitial_oid(clause->opnos);
2176  Oid inputcollid = linitial_oid(clause->inputcollids);
2177  List *opargs;
2178  bool is_join_clause;
2179 
2180  /* Build equivalent arg list for single operator */
2181  opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2182 
2183  /*
2184  * Decide if it's a join clause. This should match clausesel.c's
2185  * treat_as_join_clause(), except that we intentionally consider only the
2186  * leading columns and not the rest of the clause.
2187  */
2188  if (varRelid != 0)
2189  {
2190  /*
2191  * Caller is forcing restriction mode (eg, because we are examining an
2192  * inner indexscan qual).
2193  */
2194  is_join_clause = false;
2195  }
2196  else if (sjinfo == NULL)
2197  {
2198  /*
2199  * It must be a restriction clause, since it's being evaluated at a
2200  * scan node.
2201  */
2202  is_join_clause = false;
2203  }
2204  else
2205  {
2206  /*
2207  * Otherwise, it's a join if there's more than one relation used.
2208  */
2209  is_join_clause = (NumRelids((Node *) opargs) > 1);
2210  }
2211 
2212  if (is_join_clause)
2213  {
2214  /* Estimate selectivity for a join clause. */
2215  s1 = join_selectivity(root, opno,
2216  opargs,
2217  inputcollid,
2218  jointype,
2219  sjinfo);
2220  }
2221  else
2222  {
2223  /* Estimate selectivity for a restriction clause. */
2224  s1 = restriction_selectivity(root, opno,
2225  opargs,
2226  inputcollid,
2227  varRelid);
2228  }
2229 
2230  return s1;
2231 }
2232 
2233 /*
2234  * eqjoinsel - Join selectivity of "="
2235  */
2236 Datum
2238 {
2239  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2240  Oid operator = PG_GETARG_OID(1);
2241  List *args = (List *) PG_GETARG_POINTER(2);
2242 
2243 #ifdef NOT_USED
2244  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2245 #endif
2247  Oid collation = PG_GET_COLLATION();
2248  double selec;
2249  double selec_inner;
2250  VariableStatData vardata1;
2251  VariableStatData vardata2;
2252  double nd1;
2253  double nd2;
2254  bool isdefault1;
2255  bool isdefault2;
2256  Oid opfuncoid;
2257  AttStatsSlot sslot1;
2258  AttStatsSlot sslot2;
2259  Form_pg_statistic stats1 = NULL;
2260  Form_pg_statistic stats2 = NULL;
2261  bool have_mcvs1 = false;
2262  bool have_mcvs2 = false;
2263  bool join_is_reversed;
2264  RelOptInfo *inner_rel;
2265 
2266  get_join_variables(root, args, sjinfo,
2267  &vardata1, &vardata2, &join_is_reversed);
2268 
2269  nd1 = get_variable_numdistinct(&vardata1, &isdefault1);
2270  nd2 = get_variable_numdistinct(&vardata2, &isdefault2);
2271 
2272  opfuncoid = get_opcode(operator);
2273 
2274  memset(&sslot1, 0, sizeof(sslot1));
2275  memset(&sslot2, 0, sizeof(sslot2));
2276 
2277  if (HeapTupleIsValid(vardata1.statsTuple))
2278  {
2279  /* note we allow use of nullfrac regardless of security check */
2280  stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
2281  if (statistic_proc_security_check(&vardata1, opfuncoid))
2282  have_mcvs1 = get_attstatsslot(&sslot1, vardata1.statsTuple,
2283  STATISTIC_KIND_MCV, InvalidOid,
2285  }
2286 
2287  if (HeapTupleIsValid(vardata2.statsTuple))
2288  {
2289  /* note we allow use of nullfrac regardless of security check */
2290  stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
2291  if (statistic_proc_security_check(&vardata2, opfuncoid))
2292  have_mcvs2 = get_attstatsslot(&sslot2, vardata2.statsTuple,
2293  STATISTIC_KIND_MCV, InvalidOid,
2295  }
2296 
2297  /* We need to compute the inner-join selectivity in all cases */
2298  selec_inner = eqjoinsel_inner(opfuncoid, collation,
2299  &vardata1, &vardata2,
2300  nd1, nd2,
2301  isdefault1, isdefault2,
2302  &sslot1, &sslot2,
2303  stats1, stats2,
2304  have_mcvs1, have_mcvs2);
2305 
2306  switch (sjinfo->jointype)
2307  {
2308  case JOIN_INNER:
2309  case JOIN_LEFT:
2310  case JOIN_FULL:
2311  selec = selec_inner;
2312  break;
2313  case JOIN_SEMI:
2314  case JOIN_ANTI:
2315 
2316  /*
2317  * Look up the join's inner relation. min_righthand is sufficient
2318  * information because neither SEMI nor ANTI joins permit any
2319  * reassociation into or out of their RHS, so the righthand will
2320  * always be exactly that set of rels.
2321  */
2322  inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2323 
2324  if (!join_is_reversed)
2325  selec = eqjoinsel_semi(opfuncoid, collation,
2326  &vardata1, &vardata2,
2327  nd1, nd2,
2328  isdefault1, isdefault2,
2329  &sslot1, &sslot2,
2330  stats1, stats2,
2331  have_mcvs1, have_mcvs2,
2332  inner_rel);
2333  else
2334  {
2335  Oid commop = get_commutator(operator);
2336  Oid commopfuncoid = OidIsValid(commop) ? get_opcode(commop) : InvalidOid;
2337 
2338  selec = eqjoinsel_semi(commopfuncoid, collation,
2339  &vardata2, &vardata1,
2340  nd2, nd1,
2341  isdefault2, isdefault1,
2342  &sslot2, &sslot1,
2343  stats2, stats1,
2344  have_mcvs2, have_mcvs1,
2345  inner_rel);
2346  }
2347 
2348  /*
2349  * We should never estimate the output of a semijoin to be more
2350  * rows than we estimate for an inner join with the same input
2351  * rels and join condition; it's obviously impossible for that to
2352  * happen. The former estimate is N1 * Ssemi while the latter is
2353  * N1 * N2 * Sinner, so we may clamp Ssemi <= N2 * Sinner. Doing
2354  * this is worthwhile because of the shakier estimation rules we
2355  * use in eqjoinsel_semi, particularly in cases where it has to
2356  * punt entirely.
2357  */
2358  selec = Min(selec, inner_rel->rows * selec_inner);
2359  break;
2360  default:
2361  /* other values not expected here */
2362  elog(ERROR, "unrecognized join type: %d",
2363  (int) sjinfo->jointype);
2364  selec = 0; /* keep compiler quiet */
2365  break;
2366  }
2367 
2368  free_attstatsslot(&sslot1);
2369  free_attstatsslot(&sslot2);
2370 
2371  ReleaseVariableStats(vardata1);
2372  ReleaseVariableStats(vardata2);
2373 
2374  CLAMP_PROBABILITY(selec);
2375 
2376  PG_RETURN_FLOAT8((float8) selec);
2377 }
2378 
2379 /*
2380  * eqjoinsel_inner --- eqjoinsel for normal inner join
2381  *
2382  * We also use this for LEFT/FULL outer joins; it's not presently clear
2383  * that it's worth trying to distinguish them here.
2384  */
2385 static double
2386 eqjoinsel_inner(Oid opfuncoid, Oid collation,
2387  VariableStatData *vardata1, VariableStatData *vardata2,
2388  double nd1, double nd2,
2389  bool isdefault1, bool isdefault2,
2390  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2391  Form_pg_statistic stats1, Form_pg_statistic stats2,
2392  bool have_mcvs1, bool have_mcvs2)
2393 {
2394  double selec;
2395 
2396  if (have_mcvs1 && have_mcvs2)
2397  {
2398  /*
2399  * We have most-common-value lists for both relations. Run through
2400  * the lists to see which MCVs actually join to each other with the
2401  * given operator. This allows us to determine the exact join
2402  * selectivity for the portion of the relations represented by the MCV
2403  * lists. We still have to estimate for the remaining population, but
2404  * in a skewed distribution this gives us a big leg up in accuracy.
2405  * For motivation see the analysis in Y. Ioannidis and S.
2406  * Christodoulakis, "On the propagation of errors in the size of join
2407  * results", Technical Report 1018, Computer Science Dept., University
2408  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2409  */
2410  LOCAL_FCINFO(fcinfo, 2);
2411  FmgrInfo eqproc;
2412  bool *hasmatch1;
2413  bool *hasmatch2;
2414  double nullfrac1 = stats1->stanullfrac;
2415  double nullfrac2 = stats2->stanullfrac;
2416  double matchprodfreq,
2417  matchfreq1,
2418  matchfreq2,
2419  unmatchfreq1,
2420  unmatchfreq2,
2421  otherfreq1,
2422  otherfreq2,
2423  totalsel1,
2424  totalsel2;
2425  int i,
2426  nmatches;
2427 
2428  fmgr_info(opfuncoid, &eqproc);
2429 
2430  /*
2431  * Save a few cycles by setting up the fcinfo struct just once. Using
2432  * FunctionCallInvoke directly also avoids failure if the eqproc
2433  * returns NULL, though really equality functions should never do
2434  * that.
2435  */
2436  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2437  NULL, NULL);
2438  fcinfo->args[0].isnull = false;
2439  fcinfo->args[1].isnull = false;
2440 
2441  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2442  hasmatch2 = (bool *) palloc0(sslot2->nvalues * sizeof(bool));
2443 
2444  /*
2445  * Note we assume that each MCV will match at most one member of the
2446  * other MCV list. If the operator isn't really equality, there could
2447  * be multiple matches --- but we don't look for them, both for speed
2448  * and because the math wouldn't add up...
2449  */
2450  matchprodfreq = 0.0;
2451  nmatches = 0;
2452  for (i = 0; i < sslot1->nvalues; i++)
2453  {
2454  int j;
2455 
2456  fcinfo->args[0].value = sslot1->values[i];
2457 
2458  for (j = 0; j < sslot2->nvalues; j++)
2459  {
2460  Datum fresult;
2461 
2462  if (hasmatch2[j])
2463  continue;
2464  fcinfo->args[1].value = sslot2->values[j];
2465  fcinfo->isnull = false;
2466  fresult = FunctionCallInvoke(fcinfo);
2467  if (!fcinfo->isnull && DatumGetBool(fresult))
2468  {
2469  hasmatch1[i] = hasmatch2[j] = true;
2470  matchprodfreq += sslot1->numbers[i] * sslot2->numbers[j];
2471  nmatches++;
2472  break;
2473  }
2474  }
2475  }
2476  CLAMP_PROBABILITY(matchprodfreq);
2477  /* Sum up frequencies of matched and unmatched MCVs */
2478  matchfreq1 = unmatchfreq1 = 0.0;
2479  for (i = 0; i < sslot1->nvalues; i++)
2480  {
2481  if (hasmatch1[i])
2482  matchfreq1 += sslot1->numbers[i];
2483  else
2484  unmatchfreq1 += sslot1->numbers[i];
2485  }
2486  CLAMP_PROBABILITY(matchfreq1);
2487  CLAMP_PROBABILITY(unmatchfreq1);
2488  matchfreq2 = unmatchfreq2 = 0.0;
2489  for (i = 0; i < sslot2->nvalues; i++)
2490  {
2491  if (hasmatch2[i])
2492  matchfreq2 += sslot2->numbers[i];
2493  else
2494  unmatchfreq2 += sslot2->numbers[i];
2495  }
2496  CLAMP_PROBABILITY(matchfreq2);
2497  CLAMP_PROBABILITY(unmatchfreq2);
2498  pfree(hasmatch1);
2499  pfree(hasmatch2);
2500 
2501  /*
2502  * Compute total frequency of non-null values that are not in the MCV
2503  * lists.
2504  */
2505  otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2506  otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2507  CLAMP_PROBABILITY(otherfreq1);
2508  CLAMP_PROBABILITY(otherfreq2);
2509 
2510  /*
2511  * We can estimate the total selectivity from the point of view of
2512  * relation 1 as: the known selectivity for matched MCVs, plus
2513  * unmatched MCVs that are assumed to match against random members of
2514  * relation 2's non-MCV population, plus non-MCV values that are
2515  * assumed to match against random members of relation 2's unmatched
2516  * MCVs plus non-MCV values.
2517  */
2518  totalsel1 = matchprodfreq;
2519  if (nd2 > sslot2->nvalues)
2520  totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - sslot2->nvalues);
2521  if (nd2 > nmatches)
2522  totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2523  (nd2 - nmatches);
2524  /* Same estimate from the point of view of relation 2. */
2525  totalsel2 = matchprodfreq;
2526  if (nd1 > sslot1->nvalues)
2527  totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - sslot1->nvalues);
2528  if (nd1 > nmatches)
2529  totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2530  (nd1 - nmatches);
2531 
2532  /*
2533  * Use the smaller of the two estimates. This can be justified in
2534  * essentially the same terms as given below for the no-stats case: to
2535  * a first approximation, we are estimating from the point of view of
2536  * the relation with smaller nd.
2537  */
2538  selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2539  }
2540  else
2541  {
2542  /*
2543  * We do not have MCV lists for both sides. Estimate the join
2544  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2545  * is plausible if we assume that the join operator is strict and the
2546  * non-null values are about equally distributed: a given non-null
2547  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2548  * of rel2, so total join rows are at most
2549  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2550  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2551  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2552  * with MIN() is an upper bound. Using the MIN() means we estimate
2553  * from the point of view of the relation with smaller nd (since the
2554  * larger nd is determining the MIN). It is reasonable to assume that
2555  * most tuples in this rel will have join partners, so the bound is
2556  * probably reasonably tight and should be taken as-is.
2557  *
2558  * XXX Can we be smarter if we have an MCV list for just one side? It
2559  * seems that if we assume equal distribution for the other side, we
2560  * end up with the same answer anyway.
2561  */
2562  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2563  double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2564 
2565  selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2566  if (nd1 > nd2)
2567  selec /= nd1;
2568  else
2569  selec /= nd2;
2570  }
2571 
2572  return selec;
2573 }
2574 
2575 /*
2576  * eqjoinsel_semi --- eqjoinsel for semi join
2577  *
2578  * (Also used for anti join, which we are supposed to estimate the same way.)
2579  * Caller has ensured that vardata1 is the LHS variable.
2580  * Unlike eqjoinsel_inner, we have to cope with opfuncoid being InvalidOid.
2581  */
2582 static double
2583 eqjoinsel_semi(Oid opfuncoid, Oid collation,
2584  VariableStatData *vardata1, VariableStatData *vardata2,
2585  double nd1, double nd2,
2586  bool isdefault1, bool isdefault2,
2587  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2588  Form_pg_statistic stats1, Form_pg_statistic stats2,
2589  bool have_mcvs1, bool have_mcvs2,
2590  RelOptInfo *inner_rel)
2591 {
2592  double selec;
2593 
2594  /*
2595  * We clamp nd2 to be not more than what we estimate the inner relation's
2596  * size to be. This is intuitively somewhat reasonable since obviously
2597  * there can't be more than that many distinct values coming from the
2598  * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
2599  * likewise) is that this is the only pathway by which restriction clauses
2600  * applied to the inner rel will affect the join result size estimate,
2601  * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2602  * only the outer rel's size. If we clamped nd1 we'd be double-counting
2603  * the selectivity of outer-rel restrictions.
2604  *
2605  * We can apply this clamping both with respect to the base relation from
2606  * which the join variable comes (if there is just one), and to the
2607  * immediate inner input relation of the current join.
2608  *
2609  * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2610  * great, maybe, but it didn't come out of nowhere either. This is most
2611  * helpful when the inner relation is empty and consequently has no stats.
2612  */
2613  if (vardata2->rel)
2614  {
2615  if (nd2 >= vardata2->rel->rows)
2616  {
2617  nd2 = vardata2->rel->rows;
2618  isdefault2 = false;
2619  }
2620  }
2621  if (nd2 >= inner_rel->rows)
2622  {
2623  nd2 = inner_rel->rows;
2624  isdefault2 = false;
2625  }
2626 
2627  if (have_mcvs1 && have_mcvs2 && OidIsValid(opfuncoid))
2628  {
2629  /*
2630  * We have most-common-value lists for both relations. Run through
2631  * the lists to see which MCVs actually join to each other with the
2632  * given operator. This allows us to determine the exact join
2633  * selectivity for the portion of the relations represented by the MCV
2634  * lists. We still have to estimate for the remaining population, but
2635  * in a skewed distribution this gives us a big leg up in accuracy.
2636  */
2637  LOCAL_FCINFO(fcinfo, 2);
2638  FmgrInfo eqproc;
2639  bool *hasmatch1;
2640  bool *hasmatch2;
2641  double nullfrac1 = stats1->stanullfrac;
2642  double matchfreq1,
2643  uncertainfrac,
2644  uncertain;
2645  int i,
2646  nmatches,
2647  clamped_nvalues2;
2648 
2649  /*
2650  * The clamping above could have resulted in nd2 being less than
2651  * sslot2->nvalues; in which case, we assume that precisely the nd2
2652  * most common values in the relation will appear in the join input,
2653  * and so compare to only the first nd2 members of the MCV list. Of
2654  * course this is frequently wrong, but it's the best bet we can make.
2655  */
2656  clamped_nvalues2 = Min(sslot2->nvalues, nd2);
2657 
2658  fmgr_info(opfuncoid, &eqproc);
2659 
2660  /*
2661  * Save a few cycles by setting up the fcinfo struct just once. Using
2662  * FunctionCallInvoke directly also avoids failure if the eqproc
2663  * returns NULL, though really equality functions should never do
2664  * that.
2665  */
2666  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2667  NULL, NULL);
2668  fcinfo->args[0].isnull = false;
2669  fcinfo->args[1].isnull = false;
2670 
2671  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2672  hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2673 
2674  /*
2675  * Note we assume that each MCV will match at most one member of the
2676  * other MCV list. If the operator isn't really equality, there could
2677  * be multiple matches --- but we don't look for them, both for speed
2678  * and because the math wouldn't add up...
2679  */
2680  nmatches = 0;
2681  for (i = 0; i < sslot1->nvalues; i++)
2682  {
2683  int j;
2684 
2685  fcinfo->args[0].value = sslot1->values[i];
2686 
2687  for (j = 0; j < clamped_nvalues2; j++)
2688  {
2689  Datum fresult;
2690 
2691  if (hasmatch2[j])
2692  continue;
2693  fcinfo->args[1].value = sslot2->values[j];
2694  fcinfo->isnull = false;
2695  fresult = FunctionCallInvoke(fcinfo);
2696  if (!fcinfo->isnull && DatumGetBool(fresult))
2697  {
2698  hasmatch1[i] = hasmatch2[j] = true;
2699  nmatches++;
2700  break;
2701  }
2702  }
2703  }
2704  /* Sum up frequencies of matched MCVs */
2705  matchfreq1 = 0.0;
2706  for (i = 0; i < sslot1->nvalues; i++)
2707  {
2708  if (hasmatch1[i])
2709  matchfreq1 += sslot1->numbers[i];
2710  }
2711  CLAMP_PROBABILITY(matchfreq1);
2712  pfree(hasmatch1);
2713  pfree(hasmatch2);
2714 
2715  /*
2716  * Now we need to estimate the fraction of relation 1 that has at
2717  * least one join partner. We know for certain that the matched MCVs
2718  * do, so that gives us a lower bound, but we're really in the dark
2719  * about everything else. Our crude approach is: if nd1 <= nd2 then
2720  * assume all non-null rel1 rows have join partners, else assume for
2721  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2722  * can discount the known-matched MCVs from the distinct-values counts
2723  * before doing the division.
2724  *
2725  * Crude as the above is, it's completely useless if we don't have
2726  * reliable ndistinct values for both sides. Hence, if either nd1 or
2727  * nd2 is default, punt and assume half of the uncertain rows have
2728  * join partners.
2729  */
2730  if (!isdefault1 && !isdefault2)
2731  {
2732  nd1 -= nmatches;
2733  nd2 -= nmatches;
2734  if (nd1 <= nd2 || nd2 < 0)
2735  uncertainfrac = 1.0;
2736  else
2737  uncertainfrac = nd2 / nd1;
2738  }
2739  else
2740  uncertainfrac = 0.5;
2741  uncertain = 1.0 - matchfreq1 - nullfrac1;
2742  CLAMP_PROBABILITY(uncertain);
2743  selec = matchfreq1 + uncertainfrac * uncertain;
2744  }
2745  else
2746  {
2747  /*
2748  * Without MCV lists for both sides, we can only use the heuristic
2749  * about nd1 vs nd2.
2750  */
2751  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2752 
2753  if (!isdefault1 && !isdefault2)
2754  {
2755  if (nd1 <= nd2 || nd2 < 0)
2756  selec = 1.0 - nullfrac1;
2757  else
2758  selec = (nd2 / nd1) * (1.0 - nullfrac1);
2759  }
2760  else
2761  selec = 0.5 * (1.0 - nullfrac1);
2762  }
2763 
2764  return selec;
2765 }
2766 
2767 /*
2768  * neqjoinsel - Join selectivity of "!="
2769  */
2770 Datum
2772 {
2773  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2774  Oid operator = PG_GETARG_OID(1);
2775  List *args = (List *) PG_GETARG_POINTER(2);
2776  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2778  Oid collation = PG_GET_COLLATION();
2779  float8 result;
2780 
2781  if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
2782  {
2783  /*
2784  * For semi-joins, if there is more than one distinct value in the RHS
2785  * relation then every non-null LHS row must find a row to join since
2786  * it can only be equal to one of them. We'll assume that there is
2787  * always more than one distinct RHS value for the sake of stability,
2788  * though in theory we could have special cases for empty RHS
2789  * (selectivity = 0) and single-distinct-value RHS (selectivity =
2790  * fraction of LHS that has the same value as the single RHS value).
2791  *
2792  * For anti-joins, if we use the same assumption that there is more
2793  * than one distinct key in the RHS relation, then every non-null LHS
2794  * row must be suppressed by the anti-join.
2795  *
2796  * So either way, the selectivity estimate should be 1 - nullfrac.
2797  */
2798  VariableStatData leftvar;
2799  VariableStatData rightvar;
2800  bool reversed;
2801  HeapTuple statsTuple;
2802  double nullfrac;
2803 
2804  get_join_variables(root, args, sjinfo, &leftvar, &rightvar, &reversed);
2805  statsTuple = reversed ? rightvar.statsTuple : leftvar.statsTuple;
2806  if (HeapTupleIsValid(statsTuple))
2807  nullfrac = ((Form_pg_statistic) GETSTRUCT(statsTuple))->stanullfrac;
2808  else
2809  nullfrac = 0.0;
2810  ReleaseVariableStats(leftvar);
2811  ReleaseVariableStats(rightvar);
2812 
2813  result = 1.0 - nullfrac;
2814  }
2815  else
2816  {
2817  /*
2818  * We want 1 - eqjoinsel() where the equality operator is the one
2819  * associated with this != operator, that is, its negator.
2820  */
2821  Oid eqop = get_negator(operator);
2822 
2823  if (eqop)
2824  {
2825  result =
2827  collation,
2828  PointerGetDatum(root),
2829  ObjectIdGetDatum(eqop),
2830  PointerGetDatum(args),
2831  Int16GetDatum(jointype),
2832  PointerGetDatum(sjinfo)));
2833  }
2834  else
2835  {
2836  /* Use default selectivity (should we raise an error instead?) */
2837  result = DEFAULT_EQ_SEL;
2838  }
2839  result = 1.0 - result;
2840  }
2841 
2842  PG_RETURN_FLOAT8(result);
2843 }
2844 
2845 /*
2846  * scalarltjoinsel - Join selectivity of "<" for scalars
2847  */
2848 Datum
2850 {
2852 }
2853 
2854 /*
2855  * scalarlejoinsel - Join selectivity of "<=" for scalars
2856  */
2857 Datum
2859 {
2861 }
2862 
2863 /*
2864  * scalargtjoinsel - Join selectivity of ">" for scalars
2865  */
2866 Datum
2868 {
2870 }
2871 
2872 /*
2873  * scalargejoinsel - Join selectivity of ">=" for scalars
2874  */
2875 Datum
2877 {
2879 }
2880 
2881 
2882 /*
2883  * mergejoinscansel - Scan selectivity of merge join.
2884  *
2885  * A merge join will stop as soon as it exhausts either input stream.
2886  * Therefore, if we can estimate the ranges of both input variables,
2887  * we can estimate how much of the input will actually be read. This
2888  * can have a considerable impact on the cost when using indexscans.
2889  *
2890  * Also, we can estimate how much of each input has to be read before the
2891  * first join pair is found, which will affect the join's startup time.
2892  *
2893  * clause should be a clause already known to be mergejoinable. opfamily,
2894  * strategy, and nulls_first specify the sort ordering being used.
2895  *
2896  * The outputs are:
2897  * *leftstart is set to the fraction of the left-hand variable expected
2898  * to be scanned before the first join pair is found (0 to 1).
2899  * *leftend is set to the fraction of the left-hand variable expected
2900  * to be scanned before the join terminates (0 to 1).
2901  * *rightstart, *rightend similarly for the right-hand variable.
2902  */
2903 void
2905  Oid opfamily, int strategy, bool nulls_first,
2906  Selectivity *leftstart, Selectivity *leftend,
2907  Selectivity *rightstart, Selectivity *rightend)
2908 {
2909  Node *left,
2910  *right;
2911  VariableStatData leftvar,
2912  rightvar;
2913  int op_strategy;
2914  Oid op_lefttype;
2915  Oid op_righttype;
2916  Oid opno,
2917  collation,
2918  lsortop,
2919  rsortop,
2920  lstatop,
2921  rstatop,
2922  ltop,
2923  leop,
2924  revltop,
2925  revleop;
2926  bool isgt;
2927  Datum leftmin,
2928  leftmax,
2929  rightmin,
2930  rightmax;
2931  double selec;
2932 
2933  /* Set default results if we can't figure anything out. */
2934  /* XXX should default "start" fraction be a bit more than 0? */
2935  *leftstart = *rightstart = 0.0;
2936  *leftend = *rightend = 1.0;
2937 
2938  /* Deconstruct the merge clause */
2939  if (!is_opclause(clause))
2940  return; /* shouldn't happen */
2941  opno = ((OpExpr *) clause)->opno;
2942  collation = ((OpExpr *) clause)->inputcollid;
2943  left = get_leftop((Expr *) clause);
2944  right = get_rightop((Expr *) clause);
2945  if (!right)
2946  return; /* shouldn't happen */
2947 
2948  /* Look for stats for the inputs */
2949  examine_variable(root, left, 0, &leftvar);
2950  examine_variable(root, right, 0, &rightvar);
2951 
2952  /* Extract the operator's declared left/right datatypes */
2953  get_op_opfamily_properties(opno, opfamily, false,
2954  &op_strategy,
2955  &op_lefttype,
2956  &op_righttype);
2957  Assert(op_strategy == BTEqualStrategyNumber);
2958 
2959  /*
2960  * Look up the various operators we need. If we don't find them all, it
2961  * probably means the opfamily is broken, but we just fail silently.
2962  *
2963  * Note: we expect that pg_statistic histograms will be sorted by the '<'
2964  * operator, regardless of which sort direction we are considering.
2965  */
2966  switch (strategy)
2967  {
2968  case BTLessStrategyNumber:
2969  isgt = false;
2970  if (op_lefttype == op_righttype)
2971  {
2972  /* easy case */
2973  ltop = get_opfamily_member(opfamily,
2974  op_lefttype, op_righttype,
2976  leop = get_opfamily_member(opfamily,
2977  op_lefttype, op_righttype,
2979  lsortop = ltop;
2980  rsortop = ltop;
2981  lstatop = lsortop;
2982  rstatop = rsortop;
2983  revltop = ltop;
2984  revleop = leop;
2985  }
2986  else
2987  {
2988  ltop = get_opfamily_member(opfamily,
2989  op_lefttype, op_righttype,
2991  leop = get_opfamily_member(opfamily,
2992  op_lefttype, op_righttype,
2994  lsortop = get_opfamily_member(opfamily,
2995  op_lefttype, op_lefttype,
2997  rsortop = get_opfamily_member(opfamily,
2998  op_righttype, op_righttype,
3000  lstatop = lsortop;
3001  rstatop = rsortop;
3002  revltop = get_opfamily_member(opfamily,
3003  op_righttype, op_lefttype,
3005  revleop = get_opfamily_member(opfamily,
3006  op_righttype, op_lefttype,
3008  }
3009  break;
3011  /* descending-order case */
3012  isgt = true;
3013  if (op_lefttype == op_righttype)
3014  {
3015  /* easy case */
3016  ltop = get_opfamily_member(opfamily,
3017  op_lefttype, op_righttype,
3019  leop = get_opfamily_member(opfamily,
3020  op_lefttype, op_righttype,
3022  lsortop = ltop;
3023  rsortop = ltop;
3024  lstatop = get_opfamily_member(opfamily,
3025  op_lefttype, op_lefttype,
3027  rstatop = lstatop;
3028  revltop = ltop;
3029  revleop = leop;
3030  }
3031  else
3032  {
3033  ltop = get_opfamily_member(opfamily,
3034  op_lefttype, op_righttype,
3036  leop = get_opfamily_member(opfamily,
3037  op_lefttype, op_righttype,
3039  lsortop = get_opfamily_member(opfamily,
3040  op_lefttype, op_lefttype,
3042  rsortop = get_opfamily_member(opfamily,
3043  op_righttype, op_righttype,
3045  lstatop = get_opfamily_member(opfamily,
3046  op_lefttype, op_lefttype,
3048  rstatop = get_opfamily_member(opfamily,
3049  op_righttype, op_righttype,
3051  revltop = get_opfamily_member(opfamily,
3052  op_righttype, op_lefttype,
3054  revleop = get_opfamily_member(opfamily,
3055  op_righttype, op_lefttype,
3057  }
3058  break;
3059  default:
3060  goto fail; /* shouldn't get here */
3061  }
3062 
3063  if (!OidIsValid(lsortop) ||
3064  !OidIsValid(rsortop) ||
3065  !OidIsValid(lstatop) ||
3066  !OidIsValid(rstatop) ||
3067  !OidIsValid(ltop) ||
3068  !OidIsValid(leop) ||
3069  !OidIsValid(revltop) ||
3070  !OidIsValid(revleop))
3071  goto fail; /* insufficient info in catalogs */
3072 
3073  /* Try to get ranges of both inputs */
3074  if (!isgt)
3075  {
3076  if (!get_variable_range(root, &leftvar, lstatop, collation,
3077  &leftmin, &leftmax))
3078  goto fail; /* no range available from stats */
3079  if (!get_variable_range(root, &rightvar, rstatop, collation,
3080  &rightmin, &rightmax))
3081  goto fail; /* no range available from stats */
3082  }
3083  else
3084  {
3085  /* need to swap the max and min */
3086  if (!get_variable_range(root, &leftvar, lstatop, collation,
3087  &leftmax, &leftmin))
3088  goto fail; /* no range available from stats */
3089  if (!get_variable_range(root, &rightvar, rstatop, collation,
3090  &rightmax, &rightmin))
3091  goto fail; /* no range available from stats */
3092  }
3093 
3094  /*
3095  * Now, the fraction of the left variable that will be scanned is the
3096  * fraction that's <= the right-side maximum value. But only believe
3097  * non-default estimates, else stick with our 1.0.
3098  */
3099  selec = scalarineqsel(root, leop, isgt, true, collation, &leftvar,
3100  rightmax, op_righttype);
3101  if (selec != DEFAULT_INEQ_SEL)
3102  *leftend = selec;
3103 
3104  /* And similarly for the right variable. */
3105  selec = scalarineqsel(root, revleop, isgt, true, collation, &rightvar,
3106  leftmax, op_lefttype);
3107  if (selec != DEFAULT_INEQ_SEL)
3108  *rightend = selec;
3109 
3110  /*
3111  * Only one of the two "end" fractions can really be less than 1.0;
3112  * believe the smaller estimate and reset the other one to exactly 1.0. If
3113  * we get exactly equal estimates (as can easily happen with self-joins),
3114  * believe neither.
3115  */
3116  if (*leftend > *rightend)
3117  *leftend = 1.0;
3118  else if (*leftend < *rightend)
3119  *rightend = 1.0;
3120  else
3121  *leftend = *rightend = 1.0;
3122 
3123  /*
3124  * Also, the fraction of the left variable that will be scanned before the
3125  * first join pair is found is the fraction that's < the right-side
3126  * minimum value. But only believe non-default estimates, else stick with
3127  * our own default.
3128  */
3129  selec = scalarineqsel(root, ltop, isgt, false, collation, &leftvar,
3130  rightmin, op_righttype);
3131  if (selec != DEFAULT_INEQ_SEL)
3132  *leftstart = selec;
3133 
3134  /* And similarly for the right variable. */
3135  selec = scalarineqsel(root, revltop, isgt, false, collation, &rightvar,
3136  leftmin, op_lefttype);
3137  if (selec != DEFAULT_INEQ_SEL)
3138  *rightstart = selec;
3139 
3140  /*
3141  * Only one of the two "start" fractions can really be more than zero;
3142  * believe the larger estimate and reset the other one to exactly 0.0. If
3143  * we get exactly equal estimates (as can easily happen with self-joins),
3144  * believe neither.
3145  */
3146  if (*leftstart < *rightstart)
3147  *leftstart = 0.0;
3148  else if (*leftstart > *rightstart)
3149  *rightstart = 0.0;
3150  else
3151  *leftstart = *rightstart = 0.0;
3152 
3153  /*
3154  * If the sort order is nulls-first, we're going to have to skip over any
3155  * nulls too. These would not have been counted by scalarineqsel, and we
3156  * can safely add in this fraction regardless of whether we believe
3157  * scalarineqsel's results or not. But be sure to clamp the sum to 1.0!
3158  */
3159  if (nulls_first)
3160  {
3161  Form_pg_statistic stats;
3162 
3163  if (HeapTupleIsValid(leftvar.statsTuple))
3164  {
3165  stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3166  *leftstart += stats->stanullfrac;
3167  CLAMP_PROBABILITY(*leftstart);
3168  *leftend += stats->stanullfrac;
3169  CLAMP_PROBABILITY(*leftend);
3170  }
3171  if (HeapTupleIsValid(rightvar.statsTuple))
3172  {
3173  stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3174  *rightstart += stats->stanullfrac;
3175  CLAMP_PROBABILITY(*rightstart);
3176  *rightend += stats->stanullfrac;
3177  CLAMP_PROBABILITY(*rightend);
3178  }
3179  }
3180 
3181  /* Disbelieve start >= end, just in case that can happen */
3182  if (*leftstart >= *leftend)
3183  {
3184  *leftstart = 0.0;
3185  *leftend = 1.0;
3186  }
3187  if (*rightstart >= *rightend)
3188  {
3189  *rightstart = 0.0;
3190  *rightend = 1.0;
3191  }
3192 
3193 fail:
3194  ReleaseVariableStats(leftvar);
3195  ReleaseVariableStats(rightvar);
3196 }
3197 
3198 
3199 /*
3200  * matchingsel -- generic matching-operator selectivity support
3201  *
3202  * Use these for any operators that (a) are on data types for which we collect
3203  * standard statistics, and (b) have behavior for which the default estimate
3204  * (twice DEFAULT_EQ_SEL) is sane. Typically that is good for match-like
3205  * operators.
3206  */
3207 
3208 Datum
3210 {
3211  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
3212  Oid operator = PG_GETARG_OID(1);
3213  List *args = (List *) PG_GETARG_POINTER(2);
3214  int varRelid = PG_GETARG_INT32(3);
3215  Oid collation = PG_GET_COLLATION();
3216  double selec;
3217 
3218  /* Use generic restriction selectivity logic. */
3219  selec = generic_restriction_selectivity(root, operator, collation,
3220  args, varRelid,
3222 
3223  PG_RETURN_FLOAT8((float8) selec);
3224 }
3225 
3226 Datum
3228 {
3229  /* Just punt, for the moment. */
3231 }
3232 
3233 
3234 /*
3235  * Helper routine for estimate_num_groups: add an item to a list of
3236  * GroupVarInfos, but only if it's not known equal to any of the existing
3237  * entries.
3238  */
3239 typedef struct
3240 {
3241  Node *var; /* might be an expression, not just a Var */
3242  RelOptInfo *rel; /* relation it belongs to */
3243  double ndistinct; /* # distinct values */
3244 } GroupVarInfo;
3245 
3246 static List *
3248  Node *var, VariableStatData *vardata)
3249 {
3250  GroupVarInfo *varinfo;
3251  double ndistinct;
3252  bool isdefault;
3253  ListCell *lc;
3254 
3255  ndistinct = get_variable_numdistinct(vardata, &isdefault);
3256 
3257  foreach(lc, varinfos)
3258  {
3259  varinfo = (GroupVarInfo *) lfirst(lc);
3260 
3261  /* Drop exact duplicates */
3262  if (equal(var, varinfo->var))
3263  return varinfos;
3264 
3265  /*
3266  * Drop known-equal vars, but only if they belong to different
3267  * relations (see comments for estimate_num_groups)
3268  */
3269  if (vardata->rel != varinfo->rel &&
3270  exprs_known_equal(root, var, varinfo->var))
3271  {
3272  if (varinfo->ndistinct <= ndistinct)
3273  {
3274  /* Keep older item, forget new one */
3275  return varinfos;
3276  }
3277  else
3278  {
3279  /* Delete the older item */
3280  varinfos = foreach_delete_current(varinfos, lc);
3281  }
3282  }
3283  }
3284 
3285  varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3286 
3287  varinfo->var = var;
3288  varinfo->rel = vardata->rel;
3289  varinfo->ndistinct = ndistinct;
3290  varinfos = lappend(varinfos, varinfo);
3291  return varinfos;
3292 }
3293 
3294 /*
3295  * estimate_num_groups - Estimate number of groups in a grouped query
3296  *
3297  * Given a query having a GROUP BY clause, estimate how many groups there
3298  * will be --- ie, the number of distinct combinations of the GROUP BY
3299  * expressions.
3300  *
3301  * This routine is also used to estimate the number of rows emitted by
3302  * a DISTINCT filtering step; that is an isomorphic problem. (Note:
3303  * actually, we only use it for DISTINCT when there's no grouping or
3304  * aggregation ahead of the DISTINCT.)
3305  *
3306  * Inputs:
3307  * root - the query
3308  * groupExprs - list of expressions being grouped by
3309  * input_rows - number of rows estimated to arrive at the group/unique
3310  * filter step
3311  * pgset - NULL, or a List** pointing to a grouping set to filter the
3312  * groupExprs against
3313  *
3314  * Given the lack of any cross-correlation statistics in the system, it's
3315  * impossible to do anything really trustworthy with GROUP BY conditions
3316  * involving multiple Vars. We should however avoid assuming the worst
3317  * case (all possible cross-product terms actually appear as groups) since
3318  * very often the grouped-by Vars are highly correlated. Our current approach
3319  * is as follows:
3320  * 1. Expressions yielding boolean are assumed to contribute two groups,
3321  * independently of their content, and are ignored in the subsequent
3322  * steps. This is mainly because tests like "col IS NULL" break the
3323  * heuristic used in step 2 especially badly.
3324  * 2. Reduce the given expressions to a list of unique Vars used. For
3325  * example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3326  * It is clearly correct not to count the same Var more than once.
3327  * It is also reasonable to treat f(x) the same as x: f() cannot
3328  * increase the number of distinct values (unless it is volatile,
3329  * which we consider unlikely for grouping), but it probably won't
3330  * reduce the number of distinct values much either.
3331  * As a special case, if a GROUP BY expression can be matched to an
3332  * expressional index for which we have statistics, then we treat the
3333  * whole expression as though it were just a Var.
3334  * 3. If the list contains Vars of different relations that are known equal
3335  * due to equivalence classes, then drop all but one of the Vars from each
3336  * known-equal set, keeping the one with smallest estimated # of values
3337  * (since the extra values of the others can't appear in joined rows).
3338  * Note the reason we only consider Vars of different relations is that
3339  * if we considered ones of the same rel, we'd be double-counting the
3340  * restriction selectivity of the equality in the next step.
3341  * 4. For Vars within a single source rel, we multiply together the numbers
3342  * of values, clamp to the number of rows in the rel (divided by 10 if
3343  * more than one Var), and then multiply by a factor based on the
3344  * selectivity of the restriction clauses for that rel. When there's
3345  * more than one Var, the initial product is probably too high (it's the
3346  * worst case) but clamping to a fraction of the rel's rows seems to be a
3347  * helpful heuristic for not letting the estimate get out of hand. (The
3348  * factor of 10 is derived from pre-Postgres-7.4 practice.) The factor
3349  * we multiply by to adjust for the restriction selectivity assumes that
3350  * the restriction clauses are independent of the grouping, which may not
3351  * be a valid assumption, but it's hard to do better.
3352  * 5. If there are Vars from multiple rels, we repeat step 4 for each such
3353  * rel, and multiply the results together.
3354  * Note that rels not containing grouped Vars are ignored completely, as are
3355  * join clauses. Such rels cannot increase the number of groups, and we
3356  * assume such clauses do not reduce the number either (somewhat bogus,
3357  * but we don't have the info to do better).
3358  */
3359 double
3360 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3361  List **pgset)
3362 {
3363  List *varinfos = NIL;
3364  double srf_multiplier = 1.0;
3365  double numdistinct;
3366  ListCell *l;
3367  int i;
3368 
3369  /*
3370  * We don't ever want to return an estimate of zero groups, as that tends
3371  * to lead to division-by-zero and other unpleasantness. The input_rows
3372  * estimate is usually already at least 1, but clamp it just in case it
3373  * isn't.
3374  */
3375  input_rows = clamp_row_est(input_rows);
3376 
3377  /*
3378  * If no grouping columns, there's exactly one group. (This can't happen
3379  * for normal cases with GROUP BY or DISTINCT, but it is possible for
3380  * corner cases with set operations.)
3381  */
3382  if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3383  return 1.0;
3384 
3385  /*
3386  * Count groups derived from boolean grouping expressions. For other
3387  * expressions, find the unique Vars used, treating an expression as a Var
3388  * if we can find stats for it. For each one, record the statistical
3389  * estimate of number of distinct values (total in its table, without
3390  * regard for filtering).
3391  */
3392  numdistinct = 1.0;
3393 
3394  i = 0;
3395  foreach(l, groupExprs)
3396  {
3397  Node *groupexpr = (Node *) lfirst(l);
3398  double this_srf_multiplier;
3399  VariableStatData vardata;
3400  List *varshere;
3401  ListCell *l2;
3402 
3403  /* is expression in this grouping set? */
3404  if (pgset && !list_member_int(*pgset, i++))
3405  continue;
3406 
3407  /*
3408  * Set-returning functions in grouping columns are a bit problematic.
3409  * The code below will effectively ignore their SRF nature and come up
3410  * with a numdistinct estimate as though they were scalar functions.
3411  * We compensate by scaling up the end result by the largest SRF
3412  * rowcount estimate. (This will be an overestimate if the SRF
3413  * produces multiple copies of any output value, but it seems best to
3414  * assume the SRF's outputs are distinct. In any case, it's probably
3415  * pointless to worry too much about this without much better
3416  * estimates for SRF output rowcounts than we have today.)
3417  */
3418  this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
3419  if (srf_multiplier < this_srf_multiplier)
3420  srf_multiplier = this_srf_multiplier;
3421 
3422  /* Short-circuit for expressions returning boolean */
3423  if (exprType(groupexpr) == BOOLOID)
3424  {
3425  numdistinct *= 2.0;
3426  continue;
3427  }
3428 
3429  /*
3430  * If examine_variable is able to deduce anything about the GROUP BY
3431  * expression, treat it as a single variable even if it's really more
3432  * complicated.
3433  */
3434  examine_variable(root, groupexpr, 0, &vardata);
3435  if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3436  {
3437  varinfos = add_unique_group_var(root, varinfos,
3438  groupexpr, &vardata);
3439  ReleaseVariableStats(vardata);
3440  continue;
3441  }
3442  ReleaseVariableStats(vardata);
3443 
3444  /*
3445  * Else pull out the component Vars. Handle PlaceHolderVars by
3446  * recursing into their arguments (effectively assuming that the
3447  * PlaceHolderVar doesn't change the number of groups, which boils
3448  * down to ignoring the possible addition of nulls to the result set).
3449  */
3450  varshere = pull_var_clause(groupexpr,
3454 
3455  /*
3456  * If we find any variable-free GROUP BY item, then either it is a
3457  * constant (and we can ignore it) or it contains a volatile function;
3458  * in the latter case we punt and assume that each input row will
3459  * yield a distinct group.
3460  */
3461  if (varshere == NIL)
3462  {
3463  if (contain_volatile_functions(groupexpr))
3464  return input_rows;
3465  continue;
3466  }
3467 
3468  /*
3469  * Else add variables to varinfos list
3470  */
3471  foreach(l2, varshere)
3472  {
3473  Node *var = (Node *) lfirst(l2);
3474 
3475  examine_variable(root, var, 0, &vardata);
3476  varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3477  ReleaseVariableStats(vardata);
3478  }
3479  }
3480 
3481  /*
3482  * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3483  * list.
3484  */
3485  if (varinfos == NIL)
3486  {
3487  /* Apply SRF multiplier as we would do in the long path */
3488  numdistinct *= srf_multiplier;
3489  /* Round off */
3490  numdistinct = ceil(numdistinct);
3491  /* Guard against out-of-range answers */
3492  if (numdistinct > input_rows)
3493  numdistinct = input_rows;
3494  if (numdistinct < 1.0)
3495  numdistinct = 1.0;
3496  return numdistinct;
3497  }
3498 
3499  /*
3500  * Group Vars by relation and estimate total numdistinct.
3501  *
3502  * For each iteration of the outer loop, we process the frontmost Var in
3503  * varinfos, plus all other Vars in the same relation. We remove these
3504  * Vars from the newvarinfos list for the next iteration. This is the
3505  * easiest way to group Vars of same rel together.
3506  */
3507  do
3508  {
3509  GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3510  RelOptInfo *rel = varinfo1->rel;
3511  double reldistinct = 1;
3512  double relmaxndistinct = reldistinct;
3513  int relvarcount = 0;
3514  List *newvarinfos = NIL;
3515  List *relvarinfos = NIL;
3516 
3517  /*
3518  * Split the list of varinfos in two - one for the current rel, one
3519  * for remaining Vars on other rels.
3520  */
3521  relvarinfos = lappend(relvarinfos, varinfo1);
3522  for_each_from(l, varinfos, 1)
3523  {
3524  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3525 
3526  if (varinfo2->rel == varinfo1->rel)
3527  {
3528  /* varinfos on current rel */
3529  relvarinfos = lappend(relvarinfos, varinfo2);
3530  }
3531  else
3532  {
3533  /* not time to process varinfo2 yet */
3534  newvarinfos = lappend(newvarinfos, varinfo2);
3535  }
3536  }
3537 
3538  /*
3539  * Get the numdistinct estimate for the Vars of this rel. We
3540  * iteratively search for multivariate n-distinct with maximum number
3541  * of vars; assuming that each var group is independent of the others,
3542  * we multiply them together. Any remaining relvarinfos after no more
3543  * multivariate matches are found are assumed independent too, so
3544  * their individual ndistinct estimates are multiplied also.
3545  *
3546  * While iterating, count how many separate numdistinct values we
3547  * apply. We apply a fudge factor below, but only if we multiplied
3548  * more than one such values.
3549  */
3550  while (relvarinfos)
3551  {
3552  double mvndistinct;
3553 
3554  if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3555  &mvndistinct))
3556  {
3557  reldistinct *= mvndistinct;
3558  if (relmaxndistinct < mvndistinct)
3559  relmaxndistinct = mvndistinct;
3560  relvarcount++;
3561  }
3562  else
3563  {
3564  foreach(l, relvarinfos)
3565  {
3566  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3567 
3568  reldistinct *= varinfo2->ndistinct;
3569  if (relmaxndistinct < varinfo2->ndistinct)
3570  relmaxndistinct = varinfo2->ndistinct;
3571  relvarcount++;
3572  }
3573 
3574  /* we're done with this relation */
3575  relvarinfos = NIL;
3576  }
3577  }
3578 
3579  /*
3580  * Sanity check --- don't divide by zero if empty relation.
3581  */
3582  Assert(IS_SIMPLE_REL(rel));
3583  if (rel->tuples > 0)
3584  {
3585  /*
3586  * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3587  * fudge factor is because the Vars are probably correlated but we
3588  * don't know by how much. We should never clamp to less than the
3589  * largest ndistinct value for any of the Vars, though, since
3590  * there will surely be at least that many groups.
3591  */
3592  double clamp = rel->tuples;
3593 
3594  if (relvarcount > 1)
3595  {
3596  clamp *= 0.1;
3597  if (clamp < relmaxndistinct)
3598  {
3599  clamp = relmaxndistinct;
3600  /* for sanity in case some ndistinct is too large: */
3601  if (clamp > rel->tuples)
3602  clamp = rel->tuples;
3603  }
3604  }
3605  if (reldistinct > clamp)
3606  reldistinct = clamp;
3607 
3608  /*
3609  * Update the estimate based on the restriction selectivity,
3610  * guarding against division by zero when reldistinct is zero.
3611  * Also skip this if we know that we are returning all rows.
3612  */
3613  if (reldistinct > 0 && rel->rows < rel->tuples)
3614  {
3615  /*
3616  * Given a table containing N rows with n distinct values in a
3617  * uniform distribution, if we select p rows at random then
3618  * the expected number of distinct values selected is
3619  *
3620  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3621  *
3622  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3623  *
3624  * See "Approximating block accesses in database
3625  * organizations", S. B. Yao, Communications of the ACM,
3626  * Volume 20 Issue 4, April 1977 Pages 260-261.
3627  *
3628  * Alternatively, re-arranging the terms from the factorials,
3629  * this may be written as
3630  *
3631  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3632  *
3633  * This form of the formula is more efficient to compute in
3634  * the common case where p is larger than N/n. Additionally,
3635  * as pointed out by Dell'Era, if i << N for all terms in the
3636  * product, it can be approximated by
3637  *
3638  * n * (1 - ((N-p)/N)^(N/n))
3639  *
3640  * See "Expected distinct values when selecting from a bag
3641  * without replacement", Alberto Dell'Era,
3642  * http://www.adellera.it/investigations/distinct_balls/.
3643  *
3644  * The condition i << N is equivalent to n >> 1, so this is a
3645  * good approximation when the number of distinct values in
3646  * the table is large. It turns out that this formula also
3647  * works well even when n is small.
3648  */
3649  reldistinct *=
3650  (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3651  rel->tuples / reldistinct));
3652  }
3653  reldistinct = clamp_row_est(reldistinct);
3654 
3655  /*
3656  * Update estimate of total distinct groups.
3657  */
3658  numdistinct *= reldistinct;
3659  }
3660 
3661  varinfos = newvarinfos;
3662  } while (varinfos != NIL);
3663 
3664  /* Now we can account for the effects of any SRFs */
3665  numdistinct *= srf_multiplier;
3666 
3667  /* Round off */
3668  numdistinct = ceil(numdistinct);
3669 
3670  /* Guard against out-of-range answers */
3671  if (numdistinct > input_rows)
3672  numdistinct = input_rows;
3673  if (numdistinct < 1.0)
3674  numdistinct = 1.0;
3675 
3676  return numdistinct;
3677 }
3678 
3679 /*
3680  * Estimate hash bucket statistics when the specified expression is used
3681  * as a hash key for the given number of buckets.
3682  *
3683  * This attempts to determine two values:
3684  *
3685  * 1. The frequency of the most common value of the expression (returns
3686  * zero into *mcv_freq if we can't get that).
3687  *
3688  * 2. The "bucketsize fraction", ie, average number of entries in a bucket
3689  * divided by total tuples in relation.
3690  *
3691  * XXX This is really pretty bogus since we're effectively assuming that the
3692  * distribution of hash keys will be the same after applying restriction
3693  * clauses as it was in the underlying relation. However, we are not nearly
3694  * smart enough to figure out how the restrict clauses might change the
3695  * distribution, so this will have to do for now.
3696  *
3697  * We are passed the number of buckets the executor will use for the given
3698  * input relation. If the data were perfectly distributed, with the same
3699  * number of tuples going into each available bucket, then the bucketsize
3700  * fraction would be 1/nbuckets. But this happy state of affairs will occur
3701  * only if (a) there are at least nbuckets distinct data values, and (b)
3702  * we have a not-too-skewed data distribution. Otherwise the buckets will
3703  * be nonuniformly occupied. If the other relation in the join has a key
3704  * distribution similar to this one's, then the most-loaded buckets are
3705  * exactly those that will be probed most often. Therefore, the "average"
3706  * bucket size for costing purposes should really be taken as something close
3707  * to the "worst case" bucket size. We try to estimate this by adjusting the
3708  * fraction if there are too few distinct data values, and then scaling up
3709  * by the ratio of the most common value's frequency to the average frequency.
3710  *
3711  * If no statistics are available, use a default estimate of 0.1. This will
3712  * discourage use of a hash rather strongly if the inner relation is large,
3713  * which is what we want. We do not want to hash unless we know that the
3714  * inner rel is well-dispersed (or the alternatives seem much worse).
3715  *
3716  * The caller should also check that the mcv_freq is not so large that the
3717  * most common value would by itself require an impractically large bucket.
3718  * In a hash join, the executor can split buckets if they get too big, but
3719  * obviously that doesn't help for a bucket that contains many duplicates of
3720  * the same value.
3721  */
3722 void
3723 estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets,
3724  Selectivity *mcv_freq,
3725  Selectivity *bucketsize_frac)
3726 {
3727  VariableStatData vardata;
3728  double estfract,
3729  ndistinct,
3730  stanullfrac,
3731  avgfreq;
3732  bool isdefault;
3733  AttStatsSlot sslot;
3734 
3735  examine_variable(root, hashkey, 0, &vardata);
3736 
3737  /* Look up the frequency of the most common value, if available */
3738  *mcv_freq = 0.0;
3739 
3740  if (HeapTupleIsValid(vardata.statsTuple))
3741  {
3742  if (get_attstatsslot(&sslot, vardata.statsTuple,
3743  STATISTIC_KIND_MCV, InvalidOid,
3745  {
3746  /*
3747  * The first MCV stat is for the most common value.
3748  */
3749  if (sslot.nnumbers > 0)
3750  *mcv_freq = sslot.numbers[0];
3751  free_attstatsslot(&sslot);
3752  }
3753  }
3754 
3755  /* Get number of distinct values */
3756  ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3757 
3758  /*
3759  * If ndistinct isn't real, punt. We normally return 0.1, but if the
3760  * mcv_freq is known to be even higher than that, use it instead.
3761  */
3762  if (isdefault)
3763  {
3764  *bucketsize_frac = (Selectivity) Max(0.1, *mcv_freq);
3765  ReleaseVariableStats(vardata);
3766  return;
3767  }
3768 
3769  /* Get fraction that are null */
3770  if (HeapTupleIsValid(vardata.statsTuple))
3771  {
3772  Form_pg_statistic stats;
3773 
3774  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3775  stanullfrac = stats->stanullfrac;
3776  }
3777  else
3778  stanullfrac = 0.0;
3779 
3780  /* Compute avg freq of all distinct data values in raw relation */
3781  avgfreq = (1.0 - stanullfrac) / ndistinct;
3782 
3783  /*
3784  * Adjust ndistinct to account for restriction clauses. Observe we are
3785  * assuming that the data distribution is affected uniformly by the
3786  * restriction clauses!
3787  *
3788  * XXX Possibly better way, but much more expensive: multiply by
3789  * selectivity of rel's restriction clauses that mention the target Var.
3790  */
3791  if (vardata.rel && vardata.rel->tuples > 0)
3792  {
3793  ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3794  ndistinct = clamp_row_est(ndistinct);
3795  }
3796 
3797  /*
3798  * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3799  * number of buckets is less than the expected number of distinct values;
3800  * otherwise it is 1/ndistinct.
3801  */
3802  if (ndistinct > nbuckets)
3803  estfract = 1.0 / nbuckets;
3804  else
3805  estfract = 1.0 / ndistinct;
3806 
3807  /*
3808  * Adjust estimated bucketsize upward to account for skewed distribution.
3809  */
3810  if (avgfreq > 0.0 && *mcv_freq > avgfreq)
3811  estfract *= *mcv_freq / avgfreq;
3812 
3813  /*
3814  * Clamp bucketsize to sane range (the above adjustment could easily
3815  * produce an out-of-range result). We set the lower bound a little above
3816  * zero, since zero isn't a very sane result.
3817  */
3818  if (estfract < 1.0e-6)
3819  estfract = 1.0e-6;
3820  else if (estfract > 1.0)
3821  estfract = 1.0;
3822 
3823  *bucketsize_frac = (Selectivity) estfract;
3824 
3825  ReleaseVariableStats(vardata);
3826 }
3827 
3828 /*
3829  * estimate_hashagg_tablesize
3830  * estimate the number of bytes that a hash aggregate hashtable will
3831  * require based on the agg_costs, path width and number of groups.
3832  *
3833  * We return the result as "double" to forestall any possible overflow
3834  * problem in the multiplication by dNumGroups.
3835  *
3836  * XXX this may be over-estimating the size now that hashagg knows to omit
3837  * unneeded columns from the hashtable. Also for mixed-mode grouping sets,
3838  * grouping columns not in the hashed set are counted here even though hashagg
3839  * won't store them. Is this a problem?
3840  */
3841 double
3843  double dNumGroups)
3844 {
3845  Size hashentrysize = hash_agg_entry_size(agg_costs->numAggs,
3846  path->pathtarget->width,
3847  agg_costs->transitionSpace);
3848 
3849  /*
3850  * Note that this disregards the effect of fill-factor and growth policy
3851  * of the hash table. That's probably ok, given that the default
3852  * fill-factor is relatively high. It'd be hard to meaningfully factor in
3853  * "double-in-size" growth policies here.
3854  */
3855  return hashentrysize * dNumGroups;
3856 }
3857 
3858 
3859 /*-------------------------------------------------------------------------
3860  *
3861  * Support routines
3862  *
3863  *-------------------------------------------------------------------------
3864  */
3865 
3866 /*
3867  * Find applicable ndistinct statistics for the given list of VarInfos (which
3868  * must all belong to the given rel), and update *ndistinct to the estimate of
3869  * the MVNDistinctItem that best matches. If a match it found, *varinfos is
3870  * updated to remove the list of matched varinfos.
3871  *
3872  * Varinfos that aren't for simple Vars are ignored.
3873  *
3874  * Return true if we're able to find a match, false otherwise.
3875  */
3876 static bool
3878  List **varinfos, double *ndistinct)
3879 {
3880  ListCell *lc;
3881  Bitmapset *attnums = NULL;
3882  int nmatches;
3883  Oid statOid = InvalidOid;
3884  MVNDistinct *stats;
3885  Bitmapset *matched = NULL;
3886 
3887  /* bail out immediately if the table has no extended statistics */
3888  if (!rel->statlist)
3889  return false;
3890 
3891  /* Determine the attnums we're looking for */
3892  foreach(lc, *varinfos)
3893  {
3894  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3896 
3897  Assert(varinfo->rel == rel);
3898 
3899  if (!IsA(varinfo->var, Var))
3900  continue;
3901 
3902  attnum = ((Var *) varinfo->var)->varattno;
3903 
3904  if (!AttrNumberIsForUserDefinedAttr(attnum))
3905  continue;
3906 
3907  attnums = bms_add_member(attnums, attnum);
3908  }
3909 
3910  /* look for the ndistinct statistics matching the most vars */
3911  nmatches = 1; /* we require at least two matches */
3912  foreach(lc, rel->statlist)
3913  {
3914  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3915  Bitmapset *shared;
3916  int nshared;
3917 
3918  /* skip statistics of other kinds */
3919  if (info->kind != STATS_EXT_NDISTINCT)
3920  continue;
3921 
3922  /* compute attnums shared by the vars and the statistics object */
3923  shared = bms_intersect(info->keys, attnums);
3924  nshared = bms_num_members(shared);
3925 
3926  /*
3927  * Does this statistics object match more columns than the currently
3928  * best object? If so, use this one instead.
3929  *
3930  * XXX This should break ties using name of the object, or something
3931  * like that, to make the outcome stable.
3932  */
3933  if (nshared > nmatches)
3934  {
3935  statOid = info->statOid;
3936  nmatches = nshared;
3937  matched = shared;
3938  }
3939  }
3940 
3941  /* No match? */
3942  if (statOid == InvalidOid)
3943  return false;
3944  Assert(nmatches > 1 && matched != NULL);
3945 
3946  stats = statext_ndistinct_load(statOid);
3947 
3948  /*
3949  * If we have a match, search it for the specific item that matches (there
3950  * must be one), and construct the output values.
3951  */
3952  if (stats)
3953  {
3954  int i;
3955  List *newlist = NIL;
3956  MVNDistinctItem *item = NULL;
3957 
3958  /* Find the specific item that exactly matches the combination */
3959  for (i = 0; i < stats->nitems; i++)
3960  {
3961  MVNDistinctItem *tmpitem = &stats->items[i];
3962 
3963  if (bms_subset_compare(tmpitem->attrs, matched) == BMS_EQUAL)
3964  {
3965  item = tmpitem;
3966  break;
3967  }
3968  }
3969 
3970  /* make sure we found an item */
3971  if (!item)
3972  elog(ERROR, "corrupt MVNDistinct entry");
3973 
3974  /* Form the output varinfo list, keeping only unmatched ones */
3975  foreach(lc, *varinfos)
3976  {
3977  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3979 
3980  if (!IsA(varinfo->var, Var))
3981  {
3982  newlist = lappend(newlist, varinfo);
3983  continue;
3984  }
3985 
3986  attnum = ((Var *) varinfo->var)->varattno;
3987 
3988  if (!AttrNumberIsForUserDefinedAttr(attnum))
3989  continue;
3990 
3991  if (!bms_is_member(attnum, matched))
3992  newlist = lappend(newlist, varinfo);
3993  }
3994 
3995  *varinfos = newlist;
3996  *ndistinct = item->ndistinct;
3997  return true;
3998  }
3999 
4000  return false;
4001 }
4002 
4003 /*
4004  * convert_to_scalar
4005  * Convert non-NULL values of the indicated types to the comparison
4006  * scale needed by scalarineqsel().
4007  * Returns "true" if successful.
4008  *
4009  * XXX this routine is a hack: ideally we should look up the conversion
4010  * subroutines in pg_type.
4011  *
4012  * All numeric datatypes are simply converted to their equivalent
4013  * "double" values. (NUMERIC values that are outside the range of "double"
4014  * are clamped to +/- HUGE_VAL.)
4015  *
4016  * String datatypes are converted by convert_string_to_scalar(),
4017  * which is explained below. The reason why this routine deals with
4018  * three values at a time, not just one, is that we need it for strings.
4019  *
4020  * The bytea datatype is just enough different from strings that it has
4021  * to be treated separately.
4022  *
4023  * The several datatypes representing absolute times are all converted
4024  * to Timestamp, which is actually an int64, and then we promote that to
4025  * a double. Note this will give correct results even for the "special"
4026  * values of Timestamp, since those are chosen to compare correctly;
4027  * see timestamp_cmp.
4028  *
4029  * The several datatypes representing relative times (intervals) are all
4030  * converted to measurements expressed in seconds.
4031  */
4032 static bool
4033 convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue,
4034  Datum lobound, Datum hibound, Oid boundstypid,
4035  double *scaledlobound, double *scaledhibound)
4036 {
4037  bool failure = false;
4038 
4039  /*
4040  * Both the valuetypid and the boundstypid should exactly match the
4041  * declared input type(s) of the operator we are invoked for. However,
4042  * extensions might try to use scalarineqsel as estimator for operators
4043  * with input type(s) we don't handle here; in such cases, we want to
4044  * return false, not fail. In any case, we mustn't assume that valuetypid
4045  * and boundstypid are identical.
4046  *
4047  * XXX The histogram we are interpolating between points of could belong
4048  * to a column that's only binary-compatible with the declared type. In
4049  * essence we are assuming that the semantics of binary-compatible types
4050  * are enough alike that we can use a histogram generated with one type's
4051  * operators to estimate selectivity for the other's. This is outright
4052  * wrong in some cases --- in particular signed versus unsigned
4053  * interpretation could trip us up. But it's useful enough in the
4054  * majority of cases that we do it anyway. Should think about more
4055  * rigorous ways to do it.
4056  */
4057  switch (valuetypid)
4058  {
4059  /*
4060  * Built-in numeric types
4061  */
4062  case BOOLOID:
4063  case INT2OID:
4064  case INT4OID:
4065  case INT8OID:
4066  case FLOAT4OID:
4067  case FLOAT8OID:
4068  case NUMERICOID:
4069  case OIDOID:
4070  case REGPROCOID:
4071  case REGPROCEDUREOID:
4072  case REGOPEROID:
4073  case REGOPERATOROID:
4074  case REGCLASSOID:
4075  case REGTYPEOID:
4076  case REGCONFIGOID:
4077  case REGDICTIONARYOID:
4078  case REGROLEOID:
4079  case REGNAMESPACEOID:
4080  *scaledvalue = convert_numeric_to_scalar(value, valuetypid,
4081  &failure);
4082  *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
4083  &failure);
4084  *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid,
4085  &failure);
4086  return !failure;
4087 
4088  /*
4089  * Built-in string types
4090  */
4091  case CHAROID:
4092  case BPCHAROID:
4093  case VARCHAROID:
4094  case TEXTOID:
4095  case NAMEOID:
4096  {
4097  char *valstr = convert_string_datum(value, valuetypid,
4098  collid, &failure);
4099  char *lostr = convert_string_datum(lobound, boundstypid,
4100  collid, &failure);
4101  char *histr = convert_string_datum(hibound, boundstypid,
4102  collid, &failure);
4103 
4104  /*
4105  * Bail out if any of the values is not of string type. We
4106  * might leak converted strings for the other value(s), but
4107  * that's not worth troubling over.
4108  */
4109  if (failure)
4110  return false;
4111 
4112  convert_string_to_scalar(valstr, scaledvalue,
4113  lostr, scaledlobound,
4114  histr, scaledhibound);
4115  pfree(valstr);
4116  pfree(lostr);
4117  pfree(histr);
4118  return true;
4119  }
4120 
4121  /*
4122  * Built-in bytea type
4123  */
4124  case BYTEAOID:
4125  {
4126  /* We only support bytea vs bytea comparison */
4127  if (boundstypid != BYTEAOID)
4128  return false;
4129  convert_bytea_to_scalar(value, scaledvalue,
4130  lobound, scaledlobound,
4131  hibound, scaledhibound);
4132  return true;
4133  }
4134 
4135  /*
4136  * Built-in time types
4137  */
4138  case TIMESTAMPOID:
4139  case TIMESTAMPTZOID:
4140  case DATEOID:
4141  case INTERVALOID:
4142  case TIMEOID:
4143  case TIMETZOID:
4144  *scaledvalue = convert_timevalue_to_scalar(value, valuetypid,
4145  &failure);
4146  *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid,
4147  &failure);
4148  *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid,
4149  &failure);
4150  return !failure;
4151 
4152  /*
4153  * Built-in network types
4154  */
4155  case INETOID:
4156  case CIDROID:
4157  case MACADDROID:
4158  case MACADDR8OID:
4159  *scaledvalue = convert_network_to_scalar(value, valuetypid,
4160  &failure);
4161  *scaledlobound = convert_network_to_scalar(lobound, boundstypid,
4162  &failure);
4163  *scaledhibound = convert_network_to_scalar(hibound, boundstypid,
4164  &failure);
4165  return !failure;
4166  }
4167  /* Don't know how to convert */
4168  *scaledvalue = *scaledlobound = *scaledhibound = 0;
4169  return false;
4170 }
4171 
4172 /*
4173  * Do convert_to_scalar()'s work for any numeric data type.
4174  *
4175  * On failure (e.g., unsupported typid), set *failure to true;
4176  * otherwise, that variable is not changed.
4177  */
4178 static double
4180 {
4181  switch (typid)
4182  {
4183  case BOOLOID:
4184  return (double) DatumGetBool(value);
4185  case INT2OID:
4186  return (double) DatumGetInt16(value);
4187  case INT4OID:
4188  return (double) DatumGetInt32(value);
4189  case INT8OID:
4190  return (double) DatumGetInt64(value);
4191  case FLOAT4OID:
4192  return (double) DatumGetFloat4(value);
4193  case FLOAT8OID:
4194  return (double) DatumGetFloat8(value);
4195  case NUMERICOID:
4196  /* Note: out-of-range values will be clamped to +-HUGE_VAL */
4197  return (double)
4199  value));
4200  case OIDOID:
4201  case REGPROCOID:
4202  case REGPROCEDUREOID:
4203  case REGOPEROID:
4204  case REGOPERATOROID:
4205  case REGCLASSOID:
4206  case REGTYPEOID:
4207  case REGCONFIGOID:
4208  case REGDICTIONARYOID:
4209  case REGROLEOID:
4210  case REGNAMESPACEOID:
4211  /* we can treat OIDs as integers... */
4212  return (double) DatumGetObjectId(value);
4213  }
4214 
4215  *failure = true;
4216  return 0;
4217 }
4218 
4219 /*
4220  * Do convert_to_scalar()'s work for any character-string data type.
4221  *
4222  * String datatypes are converted to a scale that ranges from 0 to 1,
4223  * where we visualize the bytes of the string as fractional digits.
4224  *
4225  * We do not want the base to be 256, however, since that tends to
4226  * generate inflated selectivity estimates; few databases will have
4227  * occurrences of all 256 possible byte values at each position.
4228  * Instead, use the smallest and largest byte values seen in the bounds
4229  * as the estimated range for each byte, after some fudging to deal with
4230  * the fact that we probably aren't going to see the full range that way.
4231  *
4232  * An additional refinement is that we discard any common prefix of the
4233  * three strings before computing the scaled values. This allows us to
4234  * "zoom in" when we encounter a narrow data range. An example is a phone
4235  * number database where all the values begin with the same area code.
4236  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
4237  * so this is more likely to happen than you might think.)
4238  */
4239 static void
4241  double *scaledvalue,
4242  char *lobound,
4243  double *scaledlobound,
4244  char *hibound,
4245  double *scaledhibound)
4246 {
4247  int rangelo,
4248  rangehi;
4249  char *sptr;
4250 
4251  rangelo = rangehi = (unsigned char) hibound[0];
4252  for (sptr = lobound; *sptr; sptr++)
4253  {
4254  if (rangelo > (unsigned char) *sptr)
4255  rangelo = (unsigned char) *sptr;
4256  if (rangehi < (unsigned char) *sptr)
4257  rangehi = (unsigned char) *sptr;
4258  }
4259  for (sptr = hibound; *sptr; sptr++)
4260  {
4261  if (rangelo > (unsigned char) *sptr)
4262  rangelo = (unsigned char) *sptr;
4263  if (rangehi < (unsigned char) *sptr)
4264  rangehi = (unsigned char) *sptr;
4265  }
4266  /* If range includes any upper-case ASCII chars, make it include all */
4267  if (rangelo <= 'Z' && rangehi >= 'A')
4268  {
4269  if (rangelo > 'A')
4270  rangelo = 'A';
4271  if (rangehi < 'Z')
4272  rangehi = 'Z';
4273  }
4274  /* Ditto lower-case */
4275  if (rangelo <= 'z' && rangehi >= 'a')
4276  {
4277  if (rangelo > 'a')
4278  rangelo = 'a';
4279  if (rangehi < 'z')
4280  rangehi = 'z';
4281  }
4282  /* Ditto digits */
4283  if (rangelo <= '9' && rangehi >= '0')
4284  {
4285  if (rangelo > '0')
4286  rangelo = '0';
4287  if (rangehi < '9')
4288  rangehi = '9';
4289  }
4290 
4291  /*
4292  * If range includes less than 10 chars, assume we have not got enough
4293  * data, and make it include regular ASCII set.
4294  */
4295  if (rangehi - rangelo < 9)
4296  {
4297  rangelo = ' ';
4298  rangehi = 127;
4299  }
4300 
4301  /*
4302  * Now strip any common prefix of the three strings.
4303  */
4304  while (*lobound)
4305  {
4306  if (*lobound != *hibound || *lobound != *value)
4307  break;
4308  lobound++, hibound++, value++;
4309  }
4310 
4311  /*
4312  * Now we can do the conversions.
4313  */
4314  *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4315  *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4316  *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4317 }
4318 
4319 static double
4320 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
4321 {
4322  int slen = strlen(value);
4323  double num,
4324  denom,
4325  base;
4326 
4327  if (slen <= 0)
4328  return 0.0; /* empty string has scalar value 0 */
4329 
4330  /*
4331  * There seems little point in considering more than a dozen bytes from
4332  * the string. Since base is at least 10, that will give us nominal
4333  * resolution of at least 12 decimal digits, which is surely far more
4334  * precision than this estimation technique has got anyway (especially in
4335  * non-C locales). Also, even with the maximum possible base of 256, this
4336  * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4337  * overflow on any known machine.
4338  */
4339  if (slen > 12)
4340  slen = 12;
4341 
4342  /* Convert initial characters to fraction */
4343  base = rangehi - rangelo + 1;
4344  num = 0.0;
4345  denom = base;
4346  while (slen-- > 0)
4347  {
4348  int ch = (unsigned char) *value++;
4349 
4350  if (ch < rangelo)
4351  ch = rangelo - 1;
4352  else if (ch > rangehi)
4353  ch = rangehi + 1;
4354  num += ((double) (ch - rangelo)) / denom;
4355  denom *= base;
4356  }
4357 
4358  return num;
4359 }
4360 
4361 /*
4362  * Convert a string-type Datum into a palloc'd, null-terminated string.
4363  *
4364  * On failure (e.g., unsupported typid), set *failure to true;
4365  * otherwise, that variable is not changed. (We'll return NULL on failure.)
4366  *
4367  * When using a non-C locale, we must pass the string through strxfrm()
4368  * before continuing, so as to generate correct locale-specific results.
4369  */
4370 static char *
4371 convert_string_datum(Datum value, Oid typid, Oid collid, bool *failure)
4372 {
4373  char *val;
4374 
4375  switch (typid)
4376  {
4377  case CHAROID:
4378  val = (char *) palloc(2);
4379  val[0] = DatumGetChar(value);
4380  val[1] = '\0';
4381  break;
4382  case BPCHAROID:
4383  case VARCHAROID:
4384  case TEXTOID:
4385  val = TextDatumGetCString(value);
4386  break;
4387  case NAMEOID:
4388  {
4389  NameData *nm = (NameData *) DatumGetPointer(value);
4390 
4391  val = pstrdup(NameStr(*nm));
4392  break;
4393  }
4394  default:
4395  *failure = true;
4396  return NULL;
4397  }
4398 
4399  if (!lc_collate_is_c(collid))
4400  {
4401  char *xfrmstr;
4402  size_t xfrmlen;
4403  size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4404 
4405  /*
4406  * XXX: We could guess at a suitable output buffer size and only call
4407  * strxfrm twice if our guess is too small.
4408  *
4409  * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
4410  * bogus data or set an error. This is not really a problem unless it
4411  * crashes since it will only give an estimation error and nothing
4412  * fatal.
4413  */
4414  xfrmlen = strxfrm(NULL, val, 0);
4415 #ifdef WIN32
4416 
4417  /*
4418  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4419  * of trying to allocate this much memory (and fail), just return the
4420  * original string unmodified as if we were in the C locale.
4421  */
4422  if (xfrmlen == INT_MAX)
4423  return val;
4424 #endif
4425  xfrmstr = (char *) palloc(xfrmlen + 1);
4426  xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4427 
4428  /*
4429  * Some systems (e.g., glibc) can return a smaller value from the
4430  * second call than the first; thus the Assert must be <= not ==.
4431  */
4432  Assert(xfrmlen2 <= xfrmlen);
4433  pfree(val);
4434  val = xfrmstr;
4435  }
4436 
4437  return val;
4438 }
4439 
4440 /*
4441  * Do convert_to_scalar()'s work for any bytea data type.
4442  *
4443  * Very similar to convert_string_to_scalar except we can't assume
4444  * null-termination and therefore pass explicit lengths around.
4445  *
4446  * Also, assumptions about likely "normal" ranges of characters have been
4447  * removed - a data range of 0..255 is always used, for now. (Perhaps
4448  * someday we will add information about actual byte data range to
4449  * pg_statistic.)
4450  */
4451 static void
4453  double *scaledvalue,
4454  Datum lobound,
4455  double *scaledlobound,
4456  Datum hibound,
4457  double *scaledhibound)
4458 {
4459  bytea *valuep = DatumGetByteaPP(value);
4460  bytea *loboundp = DatumGetByteaPP(lobound);
4461  bytea *hiboundp = DatumGetByteaPP(hibound);
4462  int rangelo,
4463  rangehi,
4464  valuelen = VARSIZE_ANY_EXHDR(valuep),
4465  loboundlen = VARSIZE_ANY_EXHDR(loboundp),
4466  hiboundlen = VARSIZE_ANY_EXHDR(hiboundp),
4467  i,
4468  minlen;
4469  unsigned char *valstr = (unsigned char *) VARDATA_ANY(valuep);
4470  unsigned char *lostr = (unsigned char *) VARDATA_ANY(loboundp);
4471  unsigned char *histr = (unsigned char *) VARDATA_ANY(hiboundp);
4472 
4473  /*
4474  * Assume bytea data is uniformly distributed across all byte values.
4475  */
4476  rangelo = 0;
4477  rangehi = 255;
4478 
4479  /*
4480  * Now strip any common prefix of the three strings.
4481  */
4482  minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4483  for (i = 0; i < minlen; i++)
4484  {
4485  if (*lostr != *histr || *lostr != *valstr)
4486  break;
4487  lostr++, histr++, valstr++;
4488  loboundlen--, hiboundlen--, valuelen--;
4489  }
4490 
4491  /*
4492  * Now we can do the conversions.
4493  */
4494  *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4495  *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4496  *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4497 }
4498 
4499 static double
4500 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4501  int rangelo, int rangehi)
4502 {
4503  double num,
4504  denom,
4505  base;
4506 
4507  if (valuelen <= 0)
4508  return 0.0; /* empty string has scalar value 0 */
4509 
4510  /*
4511  * Since base is 256, need not consider more than about 10 chars (even
4512  * this many seems like overkill)
4513  */
4514  if (valuelen > 10)
4515  valuelen = 10;
4516 
4517  /* Convert initial characters to fraction */
4518  base = rangehi - rangelo + 1;
4519  num = 0.0;
4520  denom = base;
4521  while (valuelen-- > 0)
4522  {
4523  int ch = *value++;
4524 
4525  if (ch < rangelo)
4526  ch = rangelo - 1;
4527  else if (ch > rangehi)
4528  ch = rangehi + 1;
4529  num += ((double) (ch - rangelo)) / denom;
4530  denom *= base;
4531  }
4532 
4533  return num;
4534 }
4535 
4536 /*
4537  * Do convert_to_scalar()'s work for any timevalue data type.
4538  *
4539  * On failure (e.g., unsupported typid), set *failure to true;
4540  * otherwise, that variable is not changed.
4541  */
4542 static double
4544 {
4545  switch (typid)
4546  {
4547  case TIMESTAMPOID:
4548  return DatumGetTimestamp(value);
4549  case TIMESTAMPTZOID:
4550  return DatumGetTimestampTz(value);
4551  case DATEOID:
4553  case INTERVALOID:
4554  {
4556 
4557  /*
4558  * Convert the month part of Interval to days using assumed
4559  * average month length of 365.25/12.0 days. Not too
4560  * accurate, but plenty good enough for our purposes.
4561  */
4562  return interval->time + interval->day * (double) USECS_PER_DAY +
4563  interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4564  }
4565  case TIMEOID:
4566  return DatumGetTimeADT(value);
4567  case TIMETZOID:
4568  {
4569  TimeTzADT *timetz = DatumGetTimeTzADTP(value);
4570 
4571  /* use GMT-equivalent time */
4572  return (double) (timetz->time + (timetz->zone * 1000000.0));
4573  }
4574  }
4575 
4576  *failure = true;
4577  return 0;
4578 }
4579 
4580 
4581 /*
4582  * get_restriction_variable
4583  * Examine the args of a restriction clause to see if it's of the
4584  * form (variable op pseudoconstant) or (pseudoconstant op variable),
4585  * where "variable" could be either a Var or an expression in vars of a
4586  * single relation. If so, extract information about the variable,
4587  * and also indicate which side it was on and the other argument.
4588  *
4589  * Inputs:
4590  * root: the planner info
4591  * args: clause argument list
4592  * varRelid: see specs for restriction selectivity functions
4593  *
4594  * Outputs: (these are valid only if true is returned)
4595  * *vardata: gets information about variable (see examine_variable)
4596  * *other: gets other clause argument, aggressively reduced to a constant
4597  * *varonleft: set true if variable is on the left, false if on the right
4598  *
4599  * Returns true if a variable is identified, otherwise false.
4600  *
4601  * Note: if there are Vars on both sides of the clause, we must fail, because
4602  * callers are expecting that the other side will act like a pseudoconstant.
4603  */
4604 bool
4606  VariableStatData *vardata, Node **other,
4607  bool *varonleft)
4608 {
4609  Node *left,
4610  *right;
4611  VariableStatData rdata;
4612 
4613  /* Fail if not a binary opclause (probably shouldn't happen) */
4614  if (list_length(args) != 2)
4615  return false;
4616 
4617  left = (Node *) linitial(args);
4618  right = (Node *) lsecond(args);
4619 
4620  /*
4621  * Examine both sides. Note that when varRelid is nonzero, Vars of other
4622  * relations will be treated as pseudoconstants.
4623  */
4624  examine_variable(root, left, varRelid, vardata);
4625  examine_variable(root, right, varRelid, &rdata);
4626 
4627  /*
4628  * If one side is a variable and the other not, we win.
4629  */
4630  if (vardata->rel && rdata.rel == NULL)
4631  {
4632  *varonleft = true;
4633  *other = estimate_expression_value(root, rdata.var);
4634  /* Assume we need no ReleaseVariableStats(rdata) here */
4635  return true;
4636  }
4637 
4638  if (vardata->rel == NULL && rdata.rel)
4639  {
4640  *varonleft = false;
4641  *other = estimate_expression_value(root, vardata->var);
4642  /* Assume we need no ReleaseVariableStats(*vardata) here */
4643  *vardata = rdata;
4644  return true;
4645  }
4646 
4647  /* Oops, clause has wrong structure (probably var op var) */
4648  ReleaseVariableStats(*vardata);
4649  ReleaseVariableStats(rdata);
4650 
4651  return false;
4652 }
4653 
4654 /*
4655  * get_join_variables
4656  * Apply examine_variable() to each side of a join clause.
4657  * Also, attempt to identify whether the join clause has the same
4658  * or reversed sense compared to the SpecialJoinInfo.
4659  *
4660  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4661  * or "reversed" if it is "rhs_var OP lhs_var". In complicated cases
4662  * where we can't tell for sure, we default to assuming it's normal.
4663  */
4664 void
4666  VariableStatData *vardata1, VariableStatData *vardata2,
4667  bool *join_is_reversed)
4668 {
4669  Node *left,
4670  *right;
4671 
4672  if (list_length(args) != 2)
4673  elog(ERROR, "join operator should take two arguments");
4674 
4675  left = (Node *) linitial(args);
4676  right = (Node *) lsecond(args);
4677 
4678  examine_variable(root, left, 0, vardata1);
4679  examine_variable(root, right, 0, vardata2);
4680 
4681  if (vardata1->rel &&
4682  bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4683  *join_is_reversed = true; /* var1 is on RHS */
4684  else if (vardata2->rel &&
4685  bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4686  *join_is_reversed = true; /* var2 is on LHS */
4687  else
4688  *join_is_reversed = false;
4689 }
4690 
4691 /*
4692  * examine_variable
4693  * Try to look up statistical data about an expression.
4694  * Fill in a VariableStatData struct to describe the expression.
4695  *
4696  * Inputs:
4697  * root: the planner info
4698  * node: the expression tree to examine
4699  * varRelid: see specs for restriction selectivity functions
4700  *
4701  * Outputs: *vardata is filled as follows:
4702  * var: the input expression (with any binary relabeling stripped, if
4703  * it is or contains a variable; but otherwise the type is preserved)
4704  * rel: RelOptInfo for relation containing variable; NULL if expression
4705  * contains no Vars (NOTE this could point to a RelOptInfo of a
4706  * subquery, not one in the current query).
4707  * statsTuple: the pg_statistic entry for the variable, if one exists;
4708  * otherwise NULL.
4709  * freefunc: pointer to a function to release statsTuple with.
4710  * vartype: exposed type of the expression; this should always match
4711  * the declared input type of the operator we are estimating for.
4712  * atttype, atttypmod: actual type/typmod of the "var" expression. This is
4713  * commonly the same as the exposed type of the variable argument,
4714  * but can be different in binary-compatible-type cases.
4715  * isunique: true if we were able to match the var to a unique index or a
4716  * single-column DISTINCT clause, implying its values are unique for
4717  * this query. (Caution: this should be trusted for statistical
4718  * purposes only, since we do not check indimmediate nor verify that
4719  * the exact same definition of equality applies.)
4720  * acl_ok: true if current user has permission to read the column(s)
4721  * underlying the pg_statistic entry. This is consulted by
4722  * statistic_proc_security_check().
4723  *
4724  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4725  */
4726 void
4727 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4728  VariableStatData *vardata)
4729 {
4730  Node *basenode;
4731  Relids varnos;
4732  RelOptInfo *onerel;
4733 
4734  /* Make sure we don't return dangling pointers in vardata */
4735  MemSet(vardata, 0, sizeof(VariableStatData));
4736 
4737  /* Save the exposed type of the expression */
4738  vardata->vartype = exprType(node);
4739 
4740  /* Look inside any binary-compatible relabeling */
4741 
4742  if (IsA(node, RelabelType))
4743  basenode = (Node *) ((RelabelType *) node)->arg;
4744  else
4745  basenode = node;
4746 
4747  /* Fast path for a simple Var */
4748 
4749  if (IsA(basenode, Var) &&
4750  (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4751  {
4752  Var *var = (Var *) basenode;
4753 
4754  /* Set up result fields other than the stats tuple */
4755  vardata->var = basenode; /* return Var without relabeling */
4756  vardata->rel = find_base_rel(root, var->varno);
4757  vardata->atttype = var->vartype;
4758  vardata->atttypmod = var->vartypmod;
4759  vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4760 
4761  /* Try to locate some stats */
4762  examine_simple_variable(root, var, vardata);
4763 
4764  return;
4765  }
4766 
4767  /*
4768  * Okay, it's a more complicated expression. Determine variable
4769  * membership. Note that when varRelid isn't zero, only vars of that
4770  * relation are considered "real" vars.
4771  */
4772  varnos = pull_varnos(basenode);
4773 
4774  onerel = NULL;
4775 
4776  switch (bms_membership(varnos))
4777  {
4778  case BMS_EMPTY_SET:
4779  /* No Vars at all ... must be pseudo-constant clause */
4780  break;
4781  case BMS_SINGLETON:
4782  if (varRelid == 0 || bms_is_member(varRelid, varnos))
4783  {
4784  onerel = find_base_rel(root,
4785  (varRelid ? varRelid : bms_singleton_member(varnos)));
4786  vardata->rel = onerel;
4787  node = basenode; /* strip any relabeling */
4788  }
4789  /* else treat it as a constant */
4790  break;
4791  case BMS_MULTIPLE:
4792  if (varRelid == 0)
4793  {
4794  /* treat it as a variable of a join relation */
4795  vardata->rel = find_join_rel(root, varnos);
4796  node = basenode; /* strip any relabeling */
4797  }
4798  else if (bms_is_member(varRelid, varnos))
4799  {
4800  /* ignore the vars belonging to other relations */
4801  vardata->rel = find_base_rel(root, varRelid);
4802  node = basenode; /* strip any relabeling */
4803  /* note: no point in expressional-index search here */
4804  }
4805  /* else treat it as a constant */
4806  break;
4807  }
4808 
4809  bms_free(varnos);
4810 
4811  vardata->var = node;
4812  vardata->atttype = exprType(node);
4813  vardata->atttypmod = exprTypmod(node);
4814 
4815  if (onerel)
4816  {
4817  /*
4818  * We have an expression in vars of a single relation. Try to match
4819  * it to expressional index columns, in hopes of finding some
4820  * statistics.
4821  *
4822  * Note that we consider all index columns including INCLUDE columns,
4823  * since there could be stats for such columns. But the test for
4824  * uniqueness needs to be warier.
4825  *
4826  * XXX it's conceivable that there are multiple matches with different
4827  * index opfamilies; if so, we need to pick one that matches the
4828  * operator we are estimating for. FIXME later.
4829  */
4830  ListCell *ilist;
4831 
4832  foreach(ilist, onerel->indexlist)
4833  {
4834  IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4835  ListCell *indexpr_item;
4836  int pos;
4837 
4838  indexpr_item = list_head(index->indexprs);
4839  if (indexpr_item == NULL)
4840  continue; /* no expressions here... */
4841 
4842  for (pos = 0; pos < index->ncolumns; pos++)
4843  {
4844  if (index->indexkeys[pos] == 0)
4845  {
4846  Node *indexkey;
4847 
4848  if (indexpr_item == NULL)
4849  elog(ERROR, "too few entries in indexprs list");
4850  indexkey = (Node *) lfirst(indexpr_item);
4851  if (indexkey && IsA(indexkey, RelabelType))
4852  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4853  if (equal(node, indexkey))
4854  {
4855  /*
4856  * Found a match ... is it a unique index? Tests here
4857  * should match has_unique_index().
4858  */
4859  if (index->unique &&
4860  index->nkeycolumns == 1 &&
4861  pos == 0 &&
4862  (index->indpred == NIL || index->predOK))
4863  vardata->isunique = true;
4864 
4865  /*
4866  * Has it got stats? We only consider stats for
4867  * non-partial indexes, since partial indexes probably
4868  * don't reflect whole-relation statistics; the above
4869  * check for uniqueness is the only info we take from
4870  * a partial index.
4871  *
4872  * An index stats hook, however, must make its own
4873  * decisions about what to do with partial indexes.
4874  */
4875  if (get_index_stats_hook &&
4876  (*get_index_stats_hook) (root, index->indexoid,
4877  pos + 1, vardata))
4878  {
4879  /*
4880  * The hook took control of acquiring a stats
4881  * tuple. If it did supply a tuple, it'd better
4882  * have supplied a freefunc.
4883  */
4884  if (HeapTupleIsValid(vardata->statsTuple) &&
4885  !vardata->freefunc)
4886  elog(ERROR, "no function provided to release variable stats with");
4887  }
4888  else if (index->indpred == NIL)
4889  {
4890  vardata->statsTuple =
4892  ObjectIdGetDatum(index->indexoid),
4893  Int16GetDatum(pos + 1),
4894  BoolGetDatum(false));
4895  vardata->freefunc = ReleaseSysCache;
4896 
4897  if (HeapTupleIsValid(vardata->statsTuple))
4898  {
4899  /* Get index's table for permission check */
4900  RangeTblEntry *rte;
4901  Oid userid;
4902 
4903  rte = planner_rt_fetch(index->rel->relid, root);
4904  Assert(rte->rtekind == RTE_RELATION);
4905 
4906  /*
4907  * Use checkAsUser if it's set, in case we're
4908  * accessing the table via a view.
4909  */
4910  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
4911 
4912  /*
4913  * For simplicity, we insist on the whole
4914  * table being selectable, rather than trying
4915  * to identify which column(s) the index
4916  * depends on. Also require all rows to be
4917  * selectable --- there must be no
4918  * securityQuals from security barrier views
4919  * or RLS policies.
4920  */
4921  vardata->acl_ok =
4922  rte->securityQuals == NIL &&
4923  (pg_class_aclcheck(rte->relid, userid,
4924  ACL_SELECT) == ACLCHECK_OK);
4925 
4926  /*
4927  * If the user doesn't have permissions to
4928  * access an inheritance child relation, check
4929  * the permissions of the table actually
4930  * mentioned in the query, since most likely
4931  * the user does have that permission. Note
4932  * that whole-table select privilege on the
4933  * parent doesn't quite guarantee that the
4934  * user could read all columns of the child.
4935  * But in practice it's unlikely that any
4936  * interesting security violation could result
4937  * from allowing access to the expression
4938  * index's stats, so we allow it anyway. See
4939  * similar code in examine_simple_variable()
4940  * for additional comments.
4941  */
4942  if (!vardata->acl_ok &&
4943  root->append_rel_array != NULL)
4944  {
4945  AppendRelInfo *appinfo;
4946  Index varno = index->rel->relid;
4947 
4948  appinfo = root->append_rel_array[varno];
4949  while (appinfo &&
4950  planner_rt_fetch(appinfo->parent_relid,
4951  root)->rtekind == RTE_RELATION)
4952  {
4953  varno = appinfo->parent_relid;
4954  appinfo = root->append_rel_array[varno];
4955  }
4956  if (varno != index->rel->relid)
4957  {
4958  /* Repeat access check on this rel */
4959  rte = planner_rt_fetch(varno, root);
4960  Assert(rte->rtekind == RTE_RELATION);
4961 
4962  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
4963 
4964  vardata->acl_ok =
4965  rte->securityQuals == NIL &&
4966  (pg_class_aclcheck(rte->relid,
4967  userid,
4968  ACL_SELECT) == ACLCHECK_OK);
4969  }
4970  }
4971  }
4972  else
4973  {
4974  /* suppress leakproofness checks later */
4975  vardata->acl_ok = true;
4976  }
4977  }
4978  if (vardata->statsTuple)
4979  break;
4980  }
4981  indexpr_item = lnext(index->indexprs, indexpr_item);
4982  }
4983  }
4984  if (vardata->statsTuple)
4985  break;
4986  }
4987  }
4988 }
4989 
4990 /*
4991  * examine_simple_variable
4992  * Handle a simple Var for examine_variable
4993  *
4994  * This is split out as a subroutine so that we can recurse to deal with
4995  * Vars referencing subqueries.
4996  *
4997  * We already filled in all the fields of *vardata except for the stats tuple.
4998  */
4999 static void
5001  VariableStatData *vardata)
5002 {
5003  RangeTblEntry *rte = root->simple_rte_array[var->varno];
5004 
5005  Assert(IsA(rte, RangeTblEntry));
5006 
5008  (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
5009  {
5010  /*
5011  * The hook took control of acquiring a stats tuple. If it did supply
5012  * a tuple, it'd better have supplied a freefunc.
5013  */
5014  if (HeapTupleIsValid(vardata->statsTuple) &&
5015  !vardata->freefunc)
5016  elog(ERROR, "no function provided to release variable stats with");
5017  }
5018  else if (rte->rtekind == RTE_RELATION)
5019  {
5020  /*
5021  * Plain table or parent of an inheritance appendrel, so look up the
5022  * column in pg_statistic
5023  */
5025  ObjectIdGetDatum(rte->relid),
5026  Int16GetDatum(var->varattno),
5027  BoolGetDatum(rte->inh));
5028  vardata->freefunc = ReleaseSysCache;
5029 
5030  if (HeapTupleIsValid(vardata->statsTuple))
5031  {
5032  Oid userid;
5033 
5034  /*
5035  * Check if user has permission to read this column. We require
5036  * all rows to be accessible, so there must be no securityQuals
5037  * from security barrier views or RLS policies. Use checkAsUser
5038  * if it's set, in case we're accessing the table via a view.
5039  */
5040  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5041 
5042  vardata->acl_ok =
5043  rte->securityQuals == NIL &&
5044  ((pg_class_aclcheck(rte->relid, userid,
5045  ACL_SELECT) == ACLCHECK_OK) ||
5046  (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
5047  ACL_SELECT) == ACLCHECK_OK));
5048 
5049  /*
5050  * If the user doesn't have permissions to access an inheritance
5051  * child relation or specifically this attribute, check the
5052  * permissions of the table/column actually mentioned in the
5053  * query, since most likely the user does have that permission
5054  * (else the query will fail at runtime), and if the user can read
5055  * the column there then he can get the values of the child table
5056  * too. To do that, we must find out which of the root parent's
5057  * attributes the child relation's attribute corresponds to.
5058  */
5059  if (!vardata->acl_ok && var->varattno > 0 &&
5060  root->append_rel_array != NULL)
5061  {
5062  AppendRelInfo *appinfo;
5063  Index varno = var->varno;
5064  int varattno = var->varattno;
5065  bool found = false;
5066 
5067  appinfo = root->append_rel_array[varno];
5068 
5069  /*
5070  * Partitions are mapped to their immediate parent, not the
5071  * root parent, so must be ready to walk up multiple
5072  * AppendRelInfos. But stop if we hit a parent that is not
5073  * RTE_RELATION --- that's a flattened UNION ALL subquery, not
5074  * an inheritance parent.
5075  */
5076  while (appinfo &&
5077  planner_rt_fetch(appinfo->parent_relid,
5078  root)->rtekind == RTE_RELATION)
5079  {
5080  int parent_varattno;
5081 
5082  found = false;
5083  if (varattno <= 0 || varattno > appinfo->num_child_cols)
5084  break; /* safety check */
5085  parent_varattno = appinfo->parent_colnos[varattno - 1];
5086  if (parent_varattno == 0)
5087  break; /* Var is local to child */
5088 
5089  varno = appinfo->parent_relid;
5090  varattno = parent_varattno;
5091  found = true;
5092 
5093  /* If the parent is itself a child, continue up. */
5094  appinfo = root->append_rel_array[varno];
5095  }
5096 
5097  /*
5098  * In rare cases, the Var may be local to the child table, in
5099  * which case, we've got to live with having no access to this
5100  * column's stats.
5101  */
5102  if (!found)
5103  return;
5104 
5105  /* Repeat the access check on this parent rel & column */
5106  rte = planner_rt_fetch(varno, root);
5107  Assert(rte->rtekind == RTE_RELATION);
5108 
5109  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5110 
5111  vardata->acl_ok =
5112  rte->securityQuals == NIL &&
5113  ((pg_class_aclcheck(rte->relid, userid,
5114  ACL_SELECT) == ACLCHECK_OK) ||
5115  (pg_attribute_aclcheck(rte->relid, varattno, userid,
5116  ACL_SELECT) == ACLCHECK_OK));
5117  }
5118  }
5119  else
5120  {
5121  /* suppress any possible leakproofness checks later */
5122  vardata->acl_ok = true;
5123  }
5124  }
5125  else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
5126  {
5127  /*
5128  * Plain subquery (not one that was converted to an appendrel).
5129  */
5130  Query *subquery = rte->subquery;
5131  RelOptInfo *rel;
5132  TargetEntry *ste;
5133 
5134  /*
5135  * Punt if it's a whole-row var rather than a plain column reference.
5136  */
5137  if (var->varattno == InvalidAttrNumber)
5138  return;
5139 
5140  /*
5141  * Punt if subquery uses set operations or GROUP BY, as these will
5142  * mash underlying columns' stats beyond recognition. (Set ops are
5143  * particularly nasty; if we forged ahead, we would return stats
5144  * relevant to only the leftmost subselect...) DISTINCT is also
5145  * problematic, but we check that later because there is a possibility
5146  * of learning something even with it.
5147  */
5148  if (subquery->setOperations ||
5149  subquery->groupClause)
5150  return;
5151 
5152  /*
5153  * OK, fetch RelOptInfo for subquery. Note that we don't change the
5154  * rel returned in vardata, since caller expects it to be a rel of the
5155  * caller's query level. Because we might already be recursing, we
5156  * can't use that rel pointer either, but have to look up the Var's
5157  * rel afresh.
5158  */
5159  rel = find_base_rel(root, var->varno);
5160 
5161  /* If the subquery hasn't been planned yet, we have to punt */
5162  if (rel->subroot == NULL)
5163  return;
5164  Assert(IsA(rel->subroot, PlannerInfo));
5165 
5166  /*
5167  * Switch our attention to the subquery as mangled by the planner. It
5168  * was okay to look at the pre-planning version for the tests above,
5169  * but now we need a Var that will refer to the subroot's live
5170  * RelOptInfos. For instance, if any subquery pullup happened during
5171  * planning, Vars in the targetlist might have gotten replaced, and we
5172  * need to see the replacement expressions.
5173  */
5174  subquery = rel->subroot->parse;
5175  Assert(IsA(subquery, Query));
5176 
5177  /* Get the subquery output expression referenced by the upper Var */
5178  ste = get_tle_by_resno(subquery->targetList, var->varattno);
5179  if (ste == NULL || ste->resjunk)
5180  elog(ERROR, "subquery %s does not have attribute %d",
5181  rte->eref->aliasname, var->varattno);
5182  var = (Var *) ste->expr;
5183 
5184  /*
5185  * If subquery uses DISTINCT, we can't make use of any stats for the
5186  * variable ... but, if it's the only DISTINCT column, we are entitled
5187  * to consider it unique. We do the test this way so that it works
5188  * for cases involving DISTINCT ON.
5189  */
5190  if (subquery->distinctClause)
5191  {
5192  if (list_length(subquery->distinctClause) == 1 &&
5193  targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
5194  vardata->isunique = true;
5195  /* cannot go further */
5196  return;
5197  }
5198 
5199  /*
5200  * If the sub-query originated from a view with the security_barrier
5201  * attribute, we must not look at the variable's statistics, though it
5202  * seems all right to notice the existence of a DISTINCT clause. So
5203  * stop here.
5204  *
5205  * This is probably a harsher restriction than necessary; it's
5206  * certainly OK for the selectivity estimator (which is a C function,
5207  * and therefore omnipotent anyway) to look at the statistics. But
5208  * many selectivity estimators will happily *invoke the operator
5209  * function* to try to work out a good estimate - and that's not OK.
5210  * So for now, don't dig down for stats.
5211  */
5212  if (rte->security_barrier)
5213  return;
5214 
5215  /* Can only handle a simple Var of subquery's query level */
5216  if (var && IsA(var, Var) &&
5217  var->varlevelsup == 0)
5218  {
5219  /*
5220  * OK, recurse into the subquery. Note that the original setting
5221  * of vardata->isunique (which will surely be false) is left
5222  * unchanged in this situation. That's what we want, since even
5223  * if the underlying column is unique, the subquery may have
5224  * joined to other tables in a way that creates duplicates.
5225  */
5226  examine_simple_variable(rel->subroot, var, vardata);
5227  }
5228  }
5229  else
5230  {
5231  /*
5232  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
5233  * won't see RTE_JOIN here because join alias Vars have already been
5234  * flattened.) There's not much we can do with function outputs, but
5235  * maybe someday try to be smarter about VALUES and/or CTEs.
5236  */
5237  }
5238 }
5239 
5240 /*
5241  * Check whether it is permitted to call func_oid passing some of the
5242  * pg_statistic data in vardata. We allow this either if the user has SELECT
5243  * privileges on the table or column underlying the pg_statistic data or if
5244  * the function is marked leak-proof.
5245  */
5246 bool
5248 {
5249  if (vardata->acl_ok)
5250  return true;
5251 
5252  if (!OidIsValid(func_oid))
5253  return false;
5254 
5255  if (get_func_leakproof(func_oid))
5256  return true;
5257 
5258  ereport(DEBUG2,
5259  (errmsg_internal("not using statistics because function \"%s\" is not leak-proof",
5260  get_func_name(func_oid))));
5261  return false;
5262 }
5263 
5264 /*
5265  * get_variable_numdistinct
5266  * Estimate the number of distinct values of a variable.
5267  *
5268  * vardata: results of examine_variable
5269  * *isdefault: set to true if the result is a default rather than based on
5270  * anything meaningful.
5271  *
5272  * NB: be careful to produce a positive integral result, since callers may
5273  * compare the result to exact integer counts, or might divide by it.
5274  */
5275 double
5276 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
5277 {
5278  double stadistinct;
5279  double stanullfrac = 0.0;
5280  double ntuples;
5281 
5282  *isdefault = false;
5283 
5284  /*
5285  * Determine the stadistinct value to use. There are cases where we can
5286  * get an estimate even without a pg_statistic entry, or can get a better
5287  * value than is in pg_statistic. Grab stanullfrac too if we can find it
5288  * (otherwise, assume no nulls, for lack of any better idea).
5289  */
5290  if (HeapTupleIsValid(vardata->statsTuple))
5291  {
5292  /* Use the pg_statistic entry */
5293  Form_pg_statistic stats;
5294 
5295  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
5296  stadistinct = stats->stadistinct;
5297  stanullfrac = stats->stanullfrac;
5298  }
5299  else if (vardata->vartype == BOOLOID)
5300  {
5301  /*
5302  * Special-case boolean columns: presumably, two distinct values.
5303  *
5304  * Are there any other datatypes we should wire in special estimates
5305  * for?
5306  */
5307  stadistinct = 2.0;
5308  }
5309  else if (vardata->rel && vardata->rel->rtekind == RTE_VALUES)
5310  {
5311  /*
5312  * If the Var represents a column of a VALUES RTE, assume it's unique.
5313  * This could of course be very wrong, but it should tend to be true
5314  * in well-written queries. We could consider examining the VALUES'
5315  * contents to get some real statistics; but that only works if the
5316  * entries are all constants, and it would be pretty expensive anyway.
5317  */
5318  stadistinct = -1.0; /* unique (and all non null) */
5319  }
5320  else
5321  {
5322  /*
5323  * We don't keep statistics for system columns, but in some cases we
5324  * can infer distinctness anyway.
5325  */
5326  if (vardata->var && IsA(vardata->var, Var))
5327  {
5328  switch (((Var *) vardata->var)->varattno)
5329  {
5331  stadistinct = -1.0; /* unique (and all non null) */
5332  break;
5334  stadistinct = 1.0; /* only 1 value */
5335  break;
5336  default:
5337  stadistinct = 0.0; /* means "unknown" */
5338  break;
5339  }
5340  }
5341  else
5342  stadistinct = 0.0; /* means "unknown" */
5343 
5344  /*
5345  * XXX consider using estimate_num_groups on expressions?
5346  */
5347  }
5348 
5349  /*
5350  * If there is a unique index or DISTINCT clause for the variable, assume
5351  * it is unique no matter what pg_statistic says; the statistics could be
5352  * out of date, or we might have found a partial unique index that proves
5353  * the var is unique for this query. However, we'd better still believe
5354  * the null-fraction statistic.
5355  */
5356  if (vardata->isunique)
5357  stadistinct = -1.0 * (1.0 - stanullfrac);
5358 
5359  /*
5360  * If we had an absolute estimate, use that.
5361  */
5362  if (stadistinct > 0.0)
5363  return clamp_row_est(stadistinct);
5364 
5365  /*
5366  * Otherwise we need to get the relation size; punt if not available.
5367  */
5368  if (vardata->rel == NULL)
5369  {
5370  *isdefault = true;
5371  return DEFAULT_NUM_DISTINCT;
5372  }
5373  ntuples = vardata->rel->tuples;
5374  if (ntuples <= 0.0)
5375  {
5376  *isdefault = true;
5377  return DEFAULT_NUM_DISTINCT;
5378  }
5379 
5380  /*
5381  * If we had a relative estimate, use that.
5382  */
5383  if (stadistinct < 0.0)
5384  return clamp_row_est(-stadistinct * ntuples);
5385 
5386  /*
5387  * With no data, estimate ndistinct = ntuples if the table is small, else
5388  * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5389  * that the behavior isn't discontinuous.
5390  */
5391  if (ntuples < DEFAULT_NUM_DISTINCT)
5392  return clamp_row_est(ntuples);
5393 
5394  *isdefault = true;
5395  return DEFAULT_NUM_DISTINCT;
5396 }
5397 
5398 /*
5399  * get_variable_range
5400  * Estimate the minimum and maximum value of the specified variable.
5401  * If successful, store values in *min and *max, and return true.
5402  * If no data available, return false.
5403  *
5404  * sortop is the "<" comparison operator to use. This should generally
5405  * be "<" not ">", as only the former is likely to be found in pg_statistic.
5406  * The collation must be specified too.
5407  */
5408 static bool
5410  Oid sortop, Oid collation,
5411  Datum *min, Datum *max)
5412 {
5413  Datum tmin = 0;
5414  Datum tmax = 0;
5415  bool have_data = false;
5416  int16 typLen;
5417  bool typByVal;
5418  Oid opfuncoid;
5419  FmgrInfo opproc;
5420  AttStatsSlot sslot;
5421 
5422  /*
5423  * XXX It's very tempting to try to use the actual column min and max, if
5424  * we can get them relatively-cheaply with an index probe. However, since
5425  * this function is called many times during join planning, that could
5426  * have unpleasant effects on planning speed. Need more investigation
5427  * before enabling this.
5428  */
5429 #ifdef NOT_USED
5430  if (get_actual_variable_range(root, vardata, sortop, collation, min, max))
5431  return true;
5432 #endif
5433 
5434  if (!HeapTupleIsValid(vardata->statsTuple))
5435  {
5436  /* no stats available, so default result */
5437  return false;
5438  }
5439 
5440  /*
5441  * If we can't apply the sortop to the stats data, just fail. In
5442  * principle, if there's a histogram and no MCVs, we could return the
5443  * histogram endpoints without ever applying the sortop ... but it's
5444  * probably not worth trying, because whatever the caller wants to do with
5445  * the endpoints would likely fail the security check too.
5446  */
5447  if (!statistic_proc_security_check(vardata,
5448  (opfuncoid = get_opcode(sortop))))
5449  return false;
5450 
5451  opproc.fn_oid = InvalidOid; /* mark this as not looked up yet */
5452 
5453  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5454 
5455  /*
5456  * If there is a histogram with the ordering we want, grab the first and
5457  * last values.
5458  */
5459  if (get_attstatsslot(&sslot, vardata->statsTuple,
5460  STATISTIC_KIND_HISTOGRAM, sortop,
5462  {
5463  if (sslot.stacoll == collation && sslot.nvalues > 0)
5464  {
5465  tmin = datumCopy(sslot.values[0], typByVal, typLen);
5466  tmax = datumCopy(sslot.values[sslot.nvalues - 1], typByVal, typLen);
5467  have_data = true;
5468  }
5469  free_attstatsslot(&sslot);
5470  }
5471 
5472  /*
5473  * Otherwise, if there is a histogram with some other ordering, scan it
5474  * and get the min and max values according to the ordering we want. This
5475  * of course may not find values that are really extremal according to our
5476  * ordering, but it beats ignoring available data.
5477  */
5478  if (!have_data &&
5479  get_attstatsslot(&sslot, vardata->statsTuple,
5480  STATISTIC_KIND_HISTOGRAM, InvalidOid,
5482  {
5483  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5484  collation, typLen, typByVal,
5485  &tmin, &tmax, &have_data);
5486  free_attstatsslot(&sslot);
5487  }
5488 
5489  /*
5490  * If we have most-common-values info, look for extreme MCVs. This is
5491  * needed even if we also have a histogram, since the histogram excludes
5492  * the MCVs.
5493  */
5494  if (get_attstatsslot(&sslot, vardata->statsTuple,
5495  STATISTIC_KIND_MCV, InvalidOid,
5497  {
5498  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5499  collation, typLen, typByVal,
5500  &tmin, &tmax, &have_data);
5501  free_attstatsslot(&sslot);
5502  }
5503 
5504  *min = tmin;
5505  *max = tmax;
5506  return have_data;
5507 }
5508 
5509 /*
5510  * get_stats_slot_range: scan sslot for min/max values
5511  *
5512  * Subroutine for get_variable_range: update min/max/have_data according
5513  * to what we find in the statistics array.
5514  */
5515 static void
5516 get_stats_slot_range(AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc,
5517  Oid collation, int16 typLen, bool typByVal,
5518  Datum *min, Datum *max, bool *p_have_data)
5519 {
5520  Datum tmin = *min;
5521  Datum tmax = *max;
5522  bool have_data = *p_have_data;
5523  bool found_tmin = false;
5524  bool found_tmax = false;
5525 
5526  /* Look up the comparison function, if we didn't already do so */
5527  if (opproc->fn_oid != opfuncoid)
5528  fmgr_info(opfuncoid, opproc);
5529 
5530  /* Scan all the slot's values */
5531  for (int i = 0; i < sslot->nvalues; i++)
5532  {
5533  if (!have_data)
5534  {
5535  tmin = tmax = sslot->values[i];
5536  found_tmin = found_tmax = true;
5537  *p_have_data = have_data = true;
5538  continue;
5539  }
5540  if (DatumGetBool(FunctionCall2Coll(opproc,
5541  collation,
5542  sslot->values[i], tmin)))
5543  {
5544  tmin = sslot->values[i];
5545  found_tmin = true;
5546  }
5547  if (DatumGetBool(FunctionCall2Coll(opproc,
5548  collation,
5549  tmax, sslot->values[i])))
5550  {
5551  tmax = sslot->values[i];
5552  found_tmax = true;
5553  }
5554  }
5555 
5556  /*
5557  * Copy the slot's values, if we found new extreme values.
5558  */
5559  if (found_tmin)
5560  *min = datumCopy(tmin, typByVal, typLen);
5561  if (found_tmax)
5562  *max = datumCopy(tmax, typByVal, typLen);
5563 }
5564 
5565 
5566 /*
5567  * get_actual_variable_range
5568  * Attempt to identify the current *actual* minimum and/or maximum
5569  * of the specified variable, by looking for a suitable btree index
5570  * and fetching its low and/or high values.
5571  * If successful, store values in *min and *max, and return true.
5572  * (Either pointer can be NULL if that endpoint isn't needed.)
5573  * If no data available, return false.
5574  *
5575  * sortop is the "<" comparison operator to use.
5576  * collation is the required collation.
5577  */
5578 static bool
5580  Oid sortop, Oid collation,
5581  Datum *min, Datum *max)
5582 {
5583  bool have_data = false;
5584  RelOptInfo *rel = vardata->rel;
5585  RangeTblEntry *rte;
5586  ListCell *lc;
5587 
5588  /* No hope if no relation or it doesn't have indexes */
5589  if (rel == NULL || rel->indexlist == NIL)
5590  return false;
5591  /* If it has indexes it must be a plain relation */
5592  rte = root->simple_rte_array[rel->relid];
5593  Assert(rte->rtekind == RTE_RELATION);
5594 
5595  /* Search through the indexes to see if any match our problem */
5596  foreach(lc, rel->indexlist)
5597  {
5599  ScanDirection indexscandir;
5600 
5601  /* Ignore non-btree indexes */
5602  if (index->relam != BTREE_AM_OID)
5603  continue;
5604 
5605  /*
5606  * Ignore partial indexes --- we only want stats that cover the entire
5607  * relation.
5608  */
5609  if (index->indpred != NIL)
5610  continue;
5611 
5612  /*
5613  * The index list might include hypothetical indexes inserted by a
5614  * get_relation_info hook --- don't try to access them.
5615  */
5616  if (index->hypothetical)
5617  continue;
5618 
5619  /*
5620  * The first index column must match the desired variable, sortop, and
5621  * collation --- but we can use a descending-order index.
5622  */
5623  if (collation != index->indexcollations[0])
5624  continue; /* test first 'cause it's cheapest */
5625  if (!match_index_to_operand(vardata->var, 0, index))
5626  continue;
5627  switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
5628  {
5629  case BTLessStrategyNumber:
5630  if (index->reverse_sort[0])
5631  indexscandir = BackwardScanDirection;
5632  else
5633  indexscandir = ForwardScanDirection;
5634  break;
5636  if (index->reverse_sort[0])
5637  indexscandir = ForwardScanDirection;
5638  else
5639  indexscandir = BackwardScanDirection;
5640  break;
5641  default:
5642  /* index doesn't match the sortop */
5643  continue;
5644  }
5645 
5646  /*
5647  * Found a suitable index to extract data from. Set up some data that
5648  * can be used by both invocations of get_actual_variable_endpoint.
5649  */
5650  {
5651  MemoryContext tmpcontext;
5652  MemoryContext oldcontext;
5653  Relation heapRel;
5654  Relation indexRel;
5655  TupleTableSlot *slot;
5656  int16 typLen;
5657  bool typByVal;
5658  ScanKeyData scankeys[1];
5659 
5660  /* Make sure any cruft gets recycled when we're done */
5662  "get_actual_variable_range workspace",
5664  oldcontext = MemoryContextSwitchTo(tmpcontext);
5665 
5666  /*
5667  * Open the table and index so we can read from them. We should
5668  * already have some type of lock on each.
5669  */
5670  heapRel = table_open(rte->relid, NoLock);
5671  indexRel = index_open(index->indexoid, NoLock);
5672 
5673  /* build some stuff needed for indexscan execution */
5674  slot = table_slot_create(heapRel, NULL);
5675  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5676 
5677  /* set up an IS NOT NULL scan key so that we ignore nulls */
5678  ScanKeyEntryInitialize(&scankeys[0],
5680  1, /* index col to scan */
5681  InvalidStrategy, /* no strategy */
5682  InvalidOid, /* no strategy subtype */
5683  InvalidOid, /* no collation */
5684  InvalidOid, /* no reg proc for this */
5685  (Datum) 0); /* constant */
5686 
5687  /* If min is requested ... */
5688  if (min)
5689  {
5690  have_data = get_actual_variable_endpoint(heapRel,
5691  indexRel,
5692  indexscandir,
5693  scankeys,
5694  typLen,
5695  typByVal,
5696  slot,
5697  oldcontext,
5698  min);
5699  }
5700  else
5701  {
5702  /* If min not requested, assume index is nonempty */
5703  have_data = true;
5704  }
5705 
5706  /* If max is requested, and we didn't find the index is empty */
5707  if (max && have_data)
5708  {
5709  /* scan in the opposite direction; all else is the same */
5710  have_data = get_actual_variable_endpoint(heapRel,
5711  indexRel,
5712  -indexscandir,
5713  scankeys,
5714  typLen,
5715  typByVal,
5716  slot,
5717  oldcontext,
5718  max);
5719  }
5720 
5721  /* Clean everything up */
5723 
5724  index_close(indexRel, NoLock);
5725  table_close(heapRel, NoLock);
5726 
5727  MemoryContextSwitchTo(oldcontext);
5728  MemoryContextDelete(tmpcontext);
5729 
5730  /* And we're done */
5731  break;
5732  }
5733  }
5734 
5735  return have_data;
5736 }
5737 
5738 /*
5739  * Get one endpoint datum (min or max depending on indexscandir) from the
5740  * specified index. Return true if successful, false if index is empty.
5741  * On success, endpoint value is stored to *endpointDatum (and copied into
5742  * outercontext).
5743  *
5744  * scankeys is a 1-element scankey array set up to reject nulls.
5745  * typLen/typByVal describe the datatype of the index's first column.
5746  * tableslot is a slot suitable to hold table tuples, in case we need
5747  * to probe the heap.
5748  * (We could compute these values locally, but that would mean computing them
5749  * twice when get_actual_variable_range needs both the min and the max.)
5750  */
5751 static bool
5753  Relation indexRel,
5754  ScanDirection indexscandir,
5755  ScanKey scankeys,
5756  int16 typLen,
5757  bool typByVal,
5758  TupleTableSlot *tableslot,
5759  MemoryContext outercontext,
5760  Datum *endpointDatum)
5761 {
5762  bool have_data = false;
5763  SnapshotData SnapshotNonVacuumable;
5764  IndexScanDesc index_scan;
5765  Buffer vmbuffer = InvalidBuffer;
5766  ItemPointer tid;
5768  bool isnull[INDEX_MAX_KEYS];
5769  MemoryContext oldcontext;
5770 
5771  /*
5772  * We use the index-only-scan machinery for this. With mostly-static
5773  * tables that's a win because it avoids a heap visit. It's also a win
5774  * for dynamic data, but the reason is less obvious; read on for details.
5775  *
5776  * In principle, we should scan the index with our current active
5777  * snapshot, which is the best approximation we've got to what the query
5778  * will see when executed. But that won't be exact if a new snap is taken
5779  * before running the query, and it can be very expensive if a lot of
5780  * recently-dead or uncommitted rows exist at the beginning or end of the
5781  * index (because we'll laboriously fetch each one and reject it).
5782  * Instead, we use SnapshotNonVacuumable. That will accept recently-dead
5783  * and uncommitted rows as well as normal visible rows. On the other
5784  * hand, it will reject known-dead rows, and thus not give a bogus answer
5785  * when the extreme value has been deleted (unless the deletion was quite
5786  * recent); that case motivates not using SnapshotAny here.
5787  *
5788  * A crucial point here is that SnapshotNonVacuumable, with
5789  * GlobalVisTestFor(heapRel) as horizon, yields the inverse of the
5790  * condition that the indexscan will use to decide that index entries are
5791  * killable (see heap_hot_search_buffer()). Therefore, if the snapshot
5792  * rejects a tuple (or more precisely, all tuples of a HOT chain) and we
5793  * have to continue scanning past it, we know that the indexscan will mark
5794  * that index entry killed. That means that the next
5795  * get_actual_variable_endpoint() call will not have to re-consider that
5796  * index entry. In this way we avoid repetitive work when this function
5797  * is used a lot during planning.
5798  *
5799  * But using SnapshotNonVacuumable creates a hazard of its own. In a
5800  * recently-created index, some index entries may point at "broken" HOT
5801  * chains in which not all the tuple versions contain data matching the
5802  * index entry. The live tuple version(s) certainly do match the index,
5803  * but SnapshotNonVacuumable can accept recently-dead tuple versions that
5804  * don't match. Hence, if we took data from the selected heap tuple, we
5805  * might get a bogus answer that's not close to the index extremal value,
5806  * or could even be NULL. We avoid this hazard because we take the data
5807  * from the index entry not the heap.
5808  */
5809  InitNonVacuumableSnapshot(SnapshotNonVacuumable,
5810  GlobalVisTestFor(heapRel));
5811 
5812  index_scan = index_beginscan(heapRel, indexRel,
5813  &SnapshotNonVacuumable,
5814  1, 0);
5815  /* Set it up for index-only scan */
5816  index_scan->xs_want_itup = true;
5817  index_rescan(index_scan, scankeys, 1, NULL, 0);
5818 
5819  /* Fetch first/next tuple in specified direction */
5820  while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
5821  {
5822  if (!VM_ALL_VISIBLE(heapRel,
5824  &vmbuffer))
5825  {
5826  /* Rats, we have to visit the heap to check visibility */
5827  if (!index_fetch_heap(index_scan, tableslot))
5828  continue; /* no visible tuple, try next index entry */
5829 
5830  /* We don't actually need the heap tuple for anything */
5831  ExecClearTuple(tableslot);
5832 
5833  /*
5834  * We don't care whether there's more than one visible tuple in
5835  * the HOT chain; if any are visible, that's good enough.
5836  */
5837  }
5838 
5839  /*
5840  * We expect that btree will return data in IndexTuple not HeapTuple
5841  * format. It's not lossy either.
5842  */
5843  if (!index_scan->xs_itup)
5844  elog(ERROR, "no data returned for index-only scan");
5845  if (index_scan->xs_recheck)
5846  elog(ERROR, "unexpected recheck indication from btree");
5847 
5848  /* OK to deconstruct the index tuple */
5849  index_deform_tuple(index_scan->xs_itup,
5850  index_scan->xs_itupdesc,
5851  values, isnull);
5852 
5853  /* Shouldn't have got a null, but be careful */
5854  if (isnull[0])
5855  elog(ERROR, "found unexpected null value in index \"%s\"",
5856  RelationGetRelationName(indexRel));
5857 
5858  /* Copy the index column value out to caller's context */
5859  oldcontext = MemoryContextSwitchTo(outercontext);
5860  *endpointDatum = datumCopy(values[0], typByVal, typLen);
5861  MemoryContextSwitchTo(oldcontext);
5862  have_data = true;
5863  break;
5864  }
5865 
5866  if (vmbuffer != InvalidBuffer)
5867  ReleaseBuffer(vmbuffer);
5868  index_endscan(index_scan);
5869 
5870  return have_data;
5871 }
5872 
5873 /*
5874  * find_join_input_rel
5875  * Look up the input relation for a join.
5876  *
5877  * We assume that the input relation's RelOptInfo must have been constructed
5878  * already.
5879  */
5880 static RelOptInfo *
5882 {
5883  RelOptInfo *rel = NULL;
5884 
5885  switch (bms_membership(relids))
5886  {
5887  case BMS_EMPTY_SET:
5888  /* should not happen */
5889  break;
5890  case BMS_SINGLETON:
5891  rel = find_base_rel(root, bms_singleton_member(relids));
5892  break;
5893  case BMS_MULTIPLE:
5894  rel = find_join_rel(root, relids);
5895  break;
5896  }
5897 
5898  if (rel == NULL)
5899  elog(ERROR, "could not find RelOptInfo for given relids");
5900 
5901  return rel;
5902 }
5903 
5904 
5905 /*-------------------------------------------------------------------------
5906  *
5907  * Index cost estimation functions
5908  *
5909  *-------------------------------------------------------------------------
5910  */
5911 
5912 /*
5913  * Extract the actual indexquals (as RestrictInfos) from an IndexClause list
5914  */
5915 List *
5917 {
5918  List *result = NIL;
5919  ListCell *lc;
5920 
5921  foreach(lc, indexclauses)
5922  {
5923  IndexClause *iclause = lfirst_node(IndexClause, lc);
5924  ListCell *lc2;
5925 
5926  foreach(lc2, iclause->indexquals)
5927  {
5928  RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
5929 
5930  result = lappend(result, rinfo);
5931  }
5932  }
5933  return result;
5934 }
5935 
5936 /*
5937  * Compute the total evaluation cost of the comparison operands in a list
5938  * of index qual expressions. Since we know these will be evaluated just
5939  * once per scan, there's no need to distinguish startup from per-row cost.
5940  *
5941  * This can be used either on the result of get_quals_from_indexclauses(),
5942  * or directly on an indexorderbys list. In both cases, we expect that the
5943  * index key expression is on the left side of binary clauses.
5944  */
5945 Cost
5947 {
5948  Cost qual_arg_cost = 0;
5949  ListCell *lc;
5950 
5951  foreach(lc, indexquals)
5952  {
5953  Expr *clause = (Expr *) lfirst(lc);
5954  Node *other_operand;
5955  QualCost index_qual_cost;
5956 
5957  /*
5958  * Index quals will have RestrictInfos, indexorderbys won't. Look
5959  * through RestrictInfo if present.
5960  */
5961  if (IsA(clause, RestrictInfo))
5962  clause = ((RestrictInfo *) clause)->clause;
5963 
5964  if (IsA(clause, OpExpr))
5965  {
5966  OpExpr *op = (OpExpr *) clause;
5967 
5968  other_operand = (Node *) lsecond(op->args);
5969  }
5970  else if (IsA(clause, RowCompareExpr))
5971  {
5972  RowCompareExpr *rc = (RowCompareExpr *) clause;
5973 
5974  other_operand = (Node *) rc->rargs;
5975  }
5976  else if (IsA(clause, ScalarArrayOpExpr))
5977  {
5978  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
5979 
5980  other_operand = (Node *) lsecond(saop->args);
5981  }
5982  else if (IsA(clause, NullTest))
5983  {
5984  other_operand = NULL;
5985  }
5986  else
5987  {
5988  elog(ERROR, "unsupported indexqual type: %d",
5989  (int) nodeTag(clause));
5990  other_operand = NULL; /* keep compiler quiet */
5991  }
5992 
5993  cost_qual_eval_node(&index_qual_cost, other_operand, root);
5994  qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
5995  }
5996  return qual_arg_cost;
5997 }
5998 
5999 void
6001  IndexPath *path,
6002  double loop_count,
6003  GenericCosts *costs)
6004 {
6005  IndexOptInfo *index = path->indexinfo;
6006  List *indexQuals = get_quals_from_indexclauses(path->indexclauses);
6007  List *indexOrderBys = path->indexorderbys;
6008  Cost indexStartupCost;
6009  Cost indexTotalCost;
6010  Selectivity indexSelectivity;
6011  double indexCorrelation;
6012  double numIndexPages;
6013  double numIndexTuples;
6014  double spc_random_page_cost;
6015  double num_sa_scans;
6016  double num_outer_scans;
6017  double num_scans;
6018  double qual_op_cost;
6019  double qual_arg_cost;
6020  List *selectivityQuals;
6021  ListCell *l;
6022 
6023  /*
6024  * If the index is partial, AND the index predicate with the explicitly
6025  * given indexquals to produce a more accurate idea of the index
6026  * selectivity.
6027  */
6028  selectivityQuals = add_predicate_to_index_quals(index, indexQuals);
6029 
6030  /*
6031  * Check for ScalarArrayOpExpr index quals, and estimate the number of
6032  * index scans that will be performed.
6033  */
6034  num_sa_scans = 1;
6035  foreach(l, indexQuals)
6036  {
6037  RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6038 
6039  if (IsA(rinfo->clause, ScalarArrayOpExpr))
6040  {
6041  ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6042  int alength = estimate_array_length(lsecond(saop->args));
6043 
6044  if (alength > 1)
6045  num_sa_scans *= alength;
6046  }
6047  }
6048 
6049  /* Estimate the fraction of main-table tuples that will be visited */
6050  indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6051  index->rel->relid,
6052  JOIN_INNER,
6053  NULL);
6054 
6055  /*
6056  * If caller didn't give us an estimate, estimate the number of index
6057  * tuples that will be visited. We do it in this rather peculiar-looking
6058  * way in order to get the right answer for partial indexes.
6059  */
6060  numIndexTuples = costs->numIndexTuples;
6061  if (numIndexTuples <= 0.0)
6062  {
6063  numIndexTuples = indexSelectivity * index->rel->tuples;
6064 
6065  /*
6066  * The above calculation counts all the tuples visited across all
6067  * scans induced by ScalarArrayOpExpr nodes. We want to consider the
6068  * average per-indexscan number, so adjust. This is a handy place to
6069  * round to integer, too. (If caller supplied tuple estimate, it's
6070  * responsible for handling these considerations.)
6071  */
6072  numIndexTuples = rint(numIndexTuples / num_sa_scans);
6073  }
6074 
6075  /*
6076  * We can bound the number of tuples by the index size in any case. Also,
6077  * always estimate at least one tuple is touched, even when
6078  * indexSelectivity estimate is tiny.
6079  */
6080  if (numIndexTuples > index->tuples)
6081  numIndexTuples = index->tuples;
6082  if (numIndexTuples < 1.0)
6083  numIndexTuples = 1.0;
6084 
6085  /*
6086  * Estimate the number of index pages that will be retrieved.
6087  *
6088  * We use the simplistic method of taking a pro-rata fraction of the total
6089  * number of index pages. In effect, this counts only leaf pages and not
6090  * any overhead such as index metapage or upper tree levels.
6091  *
6092  * In practice access to upper index levels is often nearly free because
6093  * those tend to stay in cache under load; moreover, the cost involved is
6094  * highly dependent on index type. We therefore ignore such costs here
6095  * and leave it to the caller to add a suitable charge if needed.
6096  */
6097  if (index->pages > 1 && index->tuples > 1)
6098  numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6099  else
6100  numIndexPages = 1.0;
6101 
6102  /* fetch estimated page cost for tablespace containing index */
6104  &spc_random_page_cost,
6105  NULL);
6106 
6107  /*
6108  * Now compute the disk access costs.
6109  *
6110  * The above calculations are all per-index-scan. However, if we are in a
6111  * nestloop inner scan, we can expect the scan to be repeated (with
6112  * different search keys) for each row of the outer relation. Likewise,
6113  * ScalarArrayOpExpr quals result in multiple index scans. This creates
6114  * the potential for cache effects to reduce the number of disk page
6115  * fetches needed. We want to estimate the average per-scan I/O cost in
6116  * the presence of caching.
6117  *
6118  * We use the Mackert-Lohman formula (see costsize.c for details) to
6119  * estimate the total number of page fetches that occur. While this
6120  * wasn't what it was designed for, it seems a reasonable model anyway.
6121  * Note that we are counting pages not tuples anymore, so we take N = T =
6122  * index size, as if there were one "tuple" per page.
6123  */
6124  num_outer_scans = loop_count;
6125  num_scans = num_sa_scans * num_outer_scans;
6126 
6127  if (num_scans > 1)
6128  {
6129  double pages_fetched;
6130 
6131  /* total page fetches ignoring cache effects */
6132  pages_fetched = numIndexPages * num_scans;
6133 
6134  /* use Mackert and Lohman formula to adjust for cache effects */
6135  pages_fetched = index_pages_fetched(pages_fetched,
6136  index->pages,
6137  (double) index->pages,
6138  root);
6139 
6140  /*
6141  * Now compute the total disk access cost, and then report a pro-rated
6142  * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
6143  * since that's internal to the indexscan.)
6144  */
6145  indexTotalCost = (pages_fetched * spc_random_page_cost)
6146  / num_outer_scans;
6147  }
6148  else
6149  {
6150  /*
6151  * For a single index scan, we just charge spc_random_page_cost per
6152  * page touched.
6153  */
6154  indexTotalCost = numIndexPages * spc_random_page_cost;
6155  }
6156 
6157  /*
6158  * CPU cost: any complex expressions in the indexquals will need to be
6159  * evaluated once at the start of the scan to reduce them to runtime keys
6160  * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
6161  * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6162  * indexqual operator. Because we have numIndexTuples as a per-scan
6163  * number, we have to multiply by num_sa_scans to get the correct result
6164  * for ScalarArrayOpExpr cases. Similarly add in costs for any index
6165  * ORDER BY expressions.
6166  *
6167  * Note: this neglects the possible costs of rechecking lossy operators.
6168  * Detecting that that might be needed seems more expensive than it's
6169  * worth, though, considering all the other inaccuracies here ...
6170  */
6171  qual_arg_cost = index_other_operands_eval_cost(root, indexQuals) +
6172  index_other_operands_eval_cost(root, indexOrderBys);
6173  qual_op_cost = cpu_operator_cost *
6174  (list_length(indexQuals) + list_length(indexOrderBys));
6175 
6176  indexStartupCost = qual_arg_cost;
6177  indexTotalCost += qual_arg_cost;
6178  indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6179 
6180  /*
6181  * Generic assumption about index correlation: there isn't any.
6182  */
6183  indexCorrelation = 0.0;
6184 
6185  /*
6186  * Return everything to caller.
6187  */
6188  costs->indexStartupCost = indexStartupCost;
6189  costs->indexTotalCost = indexTotalCost;
6190  costs->indexSelectivity = indexSelectivity;
6191  costs->