/*-------------------------------------------------------------------------
 *
 * selfuncs.c
 *    Selectivity functions and index cost estimation functions for
 *    standard operators and index access methods.
 *
 *    Selectivity routines are registered in the pg_operator catalog
 *    in the "oprrest" and "oprjoin" attributes.
 *
 *    Index cost functions are located via the index AM's API struct,
 *    which is obtained from the handler function registered in pg_am.
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/utils/adt/selfuncs.c
 *
 *-------------------------------------------------------------------------
 */

/*----------
 * Operator selectivity estimation functions are called to estimate the
 * selectivity of WHERE clauses whose top-level operator is their operator.
 * We divide the problem into two cases:
 *      Restriction clause estimation: the clause involves vars of just
 *          one relation.
 *      Join clause estimation: the clause involves vars of multiple rels.
 * Join selectivity estimation is far more difficult and usually less accurate
 * than restriction estimation.
 *
 * When dealing with the inner scan of a nestloop join, we consider the
 * join's joinclauses as restriction clauses for the inner relation, and
 * treat vars of the outer relation as parameters (a/k/a constants of unknown
 * values).  So, restriction estimators need to be able to accept an argument
 * telling which relation is to be treated as the variable.
 *
 * The call convention for a restriction estimator (oprrest function) is
 *
 *      Selectivity oprrest (PlannerInfo *root,
 *                           Oid operator,
 *                           List *args,
 *                           int varRelid);
 *
 * root: general information about the query (rtable and RelOptInfo lists
 * are particularly important for the estimator).
 * operator: OID of the specific operator in question.
 * args: argument list from the operator clause.
 * varRelid: if not zero, the relid (rtable index) of the relation to
 * be treated as the variable relation.  May be zero if the args list
 * is known to contain vars of only one relation.
 *
 * This is represented at the SQL level (in pg_proc) as
 *
 *      float8 oprrest (internal, oid, internal, int4);
 *
 * The result is a selectivity, that is, a fraction (0 to 1) of the rows
 * of the relation that are expected to produce a TRUE result for the
 * given operator.
 *
 * The call convention for a join estimator (oprjoin function) is similar
 * except that varRelid is not needed, and instead join information is
 * supplied:
 *
 *      Selectivity oprjoin (PlannerInfo *root,
 *                           Oid operator,
 *                           List *args,
 *                           JoinType jointype,
 *                           SpecialJoinInfo *sjinfo);
 *
 *      float8 oprjoin (internal, oid, internal, int2, internal);
 *
 * (Before Postgres 8.4, join estimators had only the first four of these
 * parameters.  That signature is still allowed, but deprecated.)  The
 * relationship between jointype and sjinfo is explained in the comments for
 * clause_selectivity() --- the short version is that jointype is usually
 * best ignored in favor of examining sjinfo.
 *
 * Join selectivity for regular inner and outer joins is defined as the
 * fraction (0 to 1) of the cross product of the relations that is expected
 * to produce a TRUE result for the given operator.  For both semi and anti
 * joins, however, the selectivity is defined as the fraction of the left-hand
 * side relation's rows that are expected to have a match (ie, at least one
 * row with a TRUE result) in the right-hand side.
 *
 * For both oprrest and oprjoin functions, the operator's input collation OID
 * (if any) is passed using the standard fmgr mechanism, so that the estimator
 * function can fetch it with PG_GET_COLLATION().  Note, however, that all
 * statistics in pg_statistic are currently built using the relevant column's
 * collation.
 *----------
 */
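
/*
 * Illustrative sketch (compiled out; not part of PostgreSQL): a minimal
 * custom oprrest function following the convention above.  The function
 * name "my_oprrest" and its fixed result are hypothetical.  A real
 * estimator would examine "args" (e.g. with get_restriction_variable())
 * and consult pg_statistic, as the functions in this file do, and would be
 * attached to its operator via CREATE OPERATOR's RESTRICT option.
 */
#ifdef SELFUNCS_EXAMPLE_SKETCH

PG_FUNCTION_INFO_V1(my_oprrest);

Datum
my_oprrest(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);

    /* A real estimator would use root/operator/args/varRelid here. */
    (void) root;
    (void) operator;
    (void) args;
    (void) varRelid;

    /* Hypothetical fixed guess: one-third of the rows match. */
    PG_RETURN_FLOAT8((float8) (1.0 / 3.0));
}

#endif                          /* SELFUNCS_EXAMPLE_SKETCH */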

#include "postgres.h"

#include <ctype.h>
#include <math.h>

#include "access/brin.h"
#include "access/brin_page.h"
#include "access/gin.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/visibilitymap.h"
#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_statistic_ext.h"
#include "executor/nodeAgg.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "statistics/statistics.h"
#include "storage/bufmgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/date.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/index_selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
#include "utils/syscache.h"
#include "utils/timestamp.h"
#include "utils/typcache.h"


/* Hooks for plugins to get control when we ask for stats */
get_relation_stats_hook_type get_relation_stats_hook = NULL;
get_index_stats_hook_type get_index_stats_hook = NULL;

static double eqsel_internal(PG_FUNCTION_ARGS, bool negate);
static double eqjoinsel_inner(Oid opfuncoid, Oid collation,
                              VariableStatData *vardata1, VariableStatData *vardata2,
                              double nd1, double nd2,
                              bool isdefault1, bool isdefault2,
                              AttStatsSlot *sslot1, AttStatsSlot *sslot2,
                              Form_pg_statistic stats1, Form_pg_statistic stats2,
                              bool have_mcvs1, bool have_mcvs2);
static double eqjoinsel_semi(Oid opfuncoid, Oid collation,
                             VariableStatData *vardata1, VariableStatData *vardata2,
                             double nd1, double nd2,
                             bool isdefault1, bool isdefault2,
                             AttStatsSlot *sslot1, AttStatsSlot *sslot2,
                             Form_pg_statistic stats1, Form_pg_statistic stats2,
                             bool have_mcvs1, bool have_mcvs2,
                             RelOptInfo *inner_rel);
static bool estimate_multivariate_ndistinct(PlannerInfo *root,
                                            RelOptInfo *rel, List **varinfos, double *ndistinct);
static bool convert_to_scalar(Datum value, Oid valuetypid, Oid collid,
                              double *scaledvalue,
                              Datum lobound, Datum hibound, Oid boundstypid,
                              double *scaledlobound, double *scaledhibound);
static double convert_numeric_to_scalar(Datum value, Oid typid, bool *failure);
static void convert_string_to_scalar(char *value,
                                     double *scaledvalue,
                                     char *lobound,
                                     double *scaledlobound,
                                     char *hibound,
                                     double *scaledhibound);
static void convert_bytea_to_scalar(Datum value,
                                    double *scaledvalue,
                                    Datum lobound,
                                    double *scaledlobound,
                                    Datum hibound,
                                    double *scaledhibound);
static double convert_one_string_to_scalar(char *value,
                                           int rangelo, int rangehi);
static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
                                          int rangelo, int rangehi);
static char *convert_string_datum(Datum value, Oid typid, Oid collid,
                                  bool *failure);
static double convert_timevalue_to_scalar(Datum value, Oid typid,
                                          bool *failure);
static void examine_simple_variable(PlannerInfo *root, Var *var,
                                    VariableStatData *vardata);
static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
                               Oid sortop, Oid collation,
                               Datum *min, Datum *max);
static void get_stats_slot_range(AttStatsSlot *sslot,
                                 Oid opfuncoid, FmgrInfo *opproc,
                                 Oid collation, int16 typLen, bool typByVal,
                                 Datum *min, Datum *max, bool *p_have_data);
static bool get_actual_variable_range(PlannerInfo *root,
                                      VariableStatData *vardata,
                                      Oid sortop, Oid collation,
                                      Datum *min, Datum *max);
static bool get_actual_variable_endpoint(Relation heapRel,
                                         Relation indexRel,
                                         ScanDirection indexscandir,
                                         ScanKey scankeys,
                                         int16 typLen,
                                         bool typByVal,
                                         TupleTableSlot *tableslot,
                                         MemoryContext outercontext,
                                         Datum *endpointDatum);
static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);


/*
 *      eqsel           - Selectivity of "=" for any data types.
 *
 * Note: this routine is also used to estimate selectivity for some
 * operators that are not "=" but have comparable selectivity behavior,
 * such as "~=" (geometric approximate-match).  Even for "=", we must
 * keep in mind that the left and right datatypes may differ.
 */
Datum
eqsel(PG_FUNCTION_ARGS)
{
    PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
}

/*
 * Common code for eqsel() and neqsel()
 */
static double
eqsel_internal(PG_FUNCTION_ARGS, bool negate)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    Oid         collation = PG_GET_COLLATION();
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    double      selec;

    /*
     * When asked about <>, we do the estimation using the corresponding =
     * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
     */
    if (negate)
    {
        operator = get_negator(operator);
        if (!OidIsValid(operator))
        {
            /* Use default selectivity (should we raise an error instead?) */
            return 1.0 - DEFAULT_EQ_SEL;
        }
    }

    /*
     * If expression is not variable = something or something = variable, then
     * punt and return a default estimate.
     */
    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;

    /*
     * We can do a lot better if the something is a constant.  (Note: the
     * Const might result from estimation rather than being a simple constant
     * in the query.)
     */
    if (IsA(other, Const))
        selec = var_eq_const(&vardata, operator, collation,
                             ((Const *) other)->constvalue,
                             ((Const *) other)->constisnull,
                             varonleft, negate);
    else
        selec = var_eq_non_const(&vardata, operator, collation, other,
                                 varonleft, negate);

    ReleaseVariableStats(vardata);

    return selec;
}
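
/*
 * Worked example of the <> conversion above (illustrative numbers): if the
 * "=" estimate is 0.05 and stanullfrac is 0.10, then "<>" is estimated as
 * 1.0 - 0.05 - 0.10 = 0.85, since NULLs satisfy neither "=" nor "<>".
 */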

/*
 * var_eq_const --- eqsel for var = const case
 *
 * This is exported so that some other estimation functions can use it.
 */
double
var_eq_const(VariableStatData *vardata, Oid operator, Oid collation,
             Datum constval, bool constisnull,
             bool varonleft, bool negate)
{
    double      selec;
    double      nullfrac = 0.0;
    bool        isdefault;
    Oid         opfuncoid;

    /*
     * If the constant is NULL, assume operator is strict and return zero, ie,
     * operator will never return TRUE.  (It's zero even for a negator op.)
     */
    if (constisnull)
        return 0.0;

    /*
     * Grab the nullfrac for use below.  Note we allow use of nullfrac
     * regardless of security check.
     */
    if (HeapTupleIsValid(vardata->statsTuple))
    {
        Form_pg_statistic stats;

        stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
        nullfrac = stats->stanullfrac;
    }

    /*
     * If we matched the var to a unique index or DISTINCT clause, assume
     * there is exactly one match regardless of anything else.  (This is
     * slightly bogus, since the index or clause's equality operator might be
     * different from ours, but it's much more likely to be right than
     * ignoring the information.)
     */
    if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
    {
        selec = 1.0 / vardata->rel->tuples;
    }
    else if (HeapTupleIsValid(vardata->statsTuple) &&
             statistic_proc_security_check(vardata,
                                           (opfuncoid = get_opcode(operator))))
    {
        AttStatsSlot sslot;
        bool        match = false;
        int         i;

        /*
         * Is the constant "=" to any of the column's most common values?
         * (Although the given operator may not really be "=", we will assume
         * that seeing whether it returns TRUE is an appropriate test.  If you
         * don't like this, maybe you shouldn't be using eqsel for your
         * operator...)
         */
        if (get_attstatsslot(&sslot, vardata->statsTuple,
                             STATISTIC_KIND_MCV, InvalidOid,
                             ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
        {
            LOCAL_FCINFO(fcinfo, 2);
            FmgrInfo    eqproc;

            fmgr_info(opfuncoid, &eqproc);

            /*
             * Save a few cycles by setting up the fcinfo struct just once.
             * Using FunctionCallInvoke directly also avoids failure if the
             * eqproc returns NULL, though really equality functions should
             * never do that.
             */
            InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
                                     NULL, NULL);
            fcinfo->args[0].isnull = false;
            fcinfo->args[1].isnull = false;
            /* be careful to apply operator right way 'round */
            if (varonleft)
                fcinfo->args[1].value = constval;
            else
                fcinfo->args[0].value = constval;

            for (i = 0; i < sslot.nvalues; i++)
            {
                Datum       fresult;

                if (varonleft)
                    fcinfo->args[0].value = sslot.values[i];
                else
                    fcinfo->args[1].value = sslot.values[i];
                fcinfo->isnull = false;
                fresult = FunctionCallInvoke(fcinfo);
                if (!fcinfo->isnull && DatumGetBool(fresult))
                {
                    match = true;
                    break;
                }
            }
        }
        else
        {
            /* no most-common-value info available */
            i = 0;              /* keep compiler quiet */
        }

        if (match)
        {
            /*
             * Constant is "=" to this common value.  We know selectivity
             * exactly (or as exactly as ANALYZE could calculate it, anyway).
             */
            selec = sslot.numbers[i];
        }
        else
        {
            /*
             * Comparison is against a constant that is neither NULL nor any
             * of the common values.  Its selectivity cannot be more than
             * this:
             */
            double      sumcommon = 0.0;
            double      otherdistinct;

            for (i = 0; i < sslot.nnumbers; i++)
                sumcommon += sslot.numbers[i];
            selec = 1.0 - sumcommon - nullfrac;
            CLAMP_PROBABILITY(selec);

            /*
             * and in fact it's probably a good deal less.  We approximate that
             * all the not-common values share this remaining fraction
             * equally, so we divide by the number of other distinct values.
             */
            otherdistinct = get_variable_numdistinct(vardata, &isdefault) -
                sslot.nnumbers;
            if (otherdistinct > 1)
                selec /= otherdistinct;

            /*
             * Another cross-check: selectivity shouldn't be estimated as more
             * than the least common "most common value".
             */
            if (sslot.nnumbers > 0 && selec > sslot.numbers[sslot.nnumbers - 1])
                selec = sslot.numbers[sslot.nnumbers - 1];
        }

        free_attstatsslot(&sslot);
    }
    else
    {
        /*
         * No ANALYZE stats available, so make a guess using estimated number
         * of distinct values and assuming they are equally common.  (The guess
         * is unlikely to be very good, but we do know a few special cases.)
         */
        selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
    }

    /* now adjust if we wanted <> rather than = */
    if (negate)
        selec = 1.0 - selec - nullfrac;

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return selec;
}
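
/*
 * Worked example of the non-MCV branch above (illustrative numbers): with
 * stanullfrac = 0.10, three MCVs whose frequencies sum to 0.60, and 53
 * distinct values in total, the remaining fraction 1.0 - 0.60 - 0.10 = 0.30
 * is spread over the 50 non-MCV distinct values, giving
 * selec = 0.30 / 50 = 0.006 (further clamped to the least common MCV's
 * frequency if it were to exceed that).
 */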

/*
 * var_eq_non_const --- eqsel for var = something-other-than-const case
 *
 * This is exported so that some other estimation functions can use it.
 */
double
var_eq_non_const(VariableStatData *vardata, Oid operator, Oid collation,
                 Node *other,
                 bool varonleft, bool negate)
{
    double      selec;
    double      nullfrac = 0.0;
    bool        isdefault;

    /*
     * Grab the nullfrac for use below.
     */
    if (HeapTupleIsValid(vardata->statsTuple))
    {
        Form_pg_statistic stats;

        stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
        nullfrac = stats->stanullfrac;
    }

    /*
     * If we matched the var to a unique index or DISTINCT clause, assume
     * there is exactly one match regardless of anything else.  (This is
     * slightly bogus, since the index or clause's equality operator might be
     * different from ours, but it's much more likely to be right than
     * ignoring the information.)
     */
    if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
    {
        selec = 1.0 / vardata->rel->tuples;
    }
    else if (HeapTupleIsValid(vardata->statsTuple))
    {
        double      ndistinct;
        AttStatsSlot sslot;

        /*
         * Search is for a value that we do not know a priori, but we will
         * assume it is not NULL.  Estimate the selectivity as non-null
         * fraction divided by number of distinct values, so that we get a
         * result averaged over all possible values whether common or
         * uncommon.  (Essentially, we are assuming that the not-yet-known
         * comparison value is equally likely to be any of the possible
         * values, regardless of their frequency in the table.  Is that a good
         * idea?)
         */
        selec = 1.0 - nullfrac;
        ndistinct = get_variable_numdistinct(vardata, &isdefault);
        if (ndistinct > 1)
            selec /= ndistinct;

        /*
         * Cross-check: selectivity should never be estimated as more than the
         * most common value's.
         */
        if (get_attstatsslot(&sslot, vardata->statsTuple,
                             STATISTIC_KIND_MCV, InvalidOid,
                             ATTSTATSSLOT_NUMBERS))
        {
            if (sslot.nnumbers > 0 && selec > sslot.numbers[0])
                selec = sslot.numbers[0];
            free_attstatsslot(&sslot);
        }
    }
    else
    {
        /*
         * No ANALYZE stats available, so make a guess using estimated number
         * of distinct values and assuming they are equally common.  (The guess
         * is unlikely to be very good, but we do know a few special cases.)
         */
        selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
    }

    /* now adjust if we wanted <> rather than = */
    if (negate)
        selec = 1.0 - selec - nullfrac;

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return selec;
}
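
/*
 * Worked example for var_eq_non_const (illustrative numbers): with
 * stanullfrac = 0.02 and ndistinct = 49, selec = (1.0 - 0.02) / 49 = 0.02,
 * i.e. an average over all possible non-null comparison values.
 */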

/*
 *      neqsel          - Selectivity of "!=" for any data types.
 *
 * This routine is also used for some operators that are not "!="
 * but have comparable selectivity behavior.  See above comments
 * for eqsel().
 */
Datum
neqsel(PG_FUNCTION_ARGS)
{
    PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, true));
}

/*
 *      scalarineqsel       - Selectivity of "<", "<=", ">", ">=" for scalars.
 *
 * This is the guts of scalarltsel/scalarlesel/scalargtsel/scalargesel.
 * The isgt and iseq flags distinguish which of the four cases apply.
 *
 * The caller has commuted the clause, if necessary, so that we can treat
 * the variable as being on the left.  The caller must also make sure that
 * the other side of the clause is a non-null Const, and dissect that into
 * a value and datatype.  (This definition simplifies some callers that
 * want to estimate against a computed value instead of a Const node.)
 *
 * This routine works for any datatype (or pair of datatypes) known to
 * convert_to_scalar().  If it is applied to some other datatype,
 * it will return an approximate estimate based on assuming that the constant
 * value falls in the middle of the bin identified by binary search.
 */
static double
scalarineqsel(PlannerInfo *root, Oid operator, bool isgt, bool iseq,
              Oid collation,
              VariableStatData *vardata, Datum constval, Oid consttype)
{
    Form_pg_statistic stats;
    FmgrInfo    opproc;
    double      mcv_selec,
                hist_selec,
                sumcommon;
    double      selec;

    if (!HeapTupleIsValid(vardata->statsTuple))
    {
        /*
         * No stats are available.  Typically this means we have to fall back
         * on the default estimate; but if the variable is CTID then we can
         * make an estimate based on comparing the constant to the table size.
         */
        if (vardata->var && IsA(vardata->var, Var) &&
            ((Var *) vardata->var)->varattno == SelfItemPointerAttributeNumber)
        {
            ItemPointer itemptr;
            double      block;
            double      density;

            /*
             * If the relation's empty, we're going to include all of it.
             * (This is mostly to avoid divide-by-zero below.)
             */
            if (vardata->rel->pages == 0)
                return 1.0;

            itemptr = (ItemPointer) DatumGetPointer(constval);
            block = ItemPointerGetBlockNumberNoCheck(itemptr);

            /*
             * Determine the average number of tuples per page (density).
             *
             * Since the last page will, on average, be only half full, we can
             * estimate it to have half as many tuples as earlier pages.  So
             * give it half the weight of a regular page.
             */
            density = vardata->rel->tuples / (vardata->rel->pages - 0.5);

            /* If target is the last page, use half the density. */
            if (block >= vardata->rel->pages - 1)
                density *= 0.5;

            /*
             * Using the average tuples per page, calculate how far into the
             * page the itemptr is likely to be and adjust block accordingly,
             * by adding that fraction of a whole block (but never more than a
             * whole block, no matter how high the itemptr's offset is).  Here
             * we are ignoring the possibility of dead-tuple line pointers,
             * which is fairly bogus, but we lack the info to do better.
             */
            if (density > 0.0)
            {
                OffsetNumber offset = ItemPointerGetOffsetNumberNoCheck(itemptr);

                block += Min(offset / density, 1.0);
            }

            /*
             * Convert relative block number to selectivity.  Again, the last
             * page has only half weight.
             */
            selec = block / (vardata->rel->pages - 0.5);

            /*
             * The calculation so far gave us a selectivity for the "<=" case.
             * We'll have one fewer tuple for "<" and one additional tuple for
             * ">=", the latter of which we'll reverse the selectivity for
             * below, so we can simply subtract one tuple for both cases.  The
             * cases that need this adjustment can be identified by iseq being
             * equal to isgt.
             */
            if (iseq == isgt && vardata->rel->tuples >= 1.0)
                selec -= (1.0 / vardata->rel->tuples);

            /* Finally, reverse the selectivity for the ">", ">=" cases. */
            if (isgt)
                selec = 1.0 - selec;

            CLAMP_PROBABILITY(selec);
            return selec;
        }

        /* no stats available, so default result */
        return DEFAULT_INEQ_SEL;
    }
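
    /*
     * Worked example of the CTID branch above (illustrative numbers): for a
     * table with 100 pages and 9950 tuples, density = 9950 / 99.5 = 100
     * tuples/page; a TID in block 49 at offset 50 gives
     * block = 49 + Min(50/100, 1.0) = 49.5, so the "<=" selectivity is
     * 49.5 / 99.5, about 0.497.
     */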
    stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);

    fmgr_info(get_opcode(operator), &opproc);

    /*
     * If we have most-common-values info, add up the fractions of the MCV
     * entries that satisfy MCV OP CONST.  These fractions contribute directly
     * to the result selectivity.  Also add up the total fraction represented
     * by MCV entries.
     */
    mcv_selec = mcv_selectivity(vardata, &opproc, collation, constval, true,
                                &sumcommon);

    /*
     * If there is a histogram, determine which bin the constant falls in, and
     * compute the resulting contribution to selectivity.
     */
    hist_selec = ineq_histogram_selectivity(root, vardata,
                                            operator, &opproc, isgt, iseq,
                                            collation,
                                            constval, consttype);

    /*
     * Now merge the results from the MCV and histogram calculations,
     * realizing that the histogram covers only the non-null values that are
     * not listed in MCV.
     */
    selec = 1.0 - stats->stanullfrac - sumcommon;

    if (hist_selec >= 0.0)
        selec *= hist_selec;
    else
    {
        /*
         * If no histogram but there are values not accounted for by MCV,
         * arbitrarily assume half of them will match.
         */
        selec *= 0.5;
    }

    selec += mcv_selec;

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return selec;
}
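
/*
 * Worked example of the MCV/histogram merge above (illustrative numbers):
 * with stanullfrac = 0.10, sumcommon = 0.30, hist_selec = 0.25, and
 * mcv_selec = 0.12, the non-null non-MCV fraction is
 * 1.0 - 0.10 - 0.30 = 0.60, so selec = 0.60 * 0.25 + 0.12 = 0.27.
 */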

/*
 *  mcv_selectivity         - Examine the MCV list for selectivity estimates
 *
 * Determine the fraction of the variable's MCV population that satisfies
 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.  Also
 * compute the fraction of the total column population represented by the MCV
 * list.  This code will work for any boolean-returning predicate operator.
 *
 * The function result is the MCV selectivity, and the fraction of the
 * total population is returned into *sumcommonp.  Zeroes are returned
 * if there is no MCV list.
 */
double
mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation,
                Datum constval, bool varonleft,
                double *sumcommonp)
{
    double      mcv_selec,
                sumcommon;
    AttStatsSlot sslot;
    int         i;

    mcv_selec = 0.0;
    sumcommon = 0.0;

    if (HeapTupleIsValid(vardata->statsTuple) &&
        statistic_proc_security_check(vardata, opproc->fn_oid) &&
        get_attstatsslot(&sslot, vardata->statsTuple,
                         STATISTIC_KIND_MCV, InvalidOid,
                         ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    {
        LOCAL_FCINFO(fcinfo, 2);

        /*
         * We invoke the opproc "by hand" so that we won't fail on NULL
         * results.  Such cases won't arise for normal comparison functions,
         * but generic_restriction_selectivity could perhaps be used with
         * operators that can return NULL.  A small side benefit is to not
         * need to re-initialize the fcinfo struct from scratch each time.
         */
        InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
                                 NULL, NULL);
        fcinfo->args[0].isnull = false;
        fcinfo->args[1].isnull = false;
        /* be careful to apply operator right way 'round */
        if (varonleft)
            fcinfo->args[1].value = constval;
        else
            fcinfo->args[0].value = constval;

        for (i = 0; i < sslot.nvalues; i++)
        {
            Datum       fresult;

            if (varonleft)
                fcinfo->args[0].value = sslot.values[i];
            else
                fcinfo->args[1].value = sslot.values[i];
            fcinfo->isnull = false;
            fresult = FunctionCallInvoke(fcinfo);
            if (!fcinfo->isnull && DatumGetBool(fresult))
                mcv_selec += sslot.numbers[i];
            sumcommon += sslot.numbers[i];
        }
        free_attstatsslot(&sslot);
    }

    *sumcommonp = sumcommon;
    return mcv_selec;
}

/*
 *  histogram_selectivity   - Examine the histogram for selectivity estimates
 *
 * Determine the fraction of the variable's histogram entries that satisfy
 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
 *
 * This code will work for any boolean-returning predicate operator, whether
 * or not it has anything to do with the histogram sort operator.  We are
 * essentially using the histogram just as a representative sample.  However,
 * small histograms are unlikely to be all that representative, so the caller
 * should be prepared to fall back on some other estimation approach when the
 * histogram is missing or very small.  It may also be prudent to combine this
 * approach with another one when the histogram is small.
 *
 * If the actual histogram size is not at least min_hist_size, we won't bother
 * to do the calculation at all.  Also, if the n_skip parameter is > 0, we
 * ignore the first and last n_skip histogram elements, on the grounds that
 * they are outliers and hence not very representative.  Typical values for
 * these parameters are 10 and 1.
 *
 * The function result is the selectivity, or -1 if there is no histogram
 * or it's smaller than min_hist_size.
 *
 * The output parameter *hist_size receives the actual histogram size,
 * or zero if no histogram.  Callers may use this number to decide how
 * much faith to put in the function result.
 *
 * Note that the result disregards both the most-common-values (if any) and
 * null entries.  The caller is expected to combine this result with
 * statistics for those portions of the column population.  It may also be
 * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
 */
double
histogram_selectivity(VariableStatData *vardata,
                      FmgrInfo *opproc, Oid collation,
                      Datum constval, bool varonleft,
                      int min_hist_size, int n_skip,
                      int *hist_size)
{
    double      result;
    AttStatsSlot sslot;

    /* check sanity of parameters */
    Assert(n_skip >= 0);
    Assert(min_hist_size > 2 * n_skip);

    if (HeapTupleIsValid(vardata->statsTuple) &&
        statistic_proc_security_check(vardata, opproc->fn_oid) &&
        get_attstatsslot(&sslot, vardata->statsTuple,
                         STATISTIC_KIND_HISTOGRAM, InvalidOid,
                         ATTSTATSSLOT_VALUES))
    {
        *hist_size = sslot.nvalues;
        if (sslot.nvalues >= min_hist_size)
        {
            LOCAL_FCINFO(fcinfo, 2);
            int         nmatch = 0;
            int         i;

            /*
             * We invoke the opproc "by hand" so that we won't fail on NULL
             * results.  Such cases won't arise for normal comparison
             * functions, but generic_restriction_selectivity could perhaps be
             * used with operators that can return NULL.  A small side benefit
             * is to not need to re-initialize the fcinfo struct from scratch
             * each time.
             */
            InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
                                     NULL, NULL);
            fcinfo->args[0].isnull = false;
            fcinfo->args[1].isnull = false;
            /* be careful to apply operator right way 'round */
            if (varonleft)
                fcinfo->args[1].value = constval;
            else
                fcinfo->args[0].value = constval;

            for (i = n_skip; i < sslot.nvalues - n_skip; i++)
            {
                Datum       fresult;

                if (varonleft)
                    fcinfo->args[0].value = sslot.values[i];
                else
                    fcinfo->args[1].value = sslot.values[i];
                fcinfo->isnull = false;
                fresult = FunctionCallInvoke(fcinfo);
                if (!fcinfo->isnull && DatumGetBool(fresult))
                    nmatch++;
            }
            result = ((double) nmatch) / ((double) (sslot.nvalues - 2 * n_skip));
        }
        else
            result = -1;
        free_attstatsslot(&sslot);
    }
    else
    {
        *hist_size = 0;
        result = -1;
    }

    return result;
}
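
/*
 * Example for histogram_selectivity (illustrative numbers): with a
 * 100-entry histogram and n_skip = 1, the 98 interior entries are probed;
 * if 49 of them satisfy the predicate, the result is 49 / 98 = 0.5.
 */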

/*
 *  generic_restriction_selectivity    - Selectivity for almost anything
 *
 * This function estimates selectivity for operators that we don't have any
 * special knowledge about, but are on data types that we collect standard
 * MCV and/or histogram statistics for.  (Additional assumptions are that
 * the operator is strict and immutable, or at least stable.)
 *
 * If we have "VAR OP CONST" or "CONST OP VAR", selectivity is estimated by
 * applying the operator to each element of the column's MCV and/or histogram
 * stats, and merging the results using the assumption that the histogram is
 * a reasonable random sample of the column's non-MCV population.  Note that
 * if the operator's semantics are related to the histogram ordering, this
 * might not be such a great assumption; other functions such as
 * scalarineqsel() are probably a better match in such cases.
 *
 * Otherwise, fall back to the default selectivity provided by the caller.
 */
double
generic_restriction_selectivity(PlannerInfo *root, Oid oproid, Oid collation,
                                List *args, int varRelid,
                                double default_selectivity)
{
    double      selec;
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;

    /*
     * If expression is not variable OP something or something OP variable,
     * then punt and return the default estimate.
     */
    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        return default_selectivity;

    /*
     * If the something is a NULL constant, assume operator is strict and
     * return zero, ie, operator will never return TRUE.
     */
    if (IsA(other, Const) &&
        ((Const *) other)->constisnull)
    {
        ReleaseVariableStats(vardata);
        return 0.0;
    }

    if (IsA(other, Const))
    {
        /* Variable is being compared to a known non-null constant */
        Datum       constval = ((Const *) other)->constvalue;
        FmgrInfo    opproc;
        double      mcvsum;
        double      mcvsel;
        double      nullfrac;
        int         hist_size;

        fmgr_info(get_opcode(oproid), &opproc);

        /*
         * Calculate the selectivity for the column's most common values.
         */
        mcvsel = mcv_selectivity(&vardata, &opproc, collation,
                                 constval, varonleft,
                                 &mcvsum);

        /*
         * If the histogram is large enough, see what fraction of it matches
         * the query, and assume that's representative of the non-MCV
         * population.  Otherwise use the default selectivity for the non-MCV
         * population.
         */
        selec = histogram_selectivity(&vardata, &opproc, collation,
                                      constval, varonleft,
                                      10, 1, &hist_size);
        if (selec < 0)
        {
            /* Nope, fall back on default */
            selec = default_selectivity;
        }
        else if (hist_size < 100)
        {
            /*
             * For histogram sizes from 10 to 100, we combine the histogram
             * and default selectivities, putting increasingly more trust in
             * the histogram for larger sizes.
             */
            double      hist_weight = hist_size / 100.0;

            selec = selec * hist_weight +
                default_selectivity * (1.0 - hist_weight);
        }

        /* In any case, don't believe extremely small or large estimates. */
        if (selec < 0.0001)
            selec = 0.0001;
        else if (selec > 0.9999)
            selec = 0.9999;

        /* Don't forget to account for nulls. */
        if (HeapTupleIsValid(vardata.statsTuple))
            nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
        else
            nullfrac = 0.0;

        /*
         * Now merge the results from the MCV and histogram calculations,
         * realizing that the histogram covers only the non-null values that
         * are not listed in MCV.
         */
        selec *= 1.0 - nullfrac - mcvsum;
        selec += mcvsel;
    }
    else
    {
        /* Comparison value is not constant, so we can't do anything */
        selec = default_selectivity;
    }

    ReleaseVariableStats(vardata);

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return selec;
}
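
/*
 * Worked example of the blending above (illustrative numbers): with
 * hist_size = 40 the histogram weight is 0.4, so a histogram estimate of
 * 0.20 and a default of 0.50 combine to 0.20 * 0.4 + 0.50 * 0.6 = 0.38,
 * before the null and MCV corrections.
 */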

/*
 *  ineq_histogram_selectivity  - Examine the histogram for scalarineqsel
 *
 * Determine the fraction of the variable's histogram population that
 * satisfies the inequality condition, ie, VAR < (or <=, >, >=) CONST.
 * The isgt and iseq flags distinguish which of the four cases apply.
 *
 * While opproc could be looked up from the operator OID, common callers
 * also need to call it separately, so we make the caller pass both.
 *
 * Returns -1 if there is no histogram (valid results will always be >= 0).
 *
 * Note that the result disregards both the most-common-values (if any) and
 * null entries.  The caller is expected to combine this result with
 * statistics for those portions of the column population.
 *
 * This is exported so that some other estimation functions can use it.
 */
double
ineq_histogram_selectivity(PlannerInfo *root,
                           VariableStatData *vardata,
                           Oid opoid, FmgrInfo *opproc, bool isgt, bool iseq,
                           Oid collation,
                           Datum constval, Oid consttype)
{
    double      hist_selec;
    AttStatsSlot sslot;

    hist_selec = -1.0;

    /*
     * Someday, ANALYZE might store more than one histogram per rel/att,
     * corresponding to more than one possible sort ordering defined for the
     * column type.  Right now, we know there is only one, so just grab it and
     * see if it matches the query.
     *
     * Note that we can't use opoid as search argument; the staop appearing in
     * pg_statistic will be for the relevant '<' operator, but what we have
     * might be some other inequality operator such as '>='.  (Even if opoid
     * is a '<' operator, it could be cross-type.)  Hence we must use
     * comparison_ops_are_compatible() to see if the operators match.
     */
    if (HeapTupleIsValid(vardata->statsTuple) &&
        statistic_proc_security_check(vardata, opproc->fn_oid) &&
        get_attstatsslot(&sslot, vardata->statsTuple,
                         STATISTIC_KIND_HISTOGRAM, InvalidOid,
                         ATTSTATSSLOT_VALUES))
    {
        if (sslot.nvalues > 1 &&
            sslot.stacoll == collation &&
            comparison_ops_are_compatible(sslot.staop, opoid))
        {
            /*
             * Use binary search to find the desired location, namely the
             * right end of the histogram bin containing the comparison value,
             * which is the leftmost entry for which the comparison operator
             * succeeds (if isgt) or fails (if !isgt).
             *
             * In this loop, we pay no attention to whether the operator iseq
             * or not; that detail will be mopped up below.  (We cannot tell,
             * anyway, whether the operator thinks the values are equal.)
             *
             * If the binary search accesses the first or last histogram
             * entry, we try to replace that endpoint with the true column min
             * or max as found by get_actual_variable_range().  This
             * ameliorates misestimates when the min or max is moving as a
             * result of changes since the last ANALYZE.  Note that this could
             * result in effectively including MCVs into the histogram that
             * weren't there before, but we don't try to correct for that.
             */
            double      histfrac;
            int         lobound = 0;    /* first possible slot to search */
            int         hibound = sslot.nvalues;    /* last+1 slot to search */
            bool        have_end = false;

            /*
             * If there are only two histogram entries, we'll want up-to-date
             * values for both.  (If there are more than two, we need at most
             * one of them to be updated, so we deal with that within the
             * loop.)
             */
            if (sslot.nvalues == 2)
                have_end = get_actual_variable_range(root,
                                                     vardata,
                                                     sslot.staop,
                                                     collation,
                                                     &sslot.values[0],
                                                     &sslot.values[1]);

            while (lobound < hibound)
            {
                int         probe = (lobound + hibound) / 2;
                bool        ltcmp;

                /*
                 * If we find ourselves about to compare to the first or last
                 * histogram entry, first try to replace it with the actual
                 * current min or max (unless we already did so above).
                 */
                if (probe == 0 && sslot.nvalues > 2)
                    have_end = get_actual_variable_range(root,
                                                         vardata,
                                                         sslot.staop,
                                                         collation,
                                                         &sslot.values[0],
                                                         NULL);
                else if (probe == sslot.nvalues - 1 && sslot.nvalues > 2)
                    have_end = get_actual_variable_range(root,
                                                         vardata,
                                                         sslot.staop,
                                                         collation,
                                                         NULL,
                                                         &sslot.values[probe]);

                ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
                                                       collation,
                                                       sslot.values[probe],
                                                       constval));
                if (isgt)
                    ltcmp = !ltcmp;
                if (ltcmp)
                    lobound = probe + 1;
                else
                    hibound = probe;
            }

            if (lobound <= 0)
            {
                /*
                 * Constant is below lower histogram boundary.  More
                 * precisely, we have found that no entry in the histogram
                 * satisfies the inequality clause (if !isgt) or they all do
                 * (if isgt).  We estimate that that's true of the entire
                 * table, so set histfrac to 0.0 (which we'll flip to 1.0
                 * below, if isgt).
                 */
                histfrac = 0.0;
            }
            else if (lobound >= sslot.nvalues)
            {
                /*
                 * Inverse case: constant is above upper histogram boundary.
                 */
                histfrac = 1.0;
            }
            else
            {
                /* We have values[i-1] <= constant <= values[i]. */
                int         i = lobound;
                double      eq_selec = 0;
                double      val,
                            high,
                            low;
                double      binfrac;

                /*
                 * In the cases where we'll need it below, obtain an estimate
                 * of the selectivity of "x = constval".  We use a calculation
                 * similar to what var_eq_const() does for a non-MCV constant,
                 * ie, estimate that all distinct non-MCV values occur equally
                 * often.  But multiplication by "1.0 - sumcommon - nullfrac"
                 * will be done by our caller, so we shouldn't do that here.
                 * Therefore we can't try to clamp the estimate by reference
                 * to the least common MCV; the result would be too small.
                 *
                 * Note: since this is effectively assuming that constval
                 * isn't an MCV, it's logically dubious if constval in fact is
                 * one.  But we have to apply *some* correction for equality,
                 * and anyway we cannot tell if constval is an MCV, since we
                 * don't have a suitable equality operator at hand.
                 */
                if (i == 1 || isgt == iseq)
                {
                    double      otherdistinct;
                    bool        isdefault;
                    AttStatsSlot mcvslot;

                    /* Get estimated number of distinct values */
                    otherdistinct = get_variable_numdistinct(vardata,
                                                             &isdefault);

                    /* Subtract off the number of known MCVs */
                    if (get_attstatsslot(&mcvslot, vardata->statsTuple,
                                         STATISTIC_KIND_MCV, InvalidOid,
                                         ATTSTATSSLOT_NUMBERS))
                    {
                        otherdistinct -= mcvslot.nnumbers;
                        free_attstatsslot(&mcvslot);
                    }

                    /* If result doesn't seem sane, leave eq_selec at 0 */
                    if (otherdistinct > 1)
                        eq_selec = 1.0 / otherdistinct;
                }

                /*
                 * Convert the constant and the two nearest bin boundary
                 * values to a uniform comparison scale, and do a linear
                 * interpolation within this bin.
                 */
                if (convert_to_scalar(constval, consttype, collation,
                                      &val,
                                      sslot.values[i - 1], sslot.values[i],
                                      vardata->vartype,
                                      &low, &high))
                {
                    if (high <= low)
                    {
                        /* cope if bin boundaries appear identical */
                        binfrac = 0.5;
                    }
                    else if (val <= low)
                        binfrac = 0.0;
                    else if (val >= high)
                        binfrac = 1.0;
                    else
                    {
                        binfrac = (val - low) / (high - low);

                        /*
                         * Watch out for the possibility that we got a NaN or
                         * Infinity from the division.  This can happen
                         * despite the previous checks, if for example "low"
                         * is -Infinity.
                         */
                        if (isnan(binfrac) ||
                            binfrac < 0.0 || binfrac > 1.0)
                            binfrac = 0.5;
                    }
                }
                else
                {
                    /*
                     * Ideally we'd produce an error here, on the grounds that
                     * the given operator shouldn't have scalarXXsel
                     * registered as its selectivity func unless we can deal
                     * with its operand types.  But currently, all manner of
                     * stuff is invoking scalarXXsel, so give a default
                     * estimate until that can be fixed.
                     */
                    binfrac = 0.5;
                }

                /*
                 * Now, compute the overall selectivity across the values
                 * represented by the histogram.  We have i-1 full bins and
                 * binfrac partial bin below the constant.
                 */
                histfrac = (double) (i - 1) + binfrac;
                histfrac /= (double) (sslot.nvalues - 1);

                /*
                 * At this point, histfrac is an estimate of the fraction of
                 * the population represented by the histogram that satisfies
                 * "x <= constval".  Somewhat remarkably, this statement is
                 * true regardless of which operator we were doing the probes
                 * with, so long as convert_to_scalar() delivers reasonable
                 * results.  If the probe constant is equal to some histogram
                 * entry, we would have considered the bin to the left of that
                 * entry if probing with "<" or ">=", or the bin to the right
                 * if probing with "<=" or ">"; but binfrac would have come
                 * out as 1.0 in the first case and 0.0 in the second, leading
                 * to the same histfrac in either case.  For probe constants
                 * between histogram entries, we find the same bin and get the
                 * same estimate with any operator.
                 *
                 * The fact that the estimate corresponds to "x <= constval"
                 * and not "x < constval" is because of the way that ANALYZE
                 * constructs the histogram: each entry is, effectively, the
                 * rightmost value in its sample bucket.  So selectivity
                 * values that are exact multiples of 1/(histogram_size-1)
                 * should be understood as estimates including a histogram
                 * entry plus everything to its left.
                 *
                 * However, that breaks down for the first histogram entry,
                 * which necessarily is the leftmost value in its sample
                 * bucket.  That means the first histogram bin is slightly
                 * narrower than the rest, by an amount equal to eq_selec.
                 * Another way to say that is that we want "x <= leftmost" to
                 * be estimated as eq_selec not zero.  So, if we're dealing
                 * with the first bin (i==1), rescale to make that true while
                 * adjusting the rest of that bin linearly.
                 */
                if (i == 1)
                    histfrac += eq_selec * (1.0 - binfrac);

                /*
                 * "x <= constval" is good if we want an estimate for "<=" or
                 * ">", but if we are estimating for "<" or ">=", we now need
                 * to decrease the estimate by eq_selec.
                 */
                if (isgt == iseq)
                    histfrac -= eq_selec;
            }

            /*
             * Now the estimate is finished for "<" and "<=" cases.  If we are
             * estimating for ">" or ">=", flip it.
             */
            hist_selec = isgt ? (1.0 - histfrac) : histfrac;

            /*
             * The histogram boundaries are only approximate to begin with,
             * and may well be out of date anyway.  Therefore, don't believe
             * extremely small or large selectivity estimates --- unless we
             * got actual current endpoint values from the table, in which
             * case just do the usual sanity clamp.  Somewhat arbitrarily, we
             * set the cutoff for other cases at a hundredth of the histogram
             * resolution.
             */
            if (have_end)
                CLAMP_PROBABILITY(hist_selec);
            else
            {
                double      cutoff = 0.01 / (double) (sslot.nvalues - 1);

                if (hist_selec < cutoff)
                    hist_selec = cutoff;
                else if (hist_selec > 1.0 - cutoff)
                    hist_selec = 1.0 - cutoff;
            }
        }
        else if (sslot.nvalues > 1)
        {
            /*
             * If we get here, we have a histogram but it's not sorted the way
             * we want.  Do a brute-force search to see how many of the
             * entries satisfy the comparison condition, and take that
             * fraction as our estimate.  (This is identical to the inner loop
             * of histogram_selectivity; maybe share code?)
             */
            LOCAL_FCINFO(fcinfo, 2);
            int         nmatch = 0;

            InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
                                     NULL, NULL);
            fcinfo->args[0].isnull = false;
            fcinfo->args[1].isnull = false;
            fcinfo->args[1].value = constval;
            for (int i = 0; i < sslot.nvalues; i++)
            {
                Datum       fresult;

                fcinfo->args[0].value = sslot.values[i];
                fcinfo->isnull = false;
                fresult = FunctionCallInvoke(fcinfo);
                if (!fcinfo->isnull && DatumGetBool(fresult))
                    nmatch++;
            }
            hist_selec = ((double) nmatch) / ((double) sslot.nvalues);

            /*
             * As above, clamp to a hundredth of the histogram resolution.
             * This case is surely even less trustworthy than the normal one,
             * so we shouldn't believe exact 0 or 1 selectivity.  (Maybe the
             * clamp should be more restrictive in this case?)
             */
            {
                double      cutoff = 0.01 / (double) (sslot.nvalues - 1);

                if (hist_selec < cutoff)
                    hist_selec = cutoff;
                else if (hist_selec > 1.0 - cutoff)
                    hist_selec = 1.0 - cutoff;
            }
        }

        free_attstatsslot(&sslot);
    }

    return hist_selec;
}
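
/*
 * Worked example of the interpolation above (illustrative numbers): an
 * 11-entry histogram has 10 bins; if the constant lands in the bin with
 * i = 4 at binfrac = 0.25, then histfrac = (4 - 1 + 0.25) / 10 = 0.325,
 * an estimate of the histogram fraction satisfying "x <= constval" before
 * the isgt/iseq adjustments.
 */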

/*
 * Common wrapper function for the selectivity estimators that simply
 * invoke scalarineqsel().
 */
static Datum
scalarineqsel_wrapper(PG_FUNCTION_ARGS, bool isgt, bool iseq)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    Oid         collation = PG_GET_COLLATION();
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    Datum       constval;
    Oid         consttype;
    double      selec;

    /*
     * If expression is not variable op something or something op variable,
     * then punt and return a default estimate.
     */
    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);

    /*
     * Can't do anything useful if the something is not a constant, either.
     */
    if (!IsA(other, Const))
    {
        ReleaseVariableStats(vardata);
        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
    }

    /*
     * If the constant is NULL, assume operator is strict and return zero, ie,
     * operator will never return TRUE.
     */
    if (((Const *) other)->constisnull)
    {
        ReleaseVariableStats(vardata);
        PG_RETURN_FLOAT8(0.0);
    }
    constval = ((Const *) other)->constvalue;
    consttype = ((Const *) other)->consttype;

    /*
     * Force the var to be on the left to simplify logic in scalarineqsel.
     */
    if (!varonleft)
    {
        operator = get_commutator(operator);
        if (!operator)
        {
            /* Use default selectivity (should we raise an error instead?) */
            ReleaseVariableStats(vardata);
            PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
        }
        isgt = !isgt;
    }

    /* The rest of the work is done by scalarineqsel(). */
    selec = scalarineqsel(root, operator, isgt, iseq, collation,
                          &vardata, constval, consttype);

    ReleaseVariableStats(vardata);

    PG_RETURN_FLOAT8((float8) selec);
}

/*
 *      scalarltsel     - Selectivity of "<" for scalars.
 */
Datum
scalarltsel(PG_FUNCTION_ARGS)
{
    return scalarineqsel_wrapper(fcinfo, false, false);
}

/*
 *      scalarlesel     - Selectivity of "<=" for scalars.
 */
Datum
scalarlesel(PG_FUNCTION_ARGS)
{
    return scalarineqsel_wrapper(fcinfo, false, true);
}

/*
 *      scalargtsel     - Selectivity of ">" for scalars.
 */
Datum
scalargtsel(PG_FUNCTION_ARGS)
{
    return scalarineqsel_wrapper(fcinfo, true, false);
}

/*
 *      scalargesel     - Selectivity of ">=" for scalars.
 */
Datum
scalargesel(PG_FUNCTION_ARGS)
{
    return scalarineqsel_wrapper(fcinfo, true, true);
}

/*
 *      boolvarsel      - Selectivity of Boolean variable.
 *
 * This can actually be called on any boolean-valued expression.  If it
 * involves only Vars of the specified relation, and if there are statistics
 * about the Var or expression (the latter is possible if it's indexed) then
 * we'll produce a real estimate; otherwise it's just a default.
 */
Selectivity
boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
{
    VariableStatData vardata;
    double      selec;

    examine_variable(root, arg, varRelid, &vardata);
    if (HeapTupleIsValid(vardata.statsTuple))
    {
        /*
         * A boolean variable V is equivalent to the clause V = 't', so we
         * compute the selectivity as if that is what we have.
         */
        selec = var_eq_const(&vardata, BooleanEqualOperator, InvalidOid,
                             BoolGetDatum(true), false, true, false);
    }
    else
    {
        /* Otherwise, the default estimate is 0.5 */
        selec = 0.5;
    }
    ReleaseVariableStats(vardata);
    return selec;
}

/*
 *      booltestsel     - Selectivity of BooleanTest Node.
 */
Selectivity
booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
            int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
{
    VariableStatData vardata;
    double      selec;

    examine_variable(root, arg, varRelid, &vardata);

    if (HeapTupleIsValid(vardata.statsTuple))
    {
        Form_pg_statistic stats;
        double      freq_null;
        AttStatsSlot sslot;

        stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
        freq_null = stats->stanullfrac;

        if (get_attstatsslot(&sslot, vardata.statsTuple,
                             STATISTIC_KIND_MCV, InvalidOid,
                             ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)
            && sslot.nnumbers > 0)
        {
            double      freq_true;
            double      freq_false;

            /*
             * Get first MCV frequency and derive frequency for true.
             */
            if (DatumGetBool(sslot.values[0]))
                freq_true = sslot.numbers[0];
            else
                freq_true = 1.0 - sslot.numbers[0] - freq_null;

            /*
             * Next derive frequency for false.  Then use these as appropriate
             * to derive frequency for each case.
             */
            freq_false = 1.0 - freq_true - freq_null;

            switch (booltesttype)
            {
                case IS_UNKNOWN:
                    /* select only NULL values */
                    selec = freq_null;
                    break;
                case IS_NOT_UNKNOWN:
                    /* select non-NULL values */
                    selec = 1.0 - freq_null;
                    break;
                case IS_TRUE:
                    /* select only TRUE values */
                    selec = freq_true;
                    break;
                case IS_NOT_TRUE:
                    /* select non-TRUE values */
                    selec = 1.0 - freq_true;
                    break;
                case IS_FALSE:
                    /* select only FALSE values */
                    selec = freq_false;
                    break;
                case IS_NOT_FALSE:
                    /* select non-FALSE values */
                    selec = 1.0 - freq_false;
                    break;
                default:
                    elog(ERROR, "unrecognized booltesttype: %d",
                         (int) booltesttype);
                    selec = 0.0;    /* Keep compiler quiet */
                    break;
            }

            free_attstatsslot(&sslot);
        }
        else
        {
            /*
             * No most-common-value info available.  Still have null fraction
             * information, so use it for IS [NOT] UNKNOWN.  Otherwise adjust
             * for null fraction and assume a 50-50 split of TRUE and FALSE.
             */
            switch (booltesttype)
            {
                case IS_UNKNOWN:
                    /* select only NULL values */
                    selec = freq_null;
                    break;
                case IS_NOT_UNKNOWN:
                    /* select non-NULL values */
                    selec = 1.0 - freq_null;
                    break;
                case IS_TRUE:
                case IS_FALSE:
                    /* Assume we select half of the non-NULL values */
                    selec = (1.0 - freq_null) / 2.0;
                    break;
                case IS_NOT_TRUE:
                case IS_NOT_FALSE:
                    /* Assume we select NULLs plus half of the non-NULLs */
                    /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
                    selec = (freq_null + 1.0) / 2.0;
                    break;
                default:
                    elog(ERROR, "unrecognized booltesttype: %d",
                         (int) booltesttype);
                    selec = 0.0;    /* Keep compiler quiet */
                    break;
            }
        }
    }
    else
    {
        /*
         * If we can't get variable statistics for the argument, perhaps
         * clause_selectivity can do something with it.  We ignore the
         * possibility of a NULL value when using clause_selectivity, and just
         * assume the value is either TRUE or FALSE.
         */
        switch (booltesttype)
        {
            case IS_UNKNOWN:
                selec = DEFAULT_UNK_SEL;
                break;
            case IS_NOT_UNKNOWN:
                selec = DEFAULT_NOT_UNK_SEL;
                break;
            case IS_TRUE:
            case IS_NOT_FALSE:
                selec = (double) clause_selectivity(root, arg,
                                                    varRelid,
                                                    jointype, sjinfo);
                break;
            case IS_FALSE:
            case IS_NOT_TRUE:
                selec = 1.0 - (double) clause_selectivity(root, arg,
                                                          varRelid,
                                                          jointype, sjinfo);
                break;
            default:
                elog(ERROR, "unrecognized booltesttype: %d",
                     (int) booltesttype);
                selec = 0.0;    /* Keep compiler quiet */
                break;
        }
    }

    ReleaseVariableStats(vardata);

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return (Selectivity) selec;
}
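
/*
 * Example for booltestsel's MCV branch (illustrative numbers): if the MCV
 * slot gives "true" a frequency of 0.60 and stanullfrac is 0.10, then
 * freq_false = 1.0 - 0.60 - 0.10 = 0.30, so IS_NOT_TRUE is estimated as
 * 1.0 - 0.60 = 0.40 (the FALSE rows plus the NULLs).
 */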

/*
 *      nulltestsel     - Selectivity of NullTest Node.
 */
Selectivity
nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
            int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
{
    VariableStatData vardata;
    double      selec;

    examine_variable(root, arg, varRelid, &vardata);

    if (HeapTupleIsValid(vardata.statsTuple))
    {
        Form_pg_statistic stats;
        double      freq_null;

        stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
        freq_null = stats->stanullfrac;

        switch (nulltesttype)
        {
            case IS_NULL:

                /*
                 * Use freq_null directly.
                 */
                selec = freq_null;
                break;
            case IS_NOT_NULL:

                /*
                 * Select not unknown (not null) values.  Calculate from
                 * freq_null.
                 */
                selec = 1.0 - freq_null;
                break;
            default:
                elog(ERROR, "unrecognized nulltesttype: %d",
                     (int) nulltesttype);
                return (Selectivity) 0; /* keep compiler quiet */
        }
    }
    else if (vardata.var && IsA(vardata.var, Var) &&
             ((Var *) vardata.var)->varattno < 0)
    {
        /*
         * There are no stats for system columns, but we know they are never
         * NULL.
         */
        selec = (nulltesttype == IS_NULL) ? 0.0 : 1.0;
    }
    else
    {
        /*
         * No ANALYZE stats available, so make a guess
         */
        switch (nulltesttype)
        {
            case IS_NULL:
                selec = DEFAULT_UNK_SEL;
                break;
            case IS_NOT_NULL:
                selec = DEFAULT_NOT_UNK_SEL;
                break;
            default:
                elog(ERROR, "unrecognized nulltesttype: %d",
                     (int) nulltesttype);
                return (Selectivity) 0; /* keep compiler quiet */
        }
    }

    ReleaseVariableStats(vardata);

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return (Selectivity) selec;
}
1770 
1771 /*
1772  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1773  *
1774  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1775  * but it seems possible that RelabelType might show up. Also, the planner
1776  * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1777  * so we need to be ready to deal with more than one level.
1778  */
1779 static Node *
1780 strip_array_coercion(Node *node)
1781 {
1782  for (;;)
1783  {
1784  if (node && IsA(node, ArrayCoerceExpr))
1785  {
1786  ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
1787 
1788  /*
1789  * If the per-element expression is just a RelabelType on top of
1790  * CaseTestExpr, then we know it's a binary-compatible relabeling.
1791  */
1792  if (IsA(acoerce->elemexpr, RelabelType) &&
1793  IsA(((RelabelType *) acoerce->elemexpr)->arg, CaseTestExpr))
1794  node = (Node *) acoerce->arg;
1795  else
1796  break;
1797  }
1798  else if (node && IsA(node, RelabelType))
1799  {
1800  /* We don't really expect this case, but may as well cope */
1801  node = (Node *) ((RelabelType *) node)->arg;
1802  }
1803  else
1804  break;
1805  }
1806  return node;
1807 }
1808 
1809 /*
1810  * scalararraysel - Selectivity of ScalarArrayOpExpr Node.
1811  */
1812 Selectivity
1813 scalararraysel(PlannerInfo *root,
1814  ScalarArrayOpExpr *clause,
1815  bool is_join_clause,
1816  int varRelid,
1817  JoinType jointype,
1818  SpecialJoinInfo *sjinfo)
1819 {
1820  Oid operator = clause->opno;
1821  bool useOr = clause->useOr;
1822  bool isEquality = false;
1823  bool isInequality = false;
1824  Node *leftop;
1825  Node *rightop;
1826  Oid nominal_element_type;
1827  Oid nominal_element_collation;
1828  TypeCacheEntry *typentry;
1829  RegProcedure oprsel;
1830  FmgrInfo oprselproc;
1831  Selectivity s1;
1832  Selectivity s1disjoint;
1833 
1834  /* First, deconstruct the expression */
1835  Assert(list_length(clause->args) == 2);
1836  leftop = (Node *) linitial(clause->args);
1837  rightop = (Node *) lsecond(clause->args);
1838 
1839  /* aggressively reduce both sides to constants */
1840  leftop = estimate_expression_value(root, leftop);
1841  rightop = estimate_expression_value(root, rightop);
1842 
1843  /* get nominal (after relabeling) element type of rightop */
1844  nominal_element_type = get_base_element_type(exprType(rightop));
1845  if (!OidIsValid(nominal_element_type))
1846  return (Selectivity) 0.5; /* probably shouldn't happen */
1847  /* get nominal collation, too, for generating constants */
1848  nominal_element_collation = exprCollation(rightop);
1849 
1850  /* look through any binary-compatible relabeling of rightop */
1851  rightop = strip_array_coercion(rightop);
1852 
1853  /*
1854  * Detect whether the operator is the default equality or inequality
1855  * operator of the array element type.
1856  */
1857  typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1858  if (OidIsValid(typentry->eq_opr))
1859  {
1860  if (operator == typentry->eq_opr)
1861  isEquality = true;
1862  else if (get_negator(operator) == typentry->eq_opr)
1863  isInequality = true;
1864  }
1865 
1866  /*
1867  * If it is equality or inequality, we might be able to estimate this as a
1868  * form of array containment; for instance "const = ANY(column)" can be
1869  * treated as "ARRAY[const] <@ column". scalararraysel_containment tries
1870  * that, and returns the selectivity estimate if successful, or -1 if not.
1871  */
1872  if ((isEquality || isInequality) && !is_join_clause)
1873  {
1874  s1 = scalararraysel_containment(root, leftop, rightop,
1875  nominal_element_type,
1876  isEquality, useOr, varRelid);
1877  if (s1 >= 0.0)
1878  return s1;
1879  }
1880 
1881  /*
1882  * Look up the underlying operator's selectivity estimator. Punt if it
1883  * hasn't got one.
1884  */
1885  if (is_join_clause)
1886  oprsel = get_oprjoin(operator);
1887  else
1888  oprsel = get_oprrest(operator);
1889  if (!oprsel)
1890  return (Selectivity) 0.5;
1891  fmgr_info(oprsel, &oprselproc);
1892 
1893  /*
1894  * In the array-containment check above, we must only believe that an
1895  * operator is equality or inequality if it is the default btree equality
1896  * operator (or its negator) for the element type, since those are the
1897  * operators that array containment will use. But in what follows, we can
1898  * be a little laxer, and also believe that any operators using eqsel() or
1899  * neqsel() as selectivity estimator act like equality or inequality.
1900  */
1901  if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1902  isEquality = true;
1903  else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1904  isInequality = true;
1905 
1906  /*
1907  * We consider three cases:
1908  *
1909  * 1. rightop is an Array constant: deconstruct the array, apply the
1910  * operator's selectivity function for each array element, and merge the
1911  * results in the same way that clausesel.c does for AND/OR combinations.
1912  *
1913  * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1914  * function for each element of the ARRAY[] construct, and merge.
1915  *
1916  * 3. otherwise, make a guess ...
1917  */
1918  if (rightop && IsA(rightop, Const))
1919  {
1920  Datum arraydatum = ((Const *) rightop)->constvalue;
1921  bool arrayisnull = ((Const *) rightop)->constisnull;
1922  ArrayType *arrayval;
1923  int16 elmlen;
1924  bool elmbyval;
1925  char elmalign;
1926  int num_elems;
1927  Datum *elem_values;
1928  bool *elem_nulls;
1929  int i;
1930 
1931  if (arrayisnull) /* qual can't succeed if null array */
1932  return (Selectivity) 0.0;
1933  arrayval = DatumGetArrayTypeP(arraydatum);
1934  get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1935  &elmlen, &elmbyval, &elmalign);
1936  deconstruct_array(arrayval,
1937  ARR_ELEMTYPE(arrayval),
1938  elmlen, elmbyval, elmalign,
1939  &elem_values, &elem_nulls, &num_elems);
1940 
1941  /*
1942  * For generic operators, we assume the probability of success is
1943  * independent for each array element. But for "= ANY" or "<> ALL",
1944  * if the array elements are distinct (which'd typically be the case)
1945  * then the probabilities are disjoint, and we should just sum them.
1946  *
1947  * If we were being really tense we would try to confirm that the
1948  * elements are all distinct, but that would be expensive and it
1949  * doesn't seem to be worth the cycles; it would amount to penalizing
1950  * well-written queries in favor of poorly-written ones. However, we
1951  * do protect ourselves a little bit by checking whether the
1952  * disjointness assumption leads to an impossible (out of range)
1953  * probability; if so, we fall back to the normal calculation.
1954  */
1955  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1956 
1957  for (i = 0; i < num_elems; i++)
1958  {
1959  List *args;
1960  Selectivity s2;
1961 
1962  args = list_make2(leftop,
1963  makeConst(nominal_element_type,
1964  -1,
1965  nominal_element_collation,
1966  elmlen,
1967  elem_values[i],
1968  elem_nulls[i],
1969  elmbyval));
1970  if (is_join_clause)
1971  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1972  clause->inputcollid,
1973  PointerGetDatum(root),
1974  ObjectIdGetDatum(operator),
1975  PointerGetDatum(args),
1976  Int16GetDatum(jointype),
1977  PointerGetDatum(sjinfo)));
1978  else
1979  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1980  clause->inputcollid,
1981  PointerGetDatum(root),
1982  ObjectIdGetDatum(operator),
1983  PointerGetDatum(args),
1984  Int32GetDatum(varRelid)));
1985 
1986  if (useOr)
1987  {
1988  s1 = s1 + s2 - s1 * s2;
1989  if (isEquality)
1990  s1disjoint += s2;
1991  }
1992  else
1993  {
1994  s1 = s1 * s2;
1995  if (isInequality)
1996  s1disjoint += s2 - 1.0;
1997  }
1998  }
1999 
2000  /* accept disjoint-probability estimate if in range */
2001  if ((useOr ? isEquality : isInequality) &&
2002  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2003  s1 = s1disjoint;
2004  }
2005  else if (rightop && IsA(rightop, ArrayExpr) &&
2006  !((ArrayExpr *) rightop)->multidims)
2007  {
2008  ArrayExpr *arrayexpr = (ArrayExpr *) rightop;
2009  int16 elmlen;
2010  bool elmbyval;
2011  ListCell *l;
2012 
2013  get_typlenbyval(arrayexpr->element_typeid,
2014  &elmlen, &elmbyval);
2015 
2016  /*
2017  * We use the assumption of disjoint probabilities here too, although
2018  * the odds of equal array elements are rather higher if the elements
2019  * are not all constants (which they won't be, else constant folding
2020  * would have reduced the ArrayExpr to a Const). In this path it's
2021  * critical to have the sanity check on the s1disjoint estimate.
2022  */
2023  s1 = s1disjoint = (useOr ? 0.0 : 1.0);
2024 
2025  foreach(l, arrayexpr->elements)
2026  {
2027  Node *elem = (Node *) lfirst(l);
2028  List *args;
2029  Selectivity s2;
2030 
2031  /*
2032  * Theoretically, if elem isn't of nominal_element_type we should
2033  * insert a RelabelType, but it seems unlikely that any operator
2034  * estimation function would really care ...
2035  */
2036  args = list_make2(leftop, elem);
2037  if (is_join_clause)
2038  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2039  clause->inputcollid,
2040  PointerGetDatum(root),
2041  ObjectIdGetDatum(operator),
2042  PointerGetDatum(args),
2043  Int16GetDatum(jointype),
2044  PointerGetDatum(sjinfo)));
2045  else
2046  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2047  clause->inputcollid,
2048  PointerGetDatum(root),
2049  ObjectIdGetDatum(operator),
2050  PointerGetDatum(args),
2051  Int32GetDatum(varRelid)));
2052 
2053  if (useOr)
2054  {
2055  s1 = s1 + s2 - s1 * s2;
2056  if (isEquality)
2057  s1disjoint += s2;
2058  }
2059  else
2060  {
2061  s1 = s1 * s2;
2062  if (isInequality)
2063  s1disjoint += s2 - 1.0;
2064  }
2065  }
2066 
2067  /* accept disjoint-probability estimate if in range */
2068  if ((useOr ? isEquality : isInequality) &&
2069  s1disjoint >= 0.0 && s1disjoint <= 1.0)
2070  s1 = s1disjoint;
2071  }
2072  else
2073  {
2074  CaseTestExpr *dummyexpr;
2075  List *args;
2076  Selectivity s2;
2077  int i;
2078 
2079  /*
2080  * We need a dummy rightop to pass to the operator selectivity
2081  * routine. It can be pretty much anything that doesn't look like a
2082  * constant; CaseTestExpr is a convenient choice.
2083  */
2084  dummyexpr = makeNode(CaseTestExpr);
2085  dummyexpr->typeId = nominal_element_type;
2086  dummyexpr->typeMod = -1;
2087  dummyexpr->collation = clause->inputcollid;
2088  args = list_make2(leftop, dummyexpr);
2089  if (is_join_clause)
2090  s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2091  clause->inputcollid,
2092  PointerGetDatum(root),
2093  ObjectIdGetDatum(operator),
2094  PointerGetDatum(args),
2095  Int16GetDatum(jointype),
2096  PointerGetDatum(sjinfo)));
2097  else
2098  s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2099  clause->inputcollid,
2100  PointerGetDatum(root),
2101  ObjectIdGetDatum(operator),
2102  PointerGetDatum(args),
2103  Int32GetDatum(varRelid)));
2104  s1 = useOr ? 0.0 : 1.0;
2105 
2106  /*
2107  * Arbitrarily assume 10 elements in the eventual array value (see
2108  * also estimate_array_length). We don't risk an assumption of
2109  * disjoint probabilities here.
2110  */
2111  for (i = 0; i < 10; i++)
2112  {
2113  if (useOr)
2114  s1 = s1 + s2 - s1 * s2;
2115  else
2116  s1 = s1 * s2;
2117  }
2118  }
2119 
2120  /* result should be in range, but make sure... */
2121  CLAMP_PROBABILITY(s1);
2122 
2123  return s1;
2124 }
2125 
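To see how the two combination rules used above diverge, here is a minimal standalone sketch (hypothetical per-element selectivity s2 = 0.01 over five elements; not part of selfuncs.c) comparing the independent-probability OR merge with the disjoint sum used for "= ANY":

    #include <stdio.h>

    int
    main(void)
    {
        double s2 = 0.01;           /* hypothetical per-element selectivity */
        double s1 = 0.0;            /* independent-probability OR merge */
        double s1disjoint = 0.0;    /* disjoint (summed) estimate */

        for (int i = 0; i < 5; i++)
        {
            s1 = s1 + s2 - s1 * s2; /* P(A or B) = P(A) + P(B) - P(A)P(B) */
            s1disjoint += s2;       /* valid when the elements are distinct */
        }
        printf("independent: %.6f  disjoint: %.6f\n", s1, s1disjoint);
        /* prints: independent: 0.049010  disjoint: 0.050000 */
        return 0;
    }

The two agree closely for small selectivities; the disjoint sum is preferred for equality because distinct array elements select disjoint row sets, and the out-of-range check above guards the case where they are not distinct after all.
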
2126 /*
2127  * Estimate number of elements in the array yielded by an expression.
2128  *
2129  * It's important that this agree with scalararraysel.
2130  */
2131 int
2132 estimate_array_length(Node *arrayexpr)
2133 {
2134  /* look through any binary-compatible relabeling of arrayexpr */
2135  arrayexpr = strip_array_coercion(arrayexpr);
2136 
2137  if (arrayexpr && IsA(arrayexpr, Const))
2138  {
2139  Datum arraydatum = ((Const *) arrayexpr)->constvalue;
2140  bool arrayisnull = ((Const *) arrayexpr)->constisnull;
2141  ArrayType *arrayval;
2142 
2143  if (arrayisnull)
2144  return 0;
2145  arrayval = DatumGetArrayTypeP(arraydatum);
2146  return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2147  }
2148  else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2149  !((ArrayExpr *) arrayexpr)->multidims)
2150  {
2151  return list_length(((ArrayExpr *) arrayexpr)->elements);
2152  }
2153  else
2154  {
2155  /* default guess --- see also scalararraysel */
2156  return 10;
2157  }
2158 }
2159 
2160 /*
2161  * rowcomparesel - Selectivity of RowCompareExpr Node.
2162  *
2163  * We estimate RowCompare selectivity by considering just the first
2164  * (highest-order) column, which makes it equivalent to an ordinary OpExpr:
2165  * e.g., (a, b) < (10, 20) is estimated as though it were just a < 10.
2166  * While this estimate could be refined by considering additional columns,
2167  * it seems unlikely that we could do a lot better without multi-column statistics.
2168  */
2169 Selectivity
2170 rowcomparesel(PlannerInfo *root,
2171  RowCompareExpr *clause,
2172  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2173 {
2174  Selectivity s1;
2175  Oid opno = linitial_oid(clause->opnos);
2176  Oid inputcollid = linitial_oid(clause->inputcollids);
2177  List *opargs;
2178  bool is_join_clause;
2179 
2180  /* Build equivalent arg list for single operator */
2181  opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2182 
2183  /*
2184  * Decide if it's a join clause. This should match clausesel.c's
2185  * treat_as_join_clause(), except that we intentionally consider only the
2186  * leading columns and not the rest of the clause.
2187  */
2188  if (varRelid != 0)
2189  {
2190  /*
2191  * Caller is forcing restriction mode (eg, because we are examining an
2192  * inner indexscan qual).
2193  */
2194  is_join_clause = false;
2195  }
2196  else if (sjinfo == NULL)
2197  {
2198  /*
2199  * It must be a restriction clause, since it's being evaluated at a
2200  * scan node.
2201  */
2202  is_join_clause = false;
2203  }
2204  else
2205  {
2206  /*
2207  * Otherwise, it's a join if there's more than one relation used.
2208  */
2209  is_join_clause = (NumRelids(root, (Node *) opargs) > 1);
2210  }
2211 
2212  if (is_join_clause)
2213  {
2214  /* Estimate selectivity for a join clause. */
2215  s1 = join_selectivity(root, opno,
2216  opargs,
2217  inputcollid,
2218  jointype,
2219  sjinfo);
2220  }
2221  else
2222  {
2223  /* Estimate selectivity for a restriction clause. */
2224  s1 = restriction_selectivity(root, opno,
2225  opargs,
2226  inputcollid,
2227  varRelid);
2228  }
2229 
2230  return s1;
2231 }
2232 
2233 /*
2234  * eqjoinsel - Join selectivity of "="
2235  */
2236 Datum
2237 eqjoinsel(PG_FUNCTION_ARGS)
2238 {
2239  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2240  Oid operator = PG_GETARG_OID(1);
2241  List *args = (List *) PG_GETARG_POINTER(2);
2242 
2243 #ifdef NOT_USED
2244  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2245 #endif
2246  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2247  Oid collation = PG_GET_COLLATION();
2248  double selec;
2249  double selec_inner;
2250  VariableStatData vardata1;
2251  VariableStatData vardata2;
2252  double nd1;
2253  double nd2;
2254  bool isdefault1;
2255  bool isdefault2;
2256  Oid opfuncoid;
2257  AttStatsSlot sslot1;
2258  AttStatsSlot sslot2;
2259  Form_pg_statistic stats1 = NULL;
2260  Form_pg_statistic stats2 = NULL;
2261  bool have_mcvs1 = false;
2262  bool have_mcvs2 = false;
2263  bool join_is_reversed;
2264  RelOptInfo *inner_rel;
2265 
2266  get_join_variables(root, args, sjinfo,
2267  &vardata1, &vardata2, &join_is_reversed);
2268 
2269  nd1 = get_variable_numdistinct(&vardata1, &isdefault1);
2270  nd2 = get_variable_numdistinct(&vardata2, &isdefault2);
2271 
2272  opfuncoid = get_opcode(operator);
2273 
2274  memset(&sslot1, 0, sizeof(sslot1));
2275  memset(&sslot2, 0, sizeof(sslot2));
2276 
2277  if (HeapTupleIsValid(vardata1.statsTuple))
2278  {
2279  /* note we allow use of nullfrac regardless of security check */
2280  stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
2281  if (statistic_proc_security_check(&vardata1, opfuncoid))
2282  have_mcvs1 = get_attstatsslot(&sslot1, vardata1.statsTuple,
2283  STATISTIC_KIND_MCV, InvalidOid,
2284  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2285  }
2286 
2287  if (HeapTupleIsValid(vardata2.statsTuple))
2288  {
2289  /* note we allow use of nullfrac regardless of security check */
2290  stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
2291  if (statistic_proc_security_check(&vardata2, opfuncoid))
2292  have_mcvs2 = get_attstatsslot(&sslot2, vardata2.statsTuple,
2293  STATISTIC_KIND_MCV, InvalidOid,
2294  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2295  }
2296 
2297  /* We need to compute the inner-join selectivity in all cases */
2298  selec_inner = eqjoinsel_inner(opfuncoid, collation,
2299  &vardata1, &vardata2,
2300  nd1, nd2,
2301  isdefault1, isdefault2,
2302  &sslot1, &sslot2,
2303  stats1, stats2,
2304  have_mcvs1, have_mcvs2);
2305 
2306  switch (sjinfo->jointype)
2307  {
2308  case JOIN_INNER:
2309  case JOIN_LEFT:
2310  case JOIN_FULL:
2311  selec = selec_inner;
2312  break;
2313  case JOIN_SEMI:
2314  case JOIN_ANTI:
2315 
2316  /*
2317  * Look up the join's inner relation. min_righthand is sufficient
2318  * information because neither SEMI nor ANTI joins permit any
2319  * reassociation into or out of their RHS, so the righthand will
2320  * always be exactly that set of rels.
2321  */
2322  inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2323 
2324  if (!join_is_reversed)
2325  selec = eqjoinsel_semi(opfuncoid, collation,
2326  &vardata1, &vardata2,
2327  nd1, nd2,
2328  isdefault1, isdefault2,
2329  &sslot1, &sslot2,
2330  stats1, stats2,
2331  have_mcvs1, have_mcvs2,
2332  inner_rel);
2333  else
2334  {
2335  Oid commop = get_commutator(operator);
2336  Oid commopfuncoid = OidIsValid(commop) ? get_opcode(commop) : InvalidOid;
2337 
2338  selec = eqjoinsel_semi(commopfuncoid, collation,
2339  &vardata2, &vardata1,
2340  nd2, nd1,
2341  isdefault2, isdefault1,
2342  &sslot2, &sslot1,
2343  stats2, stats1,
2344  have_mcvs2, have_mcvs1,
2345  inner_rel);
2346  }
2347 
2348  /*
2349  * We should never estimate the output of a semijoin to be more
2350  * rows than we estimate for an inner join with the same input
2351  * rels and join condition; it's obviously impossible for that to
2352  * happen. The former estimate is N1 * Ssemi while the latter is
2353  * N1 * N2 * Sinner, so we may clamp Ssemi <= N2 * Sinner. Doing
2354  * this is worthwhile because of the shakier estimation rules we
2355  * use in eqjoinsel_semi, particularly in cases where it has to
2356  * punt entirely.
2357  */
2358  selec = Min(selec, inner_rel->rows * selec_inner);
2359  break;
2360  default:
2361  /* other values not expected here */
2362  elog(ERROR, "unrecognized join type: %d",
2363  (int) sjinfo->jointype);
2364  selec = 0; /* keep compiler quiet */
2365  break;
2366  }
2367 
2368  free_attstatsslot(&sslot1);
2369  free_attstatsslot(&sslot2);
2370 
2371  ReleaseVariableStats(vardata1);
2372  ReleaseVariableStats(vardata2);
2373 
2374  CLAMP_PROBABILITY(selec);
2375 
2376  PG_RETURN_FLOAT8((float8) selec);
2377 }
2378 
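A small standalone sketch of the Ssemi <= N2 * Sinner clamp applied in the SEMI/ANTI branch above, with hypothetical numbers (not from the source):

    #include <stdio.h>

    int
    main(void)
    {
        double inner_rows = 1000.0;     /* hypothetical N2 */
        double selec_inner = 0.0001;    /* hypothetical inner-join selectivity */
        double selec_semi = 0.5;        /* e.g. a punt value from eqjoinsel_semi */

        /* inner join yields N1*N2*Sinner rows vs. N1*Ssemi, so Ssemi <= N2*Sinner */
        if (selec_semi > inner_rows * selec_inner)
            selec_semi = inner_rows * selec_inner;
        printf("clamped Ssemi = %g\n", selec_semi);     /* 0.1 */
        return 0;
    }
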
2379 /*
2380  * eqjoinsel_inner --- eqjoinsel for normal inner join
2381  *
2382  * We also use this for LEFT/FULL outer joins; it's not presently clear
2383  * that it's worth trying to distinguish them here.
2384  */
2385 static double
2386 eqjoinsel_inner(Oid opfuncoid, Oid collation,
2387  VariableStatData *vardata1, VariableStatData *vardata2,
2388  double nd1, double nd2,
2389  bool isdefault1, bool isdefault2,
2390  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2391  Form_pg_statistic stats1, Form_pg_statistic stats2,
2392  bool have_mcvs1, bool have_mcvs2)
2393 {
2394  double selec;
2395 
2396  if (have_mcvs1 && have_mcvs2)
2397  {
2398  /*
2399  * We have most-common-value lists for both relations. Run through
2400  * the lists to see which MCVs actually join to each other with the
2401  * given operator. This allows us to determine the exact join
2402  * selectivity for the portion of the relations represented by the MCV
2403  * lists. We still have to estimate for the remaining population, but
2404  * in a skewed distribution this gives us a big leg up in accuracy.
2405  * For motivation see the analysis in Y. Ioannidis and S.
2406  * Christodoulakis, "On the propagation of errors in the size of join
2407  * results", Technical Report 1018, Computer Science Dept., University
2408  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2409  */
2410  LOCAL_FCINFO(fcinfo, 2);
2411  FmgrInfo eqproc;
2412  bool *hasmatch1;
2413  bool *hasmatch2;
2414  double nullfrac1 = stats1->stanullfrac;
2415  double nullfrac2 = stats2->stanullfrac;
2416  double matchprodfreq,
2417  matchfreq1,
2418  matchfreq2,
2419  unmatchfreq1,
2420  unmatchfreq2,
2421  otherfreq1,
2422  otherfreq2,
2423  totalsel1,
2424  totalsel2;
2425  int i,
2426  nmatches;
2427 
2428  fmgr_info(opfuncoid, &eqproc);
2429 
2430  /*
2431  * Save a few cycles by setting up the fcinfo struct just once. Using
2432  * FunctionCallInvoke directly also avoids failure if the eqproc
2433  * returns NULL, though really equality functions should never do
2434  * that.
2435  */
2436  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2437  NULL, NULL);
2438  fcinfo->args[0].isnull = false;
2439  fcinfo->args[1].isnull = false;
2440 
2441  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2442  hasmatch2 = (bool *) palloc0(sslot2->nvalues * sizeof(bool));
2443 
2444  /*
2445  * Note we assume that each MCV will match at most one member of the
2446  * other MCV list. If the operator isn't really equality, there could
2447  * be multiple matches --- but we don't look for them, both for speed
2448  * and because the math wouldn't add up...
2449  */
2450  matchprodfreq = 0.0;
2451  nmatches = 0;
2452  for (i = 0; i < sslot1->nvalues; i++)
2453  {
2454  int j;
2455 
2456  fcinfo->args[0].value = sslot1->values[i];
2457 
2458  for (j = 0; j < sslot2->nvalues; j++)
2459  {
2460  Datum fresult;
2461 
2462  if (hasmatch2[j])
2463  continue;
2464  fcinfo->args[1].value = sslot2->values[j];
2465  fcinfo->isnull = false;
2466  fresult = FunctionCallInvoke(fcinfo);
2467  if (!fcinfo->isnull && DatumGetBool(fresult))
2468  {
2469  hasmatch1[i] = hasmatch2[j] = true;
2470  matchprodfreq += sslot1->numbers[i] * sslot2->numbers[j];
2471  nmatches++;
2472  break;
2473  }
2474  }
2475  }
2476  CLAMP_PROBABILITY(matchprodfreq);
2477  /* Sum up frequencies of matched and unmatched MCVs */
2478  matchfreq1 = unmatchfreq1 = 0.0;
2479  for (i = 0; i < sslot1->nvalues; i++)
2480  {
2481  if (hasmatch1[i])
2482  matchfreq1 += sslot1->numbers[i];
2483  else
2484  unmatchfreq1 += sslot1->numbers[i];
2485  }
2486  CLAMP_PROBABILITY(matchfreq1);
2487  CLAMP_PROBABILITY(unmatchfreq1);
2488  matchfreq2 = unmatchfreq2 = 0.0;
2489  for (i = 0; i < sslot2->nvalues; i++)
2490  {
2491  if (hasmatch2[i])
2492  matchfreq2 += sslot2->numbers[i];
2493  else
2494  unmatchfreq2 += sslot2->numbers[i];
2495  }
2496  CLAMP_PROBABILITY(matchfreq2);
2497  CLAMP_PROBABILITY(unmatchfreq2);
2498  pfree(hasmatch1);
2499  pfree(hasmatch2);
2500 
2501  /*
2502  * Compute total frequency of non-null values that are not in the MCV
2503  * lists.
2504  */
2505  otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2506  otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2507  CLAMP_PROBABILITY(otherfreq1);
2508  CLAMP_PROBABILITY(otherfreq2);
2509 
2510  /*
2511  * We can estimate the total selectivity from the point of view of
2512  * relation 1 as: the known selectivity for matched MCVs, plus
2513  * unmatched MCVs that are assumed to match against random members of
2514  * relation 2's non-MCV population, plus non-MCV values that are
2515  * assumed to match against random members of relation 2's unmatched
2516  * MCVs plus non-MCV values.
2517  */
2518  totalsel1 = matchprodfreq;
2519  if (nd2 > sslot2->nvalues)
2520  totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - sslot2->nvalues);
2521  if (nd2 > nmatches)
2522  totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2523  (nd2 - nmatches);
2524  /* Same estimate from the point of view of relation 2. */
2525  totalsel2 = matchprodfreq;
2526  if (nd1 > sslot1->nvalues)
2527  totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - sslot1->nvalues);
2528  if (nd1 > nmatches)
2529  totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2530  (nd1 - nmatches);
2531 
2532  /*
2533  * Use the smaller of the two estimates. This can be justified in
2534  * essentially the same terms as given below for the no-stats case: to
2535  * a first approximation, we are estimating from the point of view of
2536  * the relation with smaller nd.
2537  */
2538  selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2539  }
2540  else
2541  {
2542  /*
2543  * We do not have MCV lists for both sides. Estimate the join
2544  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2545  * is plausible if we assume that the join operator is strict and the
2546  * non-null values are about equally distributed: a given non-null
2547  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2548  * of rel2, so total join rows are at most
2549  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2550  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2551  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2552  * with MIN() is an upper bound. Using the MIN() means we estimate
2553  * from the point of view of the relation with smaller nd (since the
2554  * larger nd is determining the MIN). It is reasonable to assume that
2555  * most tuples in this rel will have join partners, so the bound is
2556  * probably reasonably tight and should be taken as-is.
2557  *
2558  * XXX Can we be smarter if we have an MCV list for just one side? It
2559  * seems that if we assume equal distribution for the other side, we
2560  * end up with the same answer anyway.
2561  */
2562  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2563  double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2564 
2565  selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2566  if (nd1 > nd2)
2567  selec /= nd1;
2568  else
2569  selec /= nd2;
2570  }
2571 
2572  return selec;
2573 }
2574 
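The no-MCV branch above reduces to a one-line formula; the following standalone sketch evaluates it for hypothetical inputs (100 and 20 distinct values, 10% nulls on each side; not part of selfuncs.c):

    #include <stdio.h>

    /* MIN(1/nd1, 1/nd2) * (1 - nullfrac1) * (1 - nullfrac2) */
    static double
    eqjoinsel_inner_nostats(double nd1, double nd2,
                            double nullfrac1, double nullfrac2)
    {
        double selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);

        /* dividing by the larger nd implements the MIN() of the reciprocals */
        return selec / (nd1 > nd2 ? nd1 : nd2);
    }

    int
    main(void)
    {
        printf("%g\n", eqjoinsel_inner_nostats(100, 20, 0.1, 0.1)); /* 0.0081 */
        return 0;
    }
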
2575 /*
2576  * eqjoinsel_semi --- eqjoinsel for semi join
2577  *
2578  * (Also used for anti join, which we are supposed to estimate the same way.)
2579  * Caller has ensured that vardata1 is the LHS variable.
2580  * Unlike eqjoinsel_inner, we have to cope with opfuncoid being InvalidOid.
2581  */
2582 static double
2583 eqjoinsel_semi(Oid opfuncoid, Oid collation,
2584  VariableStatData *vardata1, VariableStatData *vardata2,
2585  double nd1, double nd2,
2586  bool isdefault1, bool isdefault2,
2587  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2588  Form_pg_statistic stats1, Form_pg_statistic stats2,
2589  bool have_mcvs1, bool have_mcvs2,
2590  RelOptInfo *inner_rel)
2591 {
2592  double selec;
2593 
2594  /*
2595  * We clamp nd2 to be not more than what we estimate the inner relation's
2596  * size to be. This is intuitively somewhat reasonable since obviously
2597  * there can't be more than that many distinct values coming from the
2598  * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
2599  * likewise) is that this is the only pathway by which restriction clauses
2600  * applied to the inner rel will affect the join result size estimate,
2601  * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2602  * only the outer rel's size. If we clamped nd1 we'd be double-counting
2603  * the selectivity of outer-rel restrictions.
2604  *
2605  * We can apply this clamping both with respect to the base relation from
2606  * which the join variable comes (if there is just one), and to the
2607  * immediate inner input relation of the current join.
2608  *
2609  * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2610  * great, maybe, but it didn't come out of nowhere either. This is most
2611  * helpful when the inner relation is empty and consequently has no stats.
2612  */
2613  if (vardata2->rel)
2614  {
2615  if (nd2 >= vardata2->rel->rows)
2616  {
2617  nd2 = vardata2->rel->rows;
2618  isdefault2 = false;
2619  }
2620  }
2621  if (nd2 >= inner_rel->rows)
2622  {
2623  nd2 = inner_rel->rows;
2624  isdefault2 = false;
2625  }
2626 
2627  if (have_mcvs1 && have_mcvs2 && OidIsValid(opfuncoid))
2628  {
2629  /*
2630  * We have most-common-value lists for both relations. Run through
2631  * the lists to see which MCVs actually join to each other with the
2632  * given operator. This allows us to determine the exact join
2633  * selectivity for the portion of the relations represented by the MCV
2634  * lists. We still have to estimate for the remaining population, but
2635  * in a skewed distribution this gives us a big leg up in accuracy.
2636  */
2637  LOCAL_FCINFO(fcinfo, 2);
2638  FmgrInfo eqproc;
2639  bool *hasmatch1;
2640  bool *hasmatch2;
2641  double nullfrac1 = stats1->stanullfrac;
2642  double matchfreq1,
2643  uncertainfrac,
2644  uncertain;
2645  int i,
2646  nmatches,
2647  clamped_nvalues2;
2648 
2649  /*
2650  * The clamping above could have resulted in nd2 being less than
2651  * sslot2->nvalues; in which case, we assume that precisely the nd2
2652  * most common values in the relation will appear in the join input,
2653  * and so compare to only the first nd2 members of the MCV list. Of
2654  * course this is frequently wrong, but it's the best bet we can make.
2655  */
2656  clamped_nvalues2 = Min(sslot2->nvalues, nd2);
2657 
2658  fmgr_info(opfuncoid, &eqproc);
2659 
2660  /*
2661  * Save a few cycles by setting up the fcinfo struct just once. Using
2662  * FunctionCallInvoke directly also avoids failure if the eqproc
2663  * returns NULL, though really equality functions should never do
2664  * that.
2665  */
2666  InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2667  NULL, NULL);
2668  fcinfo->args[0].isnull = false;
2669  fcinfo->args[1].isnull = false;
2670 
2671  hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2672  hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2673 
2674  /*
2675  * Note we assume that each MCV will match at most one member of the
2676  * other MCV list. If the operator isn't really equality, there could
2677  * be multiple matches --- but we don't look for them, both for speed
2678  * and because the math wouldn't add up...
2679  */
2680  nmatches = 0;
2681  for (i = 0; i < sslot1->nvalues; i++)
2682  {
2683  int j;
2684 
2685  fcinfo->args[0].value = sslot1->values[i];
2686 
2687  for (j = 0; j < clamped_nvalues2; j++)
2688  {
2689  Datum fresult;
2690 
2691  if (hasmatch2[j])
2692  continue;
2693  fcinfo->args[1].value = sslot2->values[j];
2694  fcinfo->isnull = false;
2695  fresult = FunctionCallInvoke(fcinfo);
2696  if (!fcinfo->isnull && DatumGetBool(fresult))
2697  {
2698  hasmatch1[i] = hasmatch2[j] = true;
2699  nmatches++;
2700  break;
2701  }
2702  }
2703  }
2704  /* Sum up frequencies of matched MCVs */
2705  matchfreq1 = 0.0;
2706  for (i = 0; i < sslot1->nvalues; i++)
2707  {
2708  if (hasmatch1[i])
2709  matchfreq1 += sslot1->numbers[i];
2710  }
2711  CLAMP_PROBABILITY(matchfreq1);
2712  pfree(hasmatch1);
2713  pfree(hasmatch2);
2714 
2715  /*
2716  * Now we need to estimate the fraction of relation 1 that has at
2717  * least one join partner. We know for certain that the matched MCVs
2718  * do, so that gives us a lower bound, but we're really in the dark
2719  * about everything else. Our crude approach is: if nd1 <= nd2 then
2720  * assume all non-null rel1 rows have join partners, else assume for
2721  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2722  * can discount the known-matched MCVs from the distinct-values counts
2723  * before doing the division.
2724  *
2725  * Crude as the above is, it's completely useless if we don't have
2726  * reliable ndistinct values for both sides. Hence, if either nd1 or
2727  * nd2 is default, punt and assume half of the uncertain rows have
2728  * join partners.
2729  */
2730  if (!isdefault1 && !isdefault2)
2731  {
2732  nd1 -= nmatches;
2733  nd2 -= nmatches;
2734  if (nd1 <= nd2 || nd2 < 0)
2735  uncertainfrac = 1.0;
2736  else
2737  uncertainfrac = nd2 / nd1;
2738  }
2739  else
2740  uncertainfrac = 0.5;
2741  uncertain = 1.0 - matchfreq1 - nullfrac1;
2742  CLAMP_PROBABILITY(uncertain);
2743  selec = matchfreq1 + uncertainfrac * uncertain;
2744  }
2745  else
2746  {
2747  /*
2748  * Without MCV lists for both sides, we can only use the heuristic
2749  * about nd1 vs nd2.
2750  */
2751  double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2752 
2753  if (!isdefault1 && !isdefault2)
2754  {
2755  if (nd1 <= nd2 || nd2 < 0)
2756  selec = 1.0 - nullfrac1;
2757  else
2758  selec = (nd2 / nd1) * (1.0 - nullfrac1);
2759  }
2760  else
2761  selec = 0.5 * (1.0 - nullfrac1);
2762  }
2763 
2764  return selec;
2765 }
2766 
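Likewise, the no-MCV branch of the semi-join estimate is easy to check by hand; a standalone sketch with hypothetical values (1000 LHS distincts, 100 RHS distincts, 5% LHS nulls; not part of selfuncs.c):

    #include <stdio.h>

    static double
    semi_nostats(double nd1, double nd2, double nullfrac1)
    {
        /* nd1 <= nd2: assume every non-null LHS row finds a join partner */
        if (nd1 <= nd2)
            return 1.0 - nullfrac1;
        /* else assume a fraction nd2/nd1 of the LHS values have partners */
        return (nd2 / nd1) * (1.0 - nullfrac1);
    }

    int
    main(void)
    {
        printf("%g\n", semi_nostats(1000, 100, 0.05));  /* 0.095 */
        return 0;
    }
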
2767 /*
2768  * neqjoinsel - Join selectivity of "!="
2769  */
2770 Datum
2771 neqjoinsel(PG_FUNCTION_ARGS)
2772 {
2773  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2774  Oid operator = PG_GETARG_OID(1);
2775  List *args = (List *) PG_GETARG_POINTER(2);
2776  JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2777  SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2778  Oid collation = PG_GET_COLLATION();
2779  float8 result;
2780 
2781  if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
2782  {
2783  /*
2784  * For semi-joins, if there is more than one distinct value in the RHS
2785  * relation then every non-null LHS row must find a row to join since
2786  * it can only be equal to one of them. We'll assume that there is
2787  * always more than one distinct RHS value for the sake of stability,
2788  * though in theory we could have special cases for empty RHS
2789  * (selectivity = 0) and single-distinct-value RHS (selectivity =
2790  * fraction of LHS that has the same value as the single RHS value).
2791  *
2792  * For anti-joins, if we use the same assumption that there is more
2793  * than one distinct key in the RHS relation, then every non-null LHS
2794  * row must be suppressed by the anti-join.
2795  *
2796  * So either way, the selectivity estimate should be 1 - nullfrac.
2797  */
2798  VariableStatData leftvar;
2799  VariableStatData rightvar;
2800  bool reversed;
2801  HeapTuple statsTuple;
2802  double nullfrac;
2803 
2804  get_join_variables(root, args, sjinfo, &leftvar, &rightvar, &reversed);
2805  statsTuple = reversed ? rightvar.statsTuple : leftvar.statsTuple;
2806  if (HeapTupleIsValid(statsTuple))
2807  nullfrac = ((Form_pg_statistic) GETSTRUCT(statsTuple))->stanullfrac;
2808  else
2809  nullfrac = 0.0;
2810  ReleaseVariableStats(leftvar);
2811  ReleaseVariableStats(rightvar);
2812 
2813  result = 1.0 - nullfrac;
2814  }
2815  else
2816  {
2817  /*
2818  * We want 1 - eqjoinsel() where the equality operator is the one
2819  * associated with this != operator, that is, its negator.
2820  */
2821  Oid eqop = get_negator(operator);
2822 
2823  if (eqop)
2824  {
2825  result =
2826  DatumGetFloat8(DirectFunctionCall5Coll(eqjoinsel,
2827  collation,
2828  PointerGetDatum(root),
2829  ObjectIdGetDatum(eqop),
2830  PointerGetDatum(args),
2831  Int16GetDatum(jointype),
2832  PointerGetDatum(sjinfo)));
2833  }
2834  else
2835  {
2836  /* Use default selectivity (should we raise an error instead?) */
2837  result = DEFAULT_EQ_SEL;
2838  }
2839  result = 1.0 - result;
2840  }
2841 
2842  PG_RETURN_FLOAT8(result);
2843 }
2844 
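A quick numeric check of both branches above, with hypothetical inputs (2% LHS nulls, and an assumed eqjoinsel() result of 0.001; not part of selfuncs.c):

    #include <stdio.h>

    int
    main(void)
    {
        double nullfrac = 0.02;     /* hypothetical LHS stanullfrac */
        double eq_selec = 0.001;    /* hypothetical eqjoinsel() result */

        /* SEMI/ANTI: every non-null LHS row is assumed to pass */
        printf("semi/anti: %g\n", 1.0 - nullfrac);  /* 0.98 */
        /* otherwise: complement of the negator's equality estimate */
        printf("otherwise: %g\n", 1.0 - eq_selec);  /* 0.999 */
        return 0;
    }
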
2845 /*
2846  * scalarltjoinsel - Join selectivity of "<" for scalars
2847  */
2848 Datum
2849 scalarltjoinsel(PG_FUNCTION_ARGS)
2850 {
2851  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2852 }
2853 
2854 /*
2855  * scalarlejoinsel - Join selectivity of "<=" for scalars
2856  */
2857 Datum
2858 scalarlejoinsel(PG_FUNCTION_ARGS)
2859 {
2860  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2861 }
2862 
2863 /*
2864  * scalargtjoinsel - Join selectivity of ">" for scalars
2865  */
2866 Datum
2867 scalargtjoinsel(PG_FUNCTION_ARGS)
2868 {
2869  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2870 }
2871 
2872 /*
2873  * scalargejoinsel - Join selectivity of ">=" for scalars
2874  */
2875 Datum
2876 scalargejoinsel(PG_FUNCTION_ARGS)
2877 {
2878  PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2879 }
2880 
2881 
2882 /*
2883  * mergejoinscansel - Scan selectivity of merge join.
2884  *
2885  * A merge join will stop as soon as it exhausts either input stream.
2886  * Therefore, if we can estimate the ranges of both input variables,
2887  * we can estimate how much of the input will actually be read. This
2888  * can have a considerable impact on the cost when using indexscans.
2889  *
2890  * Also, we can estimate how much of each input has to be read before the
2891  * first join pair is found, which will affect the join's startup time.
2892  *
2893  * clause should be a clause already known to be mergejoinable. opfamily,
2894  * strategy, and nulls_first specify the sort ordering being used.
2895  *
2896  * The outputs are:
2897  * *leftstart is set to the fraction of the left-hand variable expected
2898  * to be scanned before the first join pair is found (0 to 1).
2899  * *leftend is set to the fraction of the left-hand variable expected
2900  * to be scanned before the join terminates (0 to 1).
2901  * *rightstart, *rightend similarly for the right-hand variable.
2902  */
2903 void
2904 mergejoinscansel(PlannerInfo *root, Node *clause,
2905  Oid opfamily, int strategy, bool nulls_first,
2906  Selectivity *leftstart, Selectivity *leftend,
2907  Selectivity *rightstart, Selectivity *rightend)
2908 {
2909  Node *left,
2910  *right;
2911  VariableStatData leftvar,
2912  rightvar;
2913  int op_strategy;
2914  Oid op_lefttype;
2915  Oid op_righttype;
2916  Oid opno,
2917  collation,
2918  lsortop,
2919  rsortop,
2920  lstatop,
2921  rstatop,
2922  ltop,
2923  leop,
2924  revltop,
2925  revleop;
2926  bool isgt;
2927  Datum leftmin,
2928  leftmax,
2929  rightmin,
2930  rightmax;
2931  double selec;
2932 
2933  /* Set default results if we can't figure anything out. */
2934  /* XXX should default "start" fraction be a bit more than 0? */
2935  *leftstart = *rightstart = 0.0;
2936  *leftend = *rightend = 1.0;
2937 
2938  /* Deconstruct the merge clause */
2939  if (!is_opclause(clause))
2940  return; /* shouldn't happen */
2941  opno = ((OpExpr *) clause)->opno;
2942  collation = ((OpExpr *) clause)->inputcollid;
2943  left = get_leftop((Expr *) clause);
2944  right = get_rightop((Expr *) clause);
2945  if (!right)
2946  return; /* shouldn't happen */
2947 
2948  /* Look for stats for the inputs */
2949  examine_variable(root, left, 0, &leftvar);
2950  examine_variable(root, right, 0, &rightvar);
2951 
2952  /* Extract the operator's declared left/right datatypes */
2953  get_op_opfamily_properties(opno, opfamily, false,
2954  &op_strategy,
2955  &op_lefttype,
2956  &op_righttype);
2957  Assert(op_strategy == BTEqualStrategyNumber);
2958 
2959  /*
2960  * Look up the various operators we need. If we don't find them all, it
2961  * probably means the opfamily is broken, but we just fail silently.
2962  *
2963  * Note: we expect that pg_statistic histograms will be sorted by the '<'
2964  * operator, regardless of which sort direction we are considering.
2965  */
2966  switch (strategy)
2967  {
2968  case BTLessStrategyNumber:
2969  isgt = false;
2970  if (op_lefttype == op_righttype)
2971  {
2972  /* easy case */
2973  ltop = get_opfamily_member(opfamily,
2974  op_lefttype, op_righttype,
2975  BTLessStrategyNumber);
2976  leop = get_opfamily_member(opfamily,
2977  op_lefttype, op_righttype,
2978  BTLessEqualStrategyNumber);
2979  lsortop = ltop;
2980  rsortop = ltop;
2981  lstatop = lsortop;
2982  rstatop = rsortop;
2983  revltop = ltop;
2984  revleop = leop;
2985  }
2986  else
2987  {
2988  ltop = get_opfamily_member(opfamily,
2989  op_lefttype, op_righttype,
2990  BTLessStrategyNumber);
2991  leop = get_opfamily_member(opfamily,
2992  op_lefttype, op_righttype,
2993  BTLessEqualStrategyNumber);
2994  lsortop = get_opfamily_member(opfamily,
2995  op_lefttype, op_lefttype,
2996  BTLessStrategyNumber);
2997  rsortop = get_opfamily_member(opfamily,
2998  op_righttype, op_righttype,
2999  BTLessStrategyNumber);
3000  lstatop = lsortop;
3001  rstatop = rsortop;
3002  revltop = get_opfamily_member(opfamily,
3003  op_righttype, op_lefttype,
3004  BTLessStrategyNumber);
3005  revleop = get_opfamily_member(opfamily,
3006  op_righttype, op_lefttype,
3007  BTLessEqualStrategyNumber);
3008  }
3009  break;
3010  case BTGreaterStrategyNumber:
3011  /* descending-order case */
3012  isgt = true;
3013  if (op_lefttype == op_righttype)
3014  {
3015  /* easy case */
3016  ltop = get_opfamily_member(opfamily,
3017  op_lefttype, op_righttype,
3018  BTGreaterStrategyNumber);
3019  leop = get_opfamily_member(opfamily,
3020  op_lefttype, op_righttype,
3021  BTGreaterEqualStrategyNumber);
3022  lsortop = ltop;
3023  rsortop = ltop;
3024  lstatop = get_opfamily_member(opfamily,
3025  op_lefttype, op_lefttype,
3026  BTLessStrategyNumber);
3027  rstatop = lstatop;
3028  revltop = ltop;
3029  revleop = leop;
3030  }
3031  else
3032  {
3033  ltop = get_opfamily_member(opfamily,
3034  op_lefttype, op_righttype,
3035  BTGreaterStrategyNumber);
3036  leop = get_opfamily_member(opfamily,
3037  op_lefttype, op_righttype,
3038  BTGreaterEqualStrategyNumber);
3039  lsortop = get_opfamily_member(opfamily,
3040  op_lefttype, op_lefttype,
3041  BTGreaterStrategyNumber);
3042  rsortop = get_opfamily_member(opfamily,
3043  op_righttype, op_righttype,
3044  BTGreaterStrategyNumber);
3045  lstatop = get_opfamily_member(opfamily,
3046  op_lefttype, op_lefttype,
3047  BTLessStrategyNumber);
3048  rstatop = get_opfamily_member(opfamily,
3049  op_righttype, op_righttype,
3050  BTLessStrategyNumber);
3051  revltop = get_opfamily_member(opfamily,
3052  op_righttype, op_lefttype,
3053  BTGreaterStrategyNumber);
3054  revleop = get_opfamily_member(opfamily,
3055  op_righttype, op_lefttype,
3056  BTGreaterEqualStrategyNumber);
3057  }
3058  break;
3059  default:
3060  goto fail; /* shouldn't get here */
3061  }
3062 
3063  if (!OidIsValid(lsortop) ||
3064  !OidIsValid(rsortop) ||
3065  !OidIsValid(lstatop) ||
3066  !OidIsValid(rstatop) ||
3067  !OidIsValid(ltop) ||
3068  !OidIsValid(leop) ||
3069  !OidIsValid(revltop) ||
3070  !OidIsValid(revleop))
3071  goto fail; /* insufficient info in catalogs */
3072 
3073  /* Try to get ranges of both inputs */
3074  if (!isgt)
3075  {
3076  if (!get_variable_range(root, &leftvar, lstatop, collation,
3077  &leftmin, &leftmax))
3078  goto fail; /* no range available from stats */
3079  if (!get_variable_range(root, &rightvar, rstatop, collation,
3080  &rightmin, &rightmax))
3081  goto fail; /* no range available from stats */
3082  }
3083  else
3084  {
3085  /* need to swap the max and min */
3086  if (!get_variable_range(root, &leftvar, lstatop, collation,
3087  &leftmax, &leftmin))
3088  goto fail; /* no range available from stats */
3089  if (!get_variable_range(root, &rightvar, rstatop, collation,
3090  &rightmax, &rightmin))
3091  goto fail; /* no range available from stats */
3092  }
3093 
3094  /*
3095  * Now, the fraction of the left variable that will be scanned is the
3096  * fraction that's <= the right-side maximum value. But only believe
3097  * non-default estimates, else stick with our 1.0.
3098  */
3099  selec = scalarineqsel(root, leop, isgt, true, collation, &leftvar,
3100  rightmax, op_righttype);
3101  if (selec != DEFAULT_INEQ_SEL)
3102  *leftend = selec;
3103 
3104  /* And similarly for the right variable. */
3105  selec = scalarineqsel(root, revleop, isgt, true, collation, &rightvar,
3106  leftmax, op_lefttype);
3107  if (selec != DEFAULT_INEQ_SEL)
3108  *rightend = selec;
3109 
3110  /*
3111  * Only one of the two "end" fractions can really be less than 1.0;
3112  * believe the smaller estimate and reset the other one to exactly 1.0. If
3113  * we get exactly equal estimates (as can easily happen with self-joins),
3114  * believe neither.
3115  */
3116  if (*leftend > *rightend)
3117  *leftend = 1.0;
3118  else if (*leftend < *rightend)
3119  *rightend = 1.0;
3120  else
3121  *leftend = *rightend = 1.0;
3122 
3123  /*
3124  * Also, the fraction of the left variable that will be scanned before the
3125  * first join pair is found is the fraction that's < the right-side
3126  * minimum value. But only believe non-default estimates, else stick with
3127  * our own default.
3128  */
3129  selec = scalarineqsel(root, ltop, isgt, false, collation, &leftvar,
3130  rightmin, op_righttype);
3131  if (selec != DEFAULT_INEQ_SEL)
3132  *leftstart = selec;
3133 
3134  /* And similarly for the right variable. */
3135  selec = scalarineqsel(root, revltop, isgt, false, collation, &rightvar,
3136  leftmin, op_lefttype);
3137  if (selec != DEFAULT_INEQ_SEL)
3138  *rightstart = selec;
3139 
3140  /*
3141  * Only one of the two "start" fractions can really be more than zero;
3142  * believe the larger estimate and reset the other one to exactly 0.0. If
3143  * we get exactly equal estimates (as can easily happen with self-joins),
3144  * believe neither.
3145  */
3146  if (*leftstart < *rightstart)
3147  *leftstart = 0.0;
3148  else if (*leftstart > *rightstart)
3149  *rightstart = 0.0;
3150  else
3151  *leftstart = *rightstart = 0.0;
3152 
3153  /*
3154  * If the sort order is nulls-first, we're going to have to skip over any
3155  * nulls too. These would not have been counted by scalarineqsel, and we
3156  * can safely add in this fraction regardless of whether we believe
3157  * scalarineqsel's results or not. But be sure to clamp the sum to 1.0!
3158  */
3159  if (nulls_first)
3160  {
3161  Form_pg_statistic stats;
3162 
3163  if (HeapTupleIsValid(leftvar.statsTuple))
3164  {
3165  stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3166  *leftstart += stats->stanullfrac;
3167  CLAMP_PROBABILITY(*leftstart);
3168  *leftend += stats->stanullfrac;
3169  CLAMP_PROBABILITY(*leftend);
3170  }
3171  if (HeapTupleIsValid(rightvar.statsTuple))
3172  {
3173  stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3174  *rightstart += stats->stanullfrac;
3175  CLAMP_PROBABILITY(*rightstart);
3176  *rightend += stats->stanullfrac;
3177  CLAMP_PROBABILITY(*rightend);
3178  }
3179  }
3180 
3181  /* Disbelieve start >= end, just in case that can happen */
3182  if (*leftstart >= *leftend)
3183  {
3184  *leftstart = 0.0;
3185  *leftend = 1.0;
3186  }
3187  if (*rightstart >= *rightend)
3188  {
3189  *rightstart = 0.0;
3190  *rightend = 1.0;
3191  }
3192 
3193 fail:
3194  ReleaseVariableStats(leftvar);
3195  ReleaseVariableStats(rightvar);
3196 }
3197 
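As a back-of-envelope illustration of the *leftend/*rightend logic (a uniform-distribution toy model with hypothetical ranges, not the histogram machinery the real code uses): if the left input is uniform on [0, 100] and the right on [0, 50], the merge exhausts the right input after reading about half of the left one.

    #include <stdio.h>

    /* fraction of a uniform [lo, hi] variable that is <= bound */
    static double
    frac_le(double lo, double hi, double bound)
    {
        if (bound <= lo)
            return 0.0;
        if (bound >= hi)
            return 1.0;
        return (bound - lo) / (hi - lo);
    }

    int
    main(void)
    {
        double leftend = frac_le(0, 100, 50);   /* left rows <= right max */
        double rightend = frac_le(0, 50, 100);  /* right rows <= left max */

        /* believe only the smaller fraction; reset the other to 1.0 */
        if (leftend > rightend)
            leftend = 1.0;
        else if (leftend < rightend)
            rightend = 1.0;

        printf("leftend=%g rightend=%g\n", leftend, rightend);  /* 0.5 1 */
        return 0;
    }
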
3198 
3199 /*
3200  * matchingsel -- generic matching-operator selectivity support
3201  *
3202  * Use these for any operators that (a) are on data types for which we collect
3203  * standard statistics, and (b) have behavior for which the default estimate
3204  * (twice DEFAULT_EQ_SEL) is sane. Typically that is good for match-like
3205  * operators.
3206  */
3207 
3208 Datum
3209 matchingsel(PG_FUNCTION_ARGS)
3210 {
3211  PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
3212  Oid operator = PG_GETARG_OID(1);
3213  List *args = (List *) PG_GETARG_POINTER(2);
3214  int varRelid = PG_GETARG_INT32(3);
3215  Oid collation = PG_GET_COLLATION();
3216  double selec;
3217 
3218  /* Use generic restriction selectivity logic. */
3219  selec = generic_restriction_selectivity(root, operator, collation,
3220  args, varRelid,
3221  DEFAULT_MATCHING_SEL);
3222 
3223  PG_RETURN_FLOAT8((float8) selec);
3224 }
3225 
3226 Datum
3227 matchingjoinsel(PG_FUNCTION_ARGS)
3228 {
3229  /* Just punt, for the moment. */
3230  PG_RETURN_FLOAT8(DEFAULT_MATCHING_SEL);
3231 }
3232 
3233 
3234 /*
3235  * Helper routine for estimate_num_groups: add an item to a list of
3236  * GroupVarInfos, but only if it's not known equal to any of the existing
3237  * entries.
3238  */
3239 typedef struct
3240 {
3241  Node *var; /* might be an expression, not just a Var */
3242  RelOptInfo *rel; /* relation it belongs to */
3243  double ndistinct; /* # distinct values */
3244  bool isdefault; /* true if DEFAULT_NUM_DISTINCT was used */
3245 } GroupVarInfo;
3246 
3247 static List *
3248 add_unique_group_var(PlannerInfo *root, List *varinfos,
3249  Node *var, VariableStatData *vardata)
3250 {
3251  GroupVarInfo *varinfo;
3252  double ndistinct;
3253  bool isdefault;
3254  ListCell *lc;
3255 
3256  ndistinct = get_variable_numdistinct(vardata, &isdefault);
3257 
3258  foreach(lc, varinfos)
3259  {
3260  varinfo = (GroupVarInfo *) lfirst(lc);
3261 
3262  /* Drop exact duplicates */
3263  if (equal(var, varinfo->var))
3264  return varinfos;
3265 
3266  /*
3267  * Drop known-equal vars, but only if they belong to different
3268  * relations (see comments for estimate_num_groups)
3269  */
3270  if (vardata->rel != varinfo->rel &&
3271  exprs_known_equal(root, var, varinfo->var))
3272  {
3273  if (varinfo->ndistinct <= ndistinct)
3274  {
3275  /* Keep older item, forget new one */
3276  return varinfos;
3277  }
3278  else
3279  {
3280  /* Delete the older item */
3281  varinfos = foreach_delete_current(varinfos, lc);
3282  }
3283  }
3284  }
3285 
3286  varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3287 
3288  varinfo->var = var;
3289  varinfo->rel = vardata->rel;
3290  varinfo->ndistinct = ndistinct;
3291  varinfo->isdefault = isdefault;
3292  varinfos = lappend(varinfos, varinfo);
3293  return varinfos;
3294 }
3295 
3296 /*
3297  * estimate_num_groups - Estimate number of groups in a grouped query
3298  *
3299  * Given a query having a GROUP BY clause, estimate how many groups there
3300  * will be --- ie, the number of distinct combinations of the GROUP BY
3301  * expressions.
3302  *
3303  * This routine is also used to estimate the number of rows emitted by
3304  * a DISTINCT filtering step; that is an isomorphic problem. (Note:
3305  * actually, we only use it for DISTINCT when there's no grouping or
3306  * aggregation ahead of the DISTINCT.)
3307  *
3308  * Inputs:
3309  * root - the query
3310  * groupExprs - list of expressions being grouped by
3311  * input_rows - number of rows estimated to arrive at the group/unique
3312  * filter step
3313  * pgset - NULL, or a List** pointing to a grouping set to filter the
3314  * groupExprs against
3315  *
3316  * Outputs:
3317  * estinfo - When passed as non-NULL, the function will set bits in the
3318  * "flags" field in order to provide callers with additional information
3319  * about the estimation. Currently, we only set the SELFLAG_USED_DEFAULT
3320  * bit if we used any default values in the estimation.
3321  *
3322  * Given the lack of any cross-correlation statistics in the system, it's
3323  * impossible to do anything really trustworthy with GROUP BY conditions
3324  * involving multiple Vars. We should however avoid assuming the worst
3325  * case (all possible cross-product terms actually appear as groups) since
3326  * very often the grouped-by Vars are highly correlated. Our current approach
3327  * is as follows:
3328  * 1. Expressions yielding boolean are assumed to contribute two groups,
3329  * independently of their content, and are ignored in the subsequent
3330  * steps. This is mainly because tests like "col IS NULL" break the
3331  * heuristic used in step 2 especially badly.
3332  * 2. Reduce the given expressions to a list of unique Vars used. For
3333  * example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3334  * It is clearly correct not to count the same Var more than once.
3335  * It is also reasonable to treat f(x) the same as x: f() cannot
3336  * increase the number of distinct values (unless it is volatile,
3337  * which we consider unlikely for grouping), but it probably won't
3338  * reduce the number of distinct values much either.
3339  * As a special case, if a GROUP BY expression can be matched to an
3340  * expressional index for which we have statistics, then we treat the
3341  * whole expression as though it were just a Var.
3342  * 3. If the list contains Vars of different relations that are known equal
3343  * due to equivalence classes, then drop all but one of the Vars from each
3344  * known-equal set, keeping the one with smallest estimated # of values
3345  * (since the extra values of the others can't appear in joined rows).
3346  * Note the reason we only consider Vars of different relations is that
3347  * if we considered ones of the same rel, we'd be double-counting the
3348  * restriction selectivity of the equality in the next step.
3349  * 4. For Vars within a single source rel, we multiply together the numbers
3350  * of values, clamp to the number of rows in the rel (divided by 10 if
3351  * more than one Var), and then multiply by a factor based on the
3352  * selectivity of the restriction clauses for that rel. When there's
3353  * more than one Var, the initial product is probably too high (it's the
3354  * worst case) but clamping to a fraction of the rel's rows seems to be a
3355  * helpful heuristic for not letting the estimate get out of hand. (The
3356  * factor of 10 is derived from pre-Postgres-7.4 practice.) The factor
3357  * we multiply by to adjust for the restriction selectivity assumes that
3358  * the restriction clauses are independent of the grouping, which may not
3359  * be a valid assumption, but it's hard to do better.
3360  * 5. If there are Vars from multiple rels, we repeat step 4 for each such
3361  * rel, and multiply the results together.
3362  * Note that rels not containing grouped Vars are ignored completely, as are
3363  * join clauses. Such rels cannot increase the number of groups, and we
3364  * assume such clauses do not reduce the number either (somewhat bogus,
3365  * but we don't have the info to do better).
3366  */
3367 double
3368 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3369  List **pgset, EstimationInfo *estinfo)
3370 {
3371  return estimate_num_groups_incremental(root, groupExprs,
3372  input_rows, pgset, estinfo,
3373  NULL, 0);
3374 }
3375 
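A standalone sketch of the step-4 clamping heuristic described in the header comment above, under simplifying assumptions (the restriction-selectivity adjustment is omitted; all numbers are hypothetical, not from the source):

    #include <math.h>
    #include <stdio.h>

    /* product of per-Var ndistincts, clamped to reltuples
     * (reltuples / 10 when more than one grouping Var is involved) */
    static double
    clamp_group_estimate(double nd_product, int nvars, double reltuples)
    {
        double clamp = (nvars > 1) ? reltuples / 10.0 : reltuples;

        return ceil(nd_product > clamp ? clamp : nd_product);
    }

    int
    main(void)
    {
        /* two grouping Vars with 100 and 50 distincts in a 1000-row rel */
        printf("%g\n", clamp_group_estimate(100.0 * 50.0, 2, 1000.0)); /* 100 */
        return 0;
    }
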
3376 /*
3377  * estimate_num_groups_incremental
3378  * An estimate_num_groups variant, optimized for cases that are adding the
3379  * expressions incrementally (e.g. one by one).
3380  */
3381 double
3382 estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
3383  double input_rows,
3384  List **pgset, EstimationInfo *estinfo,
3385  List **cache_varinfos, int prevNExprs)
3386 {
3387  List *varinfos = (cache_varinfos) ? *cache_varinfos : NIL;
3388  double srf_multiplier = 1.0;
3389  double numdistinct;
3390  ListCell *l;
3391  int i,
3392  j;
3393 
3394  /* Zero the estinfo output parameter, if non-NULL */
3395  if (estinfo != NULL)
3396  memset(estinfo, 0, sizeof(EstimationInfo));
3397 
3398  /*
3399  * We don't ever want to return an estimate of zero groups, as that tends
3400  * to lead to division-by-zero and other unpleasantness. The input_rows
3401  * estimate is usually already at least 1, but clamp it just in case it
3402  * isn't.
3403  */
3404  input_rows = clamp_row_est(input_rows);
3405 
3406  /*
3407  * If no grouping columns, there's exactly one group. (This can't happen
3408  * for normal cases with GROUP BY or DISTINCT, but it is possible for
3409  * corner cases with set operations.)
3410  */
3411  if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3412  return 1.0;
3413 
3414  /*
3415  * Count groups derived from boolean grouping expressions. For other
3416  * expressions, find the unique Vars used, treating an expression as a Var
3417  * if we can find stats for it. For each one, record the statistical
3418  * estimate of number of distinct values (total in its table, without
3419  * regard for filtering).
3420  */
3421  numdistinct = 1.0;
3422 
3423  i = j = 0;
3424  foreach(l, groupExprs)
3425  {
3426  Node *groupexpr = (Node *) lfirst(l);
3427  double this_srf_multiplier;
3428  VariableStatData vardata;
3429  List *varshere;
3430  ListCell *l2;
3431 
3432  /* was done on previous call */
3433  if (cache_varinfos && j++ < prevNExprs)
3434  {
3435  if (pgset)
3436  i++; /* to keep in sync with lines below */
3437  continue;
3438  }
3439 
3440  /* is expression in this grouping set? */
3441  if (pgset && !list_member_int(*pgset, i++))
3442  continue;
3443 
3444  /*
3445  * Set-returning functions in grouping columns are a bit problematic.
3446  * The code below will effectively ignore their SRF nature and come up
3447  * with a numdistinct estimate as though they were scalar functions.
3448  * We compensate by scaling up the end result by the largest SRF
3449  * rowcount estimate. (This will be an overestimate if the SRF
3450  * produces multiple copies of any output value, but it seems best to
3451  * assume the SRF's outputs are distinct. In any case, it's probably
3452  * pointless to worry too much about this without much better
3453  * estimates for SRF output rowcounts than we have today.)
3454  */
3455  this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
3456  if (srf_multiplier < this_srf_multiplier)
3457  srf_multiplier = this_srf_multiplier;
3458 
3459  /* Short-circuit for expressions returning boolean */
3460  if (exprType(groupexpr) == BOOLOID)
3461  {
3462  numdistinct *= 2.0;
3463  continue;
3464  }
3465 
3466  /*
3467  * If examine_variable is able to deduce anything about the GROUP BY
3468  * expression, treat it as a single variable even if it's really more
3469  * complicated.
3470  *
3471  * XXX This has the consequence that if there's a statistics object on
3472  * the expression, we don't split it into individual Vars. This
3473  * affects our selection of statistics in
3474  * estimate_multivariate_ndistinct, because it's probably better to
3475  * use more accurate estimate for each expression and treat them as
3476  * independent, than to combine estimates for the extracted variables
3477  * when we don't know how that relates to the expressions.
3478  */
3479  examine_variable(root, groupexpr, 0, &vardata);
3480  if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3481  {
3482  varinfos = add_unique_group_var(root, varinfos,
3483  groupexpr, &vardata);
3484  ReleaseVariableStats(vardata);
3485  continue;
3486  }
3487  ReleaseVariableStats(vardata);
3488 
3489  /*
3490  * Else pull out the component Vars. Handle PlaceHolderVars by
3491  * recursing into their arguments (effectively assuming that the
3492  * PlaceHolderVar doesn't change the number of groups, which boils
3493  * down to ignoring the possible addition of nulls to the result set).
3494  */
3495  varshere = pull_var_clause(groupexpr,
3496  PVC_RECURSE_AGGREGATES |
3497  PVC_RECURSE_WINDOWFUNCS |
3498  PVC_RECURSE_PLACEHOLDERS);
3499 
3500  /*
3501  * If we find any variable-free GROUP BY item, then either it is a
3502  * constant (and we can ignore it) or it contains a volatile function;
3503  * in the latter case we punt and assume that each input row will
3504  * yield a distinct group.
3505  */
3506  if (varshere == NIL)
3507  {
3508  if (contain_volatile_functions(groupexpr))
3509  {
3510  if (cache_varinfos)
3511  *cache_varinfos = varinfos;
3512  return input_rows;
3513  }
3514  continue;
3515  }
3516 
3517  /*
3518  * Else add variables to varinfos list
3519  */
3520  foreach(l2, varshere)
3521  {
3522  Node *var = (Node *) lfirst(l2);
3523 
3524  examine_variable(root, var, 0, &vardata);
3525  varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3526  ReleaseVariableStats(vardata);
3527  }
3528  }
3529 
3530  if (cache_varinfos)
3531  *cache_varinfos = varinfos;
3532 
3533  /*
3534  * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3535  * list.
3536  */
3537  if (varinfos == NIL)
3538  {
3539  /* Apply SRF multiplier as we would do in the long path */
3540  numdistinct *= srf_multiplier;
3541  /* Round off */
3542  numdistinct = ceil(numdistinct);
3543  /* Guard against out-of-range answers */
3544  if (numdistinct > input_rows)
3545  numdistinct = input_rows;
3546  if (numdistinct < 1.0)
3547  numdistinct = 1.0;
3548  return numdistinct;
3549  }
3550 
3551  /*
3552  * Group Vars by relation and estimate total numdistinct.
3553  *
3554  * For each iteration of the outer loop, we process the frontmost Var in
3555  * varinfos, plus all other Vars in the same relation. We remove these
3556  * Vars from the newvarinfos list for the next iteration. This is the
3557  * easiest way to group Vars of the same rel together.
3558  */
3559  do
3560  {
3561  GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3562  RelOptInfo *rel = varinfo1->rel;
3563  double reldistinct = 1;
3564  double relmaxndistinct = reldistinct;
3565  int relvarcount = 0;
3566  List *newvarinfos = NIL;
3567  List *relvarinfos = NIL;
3568 
3569  /*
3570  * Split the list of varinfos in two - one for the current rel, one
3571  * for remaining Vars on other rels.
3572  */
3573  relvarinfos = lappend(relvarinfos, varinfo1);
3574  for_each_from(l, varinfos, 1)
3575  {
3576  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3577 
3578  if (varinfo2->rel == varinfo1->rel)
3579  {
3580  /* varinfos on current rel */
3581  relvarinfos = lappend(relvarinfos, varinfo2);
3582  }
3583  else
3584  {
3585  /* not time to process varinfo2 yet */
3586  newvarinfos = lappend(newvarinfos, varinfo2);
3587  }
3588  }
3589 
3590  /*
3591  * Get the numdistinct estimate for the Vars of this rel. We
3592  * iteratively search for multivariate n-distinct with the maximum
3593  * number of vars; assuming that each var group is independent of the
3594  * others, we multiply them together. Any relvarinfos remaining after
3595  * no more multivariate matches are found are assumed independent too,
3596  * so their individual ndistinct estimates are multiplied in as well.
3597  *
3598  * While iterating, count how many separate numdistinct values we
3599  * apply. We apply a fudge factor below, but only if we multiplied
3601  * more than one such value.
3601  */
3602  while (relvarinfos)
3603  {
3604  double mvndistinct;
3605 
3606  if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3607  &mvndistinct))
3608  {
3609  reldistinct *= mvndistinct;
3610  if (relmaxndistinct < mvndistinct)
3611  relmaxndistinct = mvndistinct;
3612  relvarcount++;
3613  }
3614  else
3615  {
3616  foreach(l, relvarinfos)
3617  {
3618  GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3619 
3620  reldistinct *= varinfo2->ndistinct;
3621  if (relmaxndistinct < varinfo2->ndistinct)
3622  relmaxndistinct = varinfo2->ndistinct;
3623  relvarcount++;
3624 
3625  /*
3626  * When varinfo2's isdefault is set, we'd better set
3627  * the SELFLAG_USED_DEFAULT bit in the EstimationInfo.
3628  */
3629  if (estinfo != NULL && varinfo2->isdefault)
3630  estinfo->flags |= SELFLAG_USED_DEFAULT;
3631  }
3632 
3633  /* we're done with this relation */
3634  relvarinfos = NIL;
3635  }
3636  }
3637 
3638  /*
3639  * Sanity check --- don't divide by zero if empty relation.
3640  */
3641  Assert(IS_SIMPLE_REL(rel));
3642  if (rel->tuples > 0)
3643  {
3644  /*
3645  * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3646  * fudge factor is because the Vars are probably correlated but we
3647  * don't know by how much. We should never clamp to less than the
3648  * largest ndistinct value for any of the Vars, though, since
3649  * there will surely be at least that many groups.
3650  */
3651  double clamp = rel->tuples;
3652 
3653  if (relvarcount > 1)
3654  {
3655  clamp *= 0.1;
3656  if (clamp < relmaxndistinct)
3657  {
3658  clamp = relmaxndistinct;
3659  /* for sanity in case some ndistinct is too large: */
3660  if (clamp > rel->tuples)
3661  clamp = rel->tuples;
3662  }
3663  }
3664  if (reldistinct > clamp)
3665  reldistinct = clamp;
3666 
3667  /*
3668  * Update the estimate based on the restriction selectivity,
3669  * guarding against division by zero when reldistinct is zero.
3670  * Also skip this if we know that we are returning all rows.
3671  */
3672  if (reldistinct > 0 && rel->rows < rel->tuples)
3673  {
3674  /*
3675  * Given a table containing N rows with n distinct values in a
3676  * uniform distribution, if we select p rows at random then
3677  * the expected number of distinct values selected is
3678  *
3679  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3680  *
3681  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3682  *
3683  * See "Approximating block accesses in database
3684  * organizations", S. B. Yao, Communications of the ACM,
3685  * Volume 20 Issue 4, April 1977 Pages 260-261.
3686  *
3687  * Alternatively, re-arranging the terms from the factorials,
3688  * this may be written as
3689  *
3690  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3691  *
3692  * This form of the formula is more efficient to compute in
3693  * the common case where p is larger than N/n. Additionally,
3694  * as pointed out by Dell'Era, if i << N for all terms in the
3695  * product, it can be approximated by
3696  *
3697  * n * (1 - ((N-p)/N)^(N/n))
3698  *
3699  * See "Expected distinct values when selecting from a bag
3700  * without replacement", Alberto Dell'Era,
3701  * http://www.adellera.it/investigations/distinct_balls/.
3702  *
3703  * The condition i << N is equivalent to n >> 1, so this is a
3704  * good approximation when the number of distinct values in
3705  * the table is large. It turns out that this formula also
3706  * works well even when n is small.
3707  */
3708  reldistinct *=
3709  (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3710  rel->tuples / reldistinct));
3711  }
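 /*
  * Worked instance of the approximation above (numbers assumed purely
  * for illustration): with N = rel->tuples = 10000, n = reldistinct =
  * 100 and p = rel->rows = 100,
  *
  * n * (1 - ((N-p)/N)^(N/n)) = 100 * (1 - 0.99^100)
  * ~= 100 * (1 - 0.366) ~= 63
  *
  * so we'd expect about 63 of the 100 distinct values to survive the
  * restriction to 100 rows.
  */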
3712  reldistinct = clamp_row_est(reldistinct);
3713 
3714  /*
3715  * Update estimate of total distinct groups.
3716  */
3717  numdistinct *= reldistinct;
3718  }
3719 
3720  varinfos = newvarinfos;
3721  } while (varinfos != NIL);
3722 
3723  /* Now we can account for the effects of any SRFs */
3724  numdistinct *= srf_multiplier;
3725 
3726  /* Round off */
3727  numdistinct = ceil(numdistinct);
3728 
3729  /* Guard against out-of-range answers */
3730  if (numdistinct > input_rows)
3731  numdistinct = input_rows;
3732  if (numdistinct < 1.0)
3733  numdistinct = 1.0;
3734 
3735  return numdistinct;
3736 }
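 /*
  * End-to-end sketch of the above (all values assumed for illustration):
  * for GROUP BY (flag, city) where "flag" is boolean and "city" has an
  * ndistinct estimate of 100 with no filtering, the boolean contributes
  * a factor of 2.0 and the Var path contributes 100, giving numdistinct
  * = ceil(2.0 * 100) = 200, subject to the final clamp to
  * [1, input_rows]. Had the list also contained an SRF estimated to
  * return 10 rows, srf_multiplier would scale the result by 10.
  */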
3737 
3738 /*
3739  * Estimate hash bucket statistics when the specified expression is used
3740  * as a hash key for the given number of buckets.
3741  *
3742  * This attempts to determine two values:
3743  *
3744  * 1. The frequency of the most common value of the expression (returns
3745  * zero into *mcv_freq if we can't get that).
3746  *
3747  * 2. The "bucketsize fraction", ie, average number of entries in a bucket
3748  * divided by total tuples in relation.
3749  *
3750  * XXX This is really pretty bogus since we're effectively assuming that the
3751  * distribution of hash keys will be the same after applying restriction
3752  * clauses as it was in the underlying relation. However, we are not nearly
3753  * smart enough to figure out how the restrict clauses might change the
3754  * distribution, so this will have to do for now.
3755  *
3756  * We are passed the number of buckets the executor will use for the given
3757  * input relation. If the data were perfectly distributed, with the same
3758  * number of tuples going into each available bucket, then the bucketsize
3759  * fraction would be 1/nbuckets. But this happy state of affairs will occur
3760  * only if (a) there are at least nbuckets distinct data values, and (b)
3761  * we have a not-too-skewed data distribution. Otherwise the buckets will
3762  * be nonuniformly occupied. If the other relation in the join has a key
3763  * distribution similar to this one's, then the most-loaded buckets are
3764  * exactly those that will be probed most often. Therefore, the "average"
3765  * bucket size for costing purposes should really be taken as something close
3766  * to the "worst case" bucket size. We try to estimate this by adjusting the
3767  * fraction if there are too few distinct data values, and then scaling up
3768  * by the ratio of the most common value's frequency to the average frequency.
3769  *
3770  * If no statistics are available, use a default estimate of 0.1. This will
3771  * discourage use of a hash rather strongly if the inner relation is large,
3772  * which is what we want. We do not want to hash unless we know that the
3773  * inner rel is well-dispersed (or the alternatives seem much worse).
3774  *
3775  * The caller should also check that the mcv_freq is not so large that the
3776  * most common value would by itself require an impractically large bucket.
3777  * In a hash join, the executor can split buckets if they get too big, but
3778  * obviously that doesn't help for a bucket that contains many duplicates of
3779  * the same value.
3780  */
3781 void
3782 estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets,
3783  Selectivity *mcv_freq,
3784  Selectivity *bucketsize_frac)
3785 {
3786  VariableStatData vardata;
3787  double estfract,
3788  ndistinct,
3789  stanullfrac,
3790  avgfreq;
3791  bool isdefault;
3792  AttStatsSlot sslot;
3793 
3794  examine_variable(root, hashkey, 0, &vardata);
3795 
3796  /* Look up the frequency of the most common value, if available */
3797  *mcv_freq = 0.0;
3798 
3799  if (HeapTupleIsValid(vardata.statsTuple))
3800  {
3801  if (get_attstatsslot(&sslot, vardata.statsTuple,
3802  STATISTIC_KIND_MCV, InvalidOid,
3803  ATTSTATSSLOT_NUMBERS))
3804  {
3805  /*
3806  * The first MCV stat is for the most common value.
3807  */
3808  if (sslot.nnumbers > 0)
3809  *mcv_freq = sslot.numbers[0];
3810  free_attstatsslot(&sslot);
3811  }
3812  }
3813 
3814  /* Get number of distinct values */
3815  ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3816 
3817  /*
3818  * If ndistinct isn't real, punt. We normally return 0.1, but if the
3819  * mcv_freq is known to be even higher than that, use it instead.
3820  */
3821  if (isdefault)
3822  {
3823  *bucketsize_frac = (Selectivity) Max(0.1, *mcv_freq);
3824  ReleaseVariableStats(vardata);
3825  return;
3826  }
3827 
3828  /* Get fraction that are null */
3829  if (HeapTupleIsValid(vardata.statsTuple))
3830  {
3831  Form_pg_statistic stats;
3832 
3833  stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3834  stanullfrac = stats->stanullfrac;
3835  }
3836  else
3837  stanullfrac = 0.0;
3838 
3839  /* Compute avg freq of all distinct data values in raw relation */
3840  avgfreq = (1.0 - stanullfrac) / ndistinct;
3841 
3842  /*
3843  * Adjust ndistinct to account for restriction clauses. Observe we are
3844  * assuming that the data distribution is affected uniformly by the
3845  * restriction clauses!
3846  *
3847  * XXX Possibly better way, but much more expensive: multiply by
3848  * selectivity of rel's restriction clauses that mention the target Var.
3849  */
3850  if (vardata.rel && vardata.rel->tuples > 0)
3851  {
3852  ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3853  ndistinct = clamp_row_est(ndistinct);
3854  }
3855 
3856  /*
3857  * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3858  * number of buckets is less than the expected number of distinct values;
3859  * otherwise it is 1/ndistinct.
3860  */
3861  if (ndistinct > nbuckets)
3862  estfract = 1.0 / nbuckets;
3863  else
3864  estfract = 1.0 / ndistinct;
3865 
3866  /*
3867  * Adjust estimated bucketsize upward to account for skewed distribution.
3868  */
3869  if (avgfreq > 0.0 && *mcv_freq > avgfreq)
3870  estfract *= *mcv_freq / avgfreq;
3871 
3872  /*
3873  * Clamp bucketsize to sane range (the above adjustment could easily
3874  * produce an out-of-range result). We set the lower bound a little above
3875  * zero, since zero isn't a very sane result.
3876  */
3877  if (estfract < 1.0e-6)
3878  estfract = 1.0e-6;
3879  else if (estfract > 1.0)
3880  estfract = 1.0;
3881 
3882  *bucketsize_frac = (Selectivity) estfract;
3883 
3884  ReleaseVariableStats(vardata);
3885 }
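 /*
  * Worked example (all numbers assumed): suppose ndistinct = 50,
  * nbuckets = 1024, stanullfrac = 0 and *mcv_freq = 0.10. Then
  * avgfreq = (1 - 0) / 50 = 0.02; since ndistinct <= nbuckets the
  * initial estfract is 1/50 = 0.02; the skew adjustment multiplies by
  * 0.10 / 0.02 = 5, so *bucketsize_frac = 0.10, i.e. the typical probe
  * is costed as landing in a bucket holding 10% of the inner relation.
  */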
3886 
3887 /*
3888  * estimate_hashagg_tablesize
3889  * estimate the number of bytes that a hash aggregate hashtable will
3890  * require based on the agg_costs, path width and number of groups.
3891  *
3892  * We return the result as "double" to forestall any possible overflow
3893  * problem in the multiplication by dNumGroups.
3894  *
3895  * XXX this may be over-estimating the size now that hashagg knows to omit
3896  * unneeded columns from the hashtable. Also for mixed-mode grouping sets,
3897  * grouping columns not in the hashed set are counted here even though hashagg
3898  * won't store them. Is this a problem?
3899  */
3900 double
3901 estimate_hashagg_tablesize(PlannerInfo *root, Path *path,
3902  const AggClauseCosts *agg_costs, double dNumGroups)
3903 {
3904  Size hashentrysize;
3905 
3906  hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
3907  path->pathtarget->width,
3908  agg_costs->transitionSpace);
3909 
3910  /*
3911  * Note that this disregards the effect of fill-factor and growth policy
3912  * of the hash table. That's probably ok, given that the default
3913  * fill-factor is relatively high. It'd be hard to meaningfully factor in
3914  * "double-in-size" growth policies here.
3915  */
3916  return hashentrysize * dNumGroups;
3917 }
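 /*
  * Arithmetic sketch (entry size assumed): if hash_agg_entry_size()
  * reported 64 bytes per entry and dNumGroups = 1e6, the estimate would
  * be 64 * 1e6 bytes, i.e. about 64MB of hashtable.
  */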
3918 
3919 
3920 /*-------------------------------------------------------------------------
3921  *
3922  * Support routines
3923  *
3924  *-------------------------------------------------------------------------
3925  */
3926 
3927 /*
3928  * Find applicable ndistinct statistics for the given list of VarInfos (which
3929  * must all belong to the given rel), and update *ndistinct to the estimate of
3930  * the MVNDistinctItem that best matches. If a match is found, *varinfos
3931  * is updated to remove the matched varinfos.
3932  *
3933  * Varinfos that aren't for simple Vars are ignored.
3934  *
3935  * Return true if we're able to find a match, false otherwise.
3936  */
3937 static bool
3938 estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
3939  List **varinfos, double *ndistinct)
3940 {
3941  ListCell *lc;
3942  int nmatches_vars;
3943  int nmatches_exprs;
3944  Oid statOid = InvalidOid;
3945  MVNDistinct *stats;
3946  StatisticExtInfo *matched_info = NULL;
3947  RangeTblEntry *rte;
3948 
3949  /* bail out immediately if the table has no extended statistics */
3950  if (!rel->statlist)
3951  return false;
3952 
3953  /* look for the ndistinct statistics object matching the most vars */
3954  nmatches_vars = 0; /* we require at least two matches */
3955  nmatches_exprs = 0;
3956  foreach(lc, rel->statlist)
3957  {
3958  ListCell *lc2;
3959  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3960  int nshared_vars = 0;
3961  int nshared_exprs = 0;
3962 
3963  /* skip statistics of other kinds */
3964  if (info->kind != STATS_EXT_NDISTINCT)
3965  continue;
3966 
3967  /*
3968  * Determine how many expressions (and variables in non-matched
3969  * expressions) match. We'll then use these numbers to pick the
3970  * statistics object that best matches the clauses.
3971  */
3972  foreach(lc2, *varinfos)
3973  {
3974  ListCell *lc3;
3975  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
3976  AttrNumber attnum;
3977 
3978  Assert(varinfo->rel == rel);
3979 
3980  /* simple Var, search in statistics keys directly */
3981  if (IsA(varinfo->var, Var))
3982  {
3983  attnum = ((Var *) varinfo->var)->varattno;
3984 
3985  /*
3986  * Ignore system attributes - we don't support statistics on
3987  * them, so can't match them (and it'd fail as the values are
3988  * negative).
3989  */
3990  if (!AttrNumberIsForUserDefinedAttr(attnum))
3991  continue;
3992 
3993  if (bms_is_member(attnum, info->keys))
3994  nshared_vars++;
3995 
3996  continue;
3997  }
3998 
3999  /* expression - see if it's in the statistics object */
4000  foreach(lc3, info->exprs)
4001  {
4002  Node *expr = (Node *) lfirst(lc3);
4003 
4004  if (equal(varinfo->var, expr))
4005  {
4006  nshared_exprs++;
4007  break;
4008  }
4009  }
4010  }
4011 
4012  if (nshared_vars + nshared_exprs < 2)
4013  continue;
4014 
4015  /*
4016  * Does this statistics object match more columns than the currently
4017  * best object? If so, use this one instead.
4018  *
4019  * XXX This should break ties using the name of the object, or something
4020  * like that, to make the outcome stable.
4021  */
4022  if ((nshared_exprs > nmatches_exprs) ||
4023  (((nshared_exprs == nmatches_exprs)) && (nshared_vars > nmatches_vars)))
4024  {
4025  statOid = info->statOid;
4026  nmatches_vars = nshared_vars;
4027  nmatches_exprs = nshared_exprs;
4028  matched_info = info;
4029  }
4030  }
4031 
4032  /* No match? */
4033  if (statOid == InvalidOid)
4034  return false;
4035 
4036  Assert(nmatches_vars + nmatches_exprs > 1);
4037 
4038  rte = planner_rt_fetch(rel->relid, root);
4039  stats = statext_ndistinct_load(statOid, rte->inh);
4040 
4041  /*
4042  * If we have a match, search it for the specific item that matches (there
4043  * must be one), and construct the output values.
4044  */
4045  if (stats)
4046  {
4047  int i;
4048  List *newlist = NIL;
4049  MVNDistinctItem *item = NULL;
4050  ListCell *lc2;
4051  Bitmapset *matched = NULL;
4052  AttrNumber attnum_offset;
4053 
4054  /*
4055  * By how much do we need to offset the attnums? If there are no
4056  * expressions, no offset is needed. Otherwise offset enough to move
4057  * the lowest one (equal to minus the number of expressions) to 1.
4058  */
4059  if (matched_info->exprs)
4060  attnum_offset = (list_length(matched_info->exprs) + 1);
4061  else
4062  attnum_offset = 0;
4063 
4064  /* see what actually matched */
4065  foreach(lc2, *varinfos)
4066  {
4067  ListCell *lc3;
4068  int idx;
4069  bool found = false;
4070 
4071  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
4072 
4073  /*
4074  * Process a simple Var expression, by matching it to keys
4075  * directly. If there's a matching expression, we'll try matching
4076  * it later.
4077  */
4078  if (IsA(varinfo->var, Var))
4079  {
4080  AttrNumber attnum = ((Var *) varinfo->var)->varattno;
4081 
4082  /*
4083  * Ignore expressions on system attributes. Can't rely on the
4084  * bms check for negative values.
4085  */
4086  if (!AttrNumberIsForUserDefinedAttr(attnum))
4087  continue;
4088 
4089  /* Is the variable covered by the statistics object? */
4090  if (!bms_is_member(attnum, matched_info->keys))
4091  continue;
4092 
4093  attnum = attnum + attnum_offset;
4094 
4095  /* ensure sufficient offset */
4096  Assert(AttrNumberIsForUserDefinedAttr(attnum));
4097 
4098  matched = bms_add_member(matched, attnum);
4099 
4100  found = true;
4101  }
4102 
4103  /*
4104  * XXX Maybe we should allow searching the expressions even if we
4105  * found an attribute matching the expression? That would handle
4106  * trivial expressions like "(a)" but it seems fairly useless.
4107  */
4108  if (found)
4109  continue;
4110 
4111  /* expression - see if it's in the statistics object */
4112  idx = 0;
4113  foreach(lc3, matched_info->exprs)
4114  {
4115  Node *expr = (Node *) lfirst(lc3);
4116 
4117  if (equal(varinfo->var, expr))
4118  {
4119  AttrNumber attnum = -(idx + 1);
4120 
4121  attnum = attnum + attnum_offset;
4122 
4123  /* ensure sufficient offset */
4124  Assert(AttrNumberIsForUserDefinedAttr(attnum));
4125 
4126  matched = bms_add_member(matched, attnum);
4127 
4128  /* there should be just one matching expression */
4129  break;
4130  }
4131 
4132  idx++;
4133  }
4134  }
4135 
4136  /* Find the specific item that exactly matches the combination */
4137  for (i = 0; i < stats->nitems; i++)
4138  {
4139  int j;
4140  MVNDistinctItem *tmpitem = &stats->items[i];
4141 
4142  if (tmpitem->nattributes != bms_num_members(matched))
4143  continue;
4144 
4145  /* assume it's the right item */
4146  item = tmpitem;
4147 
4148  /* check that all item attributes/expressions fit the match */
4149  for (j = 0; j < tmpitem->nattributes; j++)
4150  {
4151  AttrNumber attnum = tmpitem->attributes[j];
4152 
4153  /*
4154  * Thanks to how we constructed the matched bitmap above, we
4155  * can just offset all attnums the same way.
4156  */
4157  attnum = attnum + attnum_offset;
4158 
4159  if (!bms_is_member(attnum, matched))
4160  {
4161  /* nah, it's not this item */
4162  item = NULL;
4163  break;
4164  }
4165  }
4166 
4167  /*
4168  * If the item has all the matched attributes, we know it's the
4169  * right one - there can't be a better one matching more.
4170  */
4171  if (item)
4172  break;
4173  }
4174 
4175  /*
4176  * Make sure we found an item. There has to be one, because ndistinct
4177  * statistics includes all combinations of attributes.
4178  */
4179  if (!item)
4180  elog(ERROR, "corrupt MVNDistinct entry");
4181 
4182  /* Form the output varinfo list, keeping only unmatched ones */
4183  foreach(lc, *varinfos)
4184  {
4185  GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
4186  ListCell *lc3;
4187  bool found = false;
4188 
4189  /*
4190  * Let's look at plain variables first, because it's the most
4191  * common case and the check is quite cheap. We can simply get the
4192  * attnum and check (with an offset) the matched bitmap.
4193  */
4194  if (IsA(varinfo->var, Var))
4195  {
4196  AttrNumber attnum = ((Var *) varinfo->var)->varattno;
4197 
4198  /*
4199  * If it's a system attribute, we're done. We don't support
4200  * extended statistics on system attributes, so it's clearly
4201  * not matched. Just keep the expression and continue.
4202  */
4203  if (!AttrNumberIsForUserDefinedAttr(attnum))
4204  {
4205  newlist = lappend(newlist, varinfo);
4206  continue;
4207  }
4208 
4209  /* apply the same offset as above */
4210  attnum += attnum_offset;
4211 
4212  /* if it's not matched, keep the varinfo */
4213  if (!bms_is_member(attnum, matched))
4214  newlist = lappend(newlist, varinfo);
4215 
4216  /* The rest of the loop deals with complex expressions. */
4217  continue;
4218  }
4219 
4220  /*
4221  * Process complex expressions, not just simple Vars.
4222  *
4223  * First, we search for an exact match of an expression. If we
4224  * find one, we can just discard the whole GroupVarInfo, with all
4225  * the variables we extracted from it.
4226  *
4227  * Otherwise we inspect the individual vars, and try matching them
4228  * to variables in the item.
4229  */
4230  foreach(lc3, matched_info->exprs)
4231  {
4232  Node *expr = (Node *) lfirst(lc3);
4233 
4234  if (equal(varinfo->var, expr))
4235  {
4236  found = true;
4237  break;
4238  }
4239  }
4240 
4241  /* found exact match, skip */
4242  if (found)
4243  continue;
4244 
4245  newlist = lappend(newlist, varinfo);
4246  }
4247 
4248  *varinfos = newlist;
4249  *ndistinct = item->ndistinct;
4250  return true;
4251  }
4252 
4253  return false;
4254 }
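 /*
  * Offset bookkeeping example (statistics object assumed): with two
  * expressions in the matched object, attnum_offset = 2 + 1 = 3. A
  * plain column with attnum 4 is recorded as 4 + 3 = 7, while the
  * expressions, numbered -1 and -2, map to 2 and 1 respectively; every
  * member of the "matched" bitmapset is therefore positive, as
  * bms_add_member requires.
  */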
4255 
4256 /*
4257  * convert_to_scalar
4258  * Convert non-NULL values of the indicated types to the comparison
4259  * scale needed by scalarineqsel().
4260  * Returns "true" if successful.
4261  *
4262  * XXX this routine is a hack: ideally we should look up the conversion
4263  * subroutines in pg_type.
4264  *
4265  * All numeric datatypes are simply converted to their equivalent
4266  * "double" values. (NUMERIC values that are outside the range of "double"
4267  * are clamped to +/- HUGE_VAL.)
4268  *
4269  * String datatypes are converted by convert_string_to_scalar(),
4270  * which is explained below. The reason why this routine deals with
4271  * three values at a time, not just one, is that we need it for strings.
4272  *
4273  * The bytea datatype is just enough different from strings that it has
4274  * to be treated separately.
4275  *
4276  * The several datatypes representing absolute times are all converted
4277  * to Timestamp, which is actually an int64, and then we promote that to
4278  * a double. Note this will give correct results even for the "special"
4279  * values of Timestamp, since those are chosen to compare correctly;
4280  * see timestamp_cmp.
4281  *
4282  * The several datatypes representing relative times (intervals) are all
4283  * converted to measurements expressed in seconds.
4284  */
4285 static bool
4286 convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue,
4287  Datum lobound, Datum hibound, Oid boundstypid,
4288  double *scaledlobound, double *scaledhibound)
4289 {
4290  bool failure = false;
4291 
4292  /*
4293  * Both the valuetypid and the boundstypid should exactly match the
4294  * declared input type(s) of the operator we are invoked for. However,
4295  * extensions might try to use scalarineqsel as estimator for operators
4296  * with input type(s) we don't handle here; in such cases, we want to
4297  * return false, not fail. In any case, we mustn't assume that valuetypid
4298  * and boundstypid are identical.
4299  *
4300  * XXX The histogram we are interpolating between points of could belong
4301  * to a column that's only binary-compatible with the declared type. In
4302  * essence we are assuming that the semantics of binary-compatible types
4303  * are enough alike that we can use a histogram generated with one type's
4304  * operators to estimate selectivity for the other's. This is outright
4305  * wrong in some cases --- in particular signed versus unsigned
4306  * interpretation could trip us up. But it's useful enough in the
4307  * majority of cases that we do it anyway. Should think about more
4308  * rigorous ways to do it.
4309  */
4310  switch (valuetypid)
4311  {
4312  /*
4313  * Built-in numeric types
4314  */
4315  case BOOLOID:
4316  case INT2OID:
4317  case INT4OID:
4318  case INT8OID:
4319  case FLOAT4OID:
4320  case FLOAT8OID:
4321  case NUMERICOID:
4322  case OIDOID:
4323  case REGPROCOID:
4324  case REGPROCEDUREOID:
4325  case REGOPEROID:
4326  case REGOPERATOROID:
4327  case REGCLASSOID:
4328  case REGTYPEOID:
4329  case REGCONFIGOID:
4330  case REGDICTIONARYOID:
4331  case REGROLEOID:
4332  case REGNAMESPACEOID:
4333  *scaledvalue = convert_numeric_to_scalar(value, valuetypid,
4334  &failure);
4335  *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
4336  &failure);
4337  *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid,
4338  &failure);
4339  return !failure;
4340 
4341  /*
4342  * Built-in string types
4343  */
4344  case CHAROID:
4345  case BPCHAROID:
4346  case VARCHAROID:
4347  case TEXTOID:
4348  case NAMEOID:
4349  {
4350  char *valstr = convert_string_datum(value, valuetypid,
4351  collid, &failure);
4352  char *lostr = convert_string_datum(lobound, boundstypid,
4353  collid, &failure);
4354  char *histr = convert_string_datum(hibound, boundstypid,
4355  collid, &failure);
4356 
4357  /*
4358  * Bail out if any of the values is not of string type. We
4359  * might leak converted strings for the other value(s), but
4360  * that's not worth troubling over.
4361  */
4362  if (failure)
4363  return false;
4364 
4365  convert_string_to_scalar(valstr, scaledvalue,
4366  lostr, scaledlobound,
4367  histr, scaledhibound);
4368  pfree(valstr);
4369  pfree(lostr);
4370  pfree(histr);
4371  return true;
4372  }
4373 
4374  /*
4375  * Built-in bytea type
4376  */
4377  case BYTEAOID:
4378  {
4379  /* We only support bytea vs bytea comparison */
4380  if (boundstypid != BYTEAOID)
4381  return false;
4382  convert_bytea_to_scalar(value, scaledvalue,
4383  lobound, scaledlobound,
4384  hibound, scaledhibound);
4385  return true;
4386  }
4387 
4388  /*
4389  * Built-in time types
4390  */
4391  case TIMESTAMPOID:
4392  case TIMESTAMPTZOID:
4393  case DATEOID:
4394  case INTERVALOID:
4395  case TIMEOID:
4396  case TIMETZOID:
4397  *scaledvalue = convert_timevalue_to_scalar(value, valuetypid,
4398  &failure);
4399  *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid,
4400  &failure);
4401  *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid,
4402  &failure);
4403  return !failure;
4404 
4405  /*
4406  * Built-in network types
4407  */
4408  case INETOID:
4409  case CIDROID:
4410  case MACADDROID:
4411  case MACADDR8OID:
4412  *scaledvalue = convert_network_to_scalar(value, valuetypid,
4413  &failure);
4414  *scaledlobound = convert_network_to_scalar(lobound, boundstypid,
4415  &failure);
4416  *scaledhibound = convert_network_to_scalar(hibound, boundstypid,
4417  &failure);
4418  return !failure;
4419  }
4420  /* Don't know how to convert */
4421  *scaledvalue = *scaledlobound = *scaledhibound = 0;
4422  return false;
4423 }
4424 
4425 /*
4426  * Do convert_to_scalar()'s work for any numeric data type.
4427  *
4428  * On failure (e.g., unsupported typid), set *failure to true;
4429  * otherwise, that variable is not changed.
4430  */
4431 static double
4432 convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
4433 {
4434  switch (typid)
4435  {
4436  case BOOLOID:
4437  return (double) DatumGetBool(value);
4438  case INT2OID:
4439  return (double) DatumGetInt16(value);
4440  case INT4OID:
4441  return (double) DatumGetInt32(value);
4442  case INT8OID:
4443  return (double) DatumGetInt64(value);
4444  case FLOAT4OID:
4445  return (double) DatumGetFloat4(value);
4446  case FLOAT8OID:
4447  return (double) DatumGetFloat8(value);
4448  case NUMERICOID:
4449  /* Note: out-of-range values will be clamped to +-HUGE_VAL */
4450  return (double)
4451  DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
4452  value));
4453  case OIDOID:
4454  case REGPROCOID:
4455  case REGPROCEDUREOID:
4456  case REGOPEROID:
4457  case REGOPERATOROID:
4458  case REGCLASSOID:
4459  case REGTYPEOID:
4460  case REGCONFIGOID:
4461  case REGDICTIONARYOID:
4462  case REGROLEOID:
4463  case REGNAMESPACEOID:
4464  /* we can treat OIDs as integers... */
4465  return (double) DatumGetObjectId(value);
4466  }
4467 
4468  *failure = true;
4469  return 0;
4470 }
4471 
4472 /*
4473  * Do convert_to_scalar()'s work for any character-string data type.
4474  *
4475  * String datatypes are converted to a scale that ranges from 0 to 1,
4476  * where we visualize the bytes of the string as fractional digits.
4477  *
4478  * We do not want the base to be 256, however, since that tends to
4479  * generate inflated selectivity estimates; few databases will have
4480  * occurrences of all 256 possible byte values at each position.
4481  * Instead, use the smallest and largest byte values seen in the bounds
4482  * as the estimated range for each byte, after some fudging to deal with
4483  * the fact that we probably aren't going to see the full range that way.
4484  *
4485  * An additional refinement is that we discard any common prefix of the
4486  * three strings before computing the scaled values. This allows us to
4487  * "zoom in" when we encounter a narrow data range. An example is a phone
4488  * number database where all the values begin with the same area code.
4489  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
4490  * so this is more likely to happen than you might think.)
4491  */
4492 static void
4493 convert_string_to_scalar(char *value,
4494  double *scaledvalue,
4495  char *lobound,
4496  double *scaledlobound,
4497  char *hibound,
4498  double *scaledhibound)
4499 {
4500  int rangelo,
4501  rangehi;
4502  char *sptr;
4503 
4504  rangelo = rangehi = (unsigned char) hibound[0];
4505  for (sptr = lobound; *sptr; sptr++)
4506  {
4507  if (rangelo > (unsigned char) *sptr)
4508  rangelo = (unsigned char) *sptr;
4509  if (rangehi < (unsigned char) *sptr)
4510  rangehi = (unsigned char) *sptr;
4511  }
4512  for (sptr = hibound; *sptr; sptr++)
4513  {
4514  if (rangelo > (unsigned char) *sptr)
4515  rangelo = (unsigned char) *sptr;
4516  if (rangehi < (unsigned char) *sptr)
4517  rangehi = (unsigned char) *sptr;
4518  }
4519  /* If range includes any upper-case ASCII chars, make it include all */
4520  if (rangelo <= 'Z' && rangehi >= 'A')
4521  {
4522  if (rangelo > 'A')
4523  rangelo = 'A';
4524  if (rangehi < 'Z')
4525  rangehi = 'Z';
4526  }
4527  /* Ditto lower-case */
4528  if (rangelo <= 'z' && rangehi >= 'a')
4529  {
4530  if (rangelo > 'a')
4531  rangelo = 'a';
4532  if (rangehi < 'z')
4533  rangehi = 'z';
4534  }
4535  /* Ditto digits */
4536  if (rangelo <= '9' && rangehi >= '0')
4537  {
4538  if (rangelo > '0')
4539  rangelo = '0';
4540  if (rangehi < '9')
4541  rangehi = '9';
4542  }
4543 
4544  /*
4545  * If the range includes fewer than 10 chars, assume we do not have
4546  * enough data, and make it include the regular ASCII set.
4547  */
4548  if (rangehi - rangelo < 9)
4549  {
4550  rangelo = ' ';
4551  rangehi = 127;
4552  }
4553 
4554  /*
4555  * Now strip any common prefix of the three strings.
4556  */
4557  while (*lobound)
4558  {
4559  if (*lobound != *hibound || *lobound != *value)
4560  break;
4561  lobound++, hibound++, value++;
4562  }
4563 
4564  /*
4565  * Now we can do the conversions.
4566  */
4567  *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4568  *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4569  *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4570 }
4571 
4572 static double
4573 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
4574 {
4575  int slen = strlen(value);
4576  double num,
4577  denom,
4578  base;
4579 
4580  if (slen <= 0)
4581  return 0.0; /* empty string has scalar value 0 */
4582 
4583  /*
4584  * There seems little point in considering more than a dozen bytes from
4585  * the string. Since base is at least 10, that will give us nominal
4586  * resolution of at least 12 decimal digits, which is surely far more
4587  * precision than this estimation technique has got anyway (especially in
4588  * non-C locales). Also, even with the maximum possible base of 256, this
4589  * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4590  * overflow on any known machine.
4591  */
4592  if (slen > 12)
4593  slen = 12;
4594 
4595  /* Convert initial characters to fraction */
4596  base = rangehi - rangelo + 1;
4597  num = 0.0;
4598  denom = base;
4599  while (slen-- > 0)
4600  {
4601  int ch = (unsigned char) *value++;
4602 
4603  if (ch < rangelo)
4604  ch = rangelo - 1;
4605  else if (ch > rangehi)
4606  ch = rangehi + 1;
4607  num += ((double) (ch - rangelo)) / denom;
4608  denom *= base;
4609  }
4610 
4611  return num;
4612 }
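 /*
  * Worked example (inputs assumed): for value = "abc" with rangelo = 'a'
  * and rangehi = 'z', base = 26 and the loop accumulates
  *
  * ('a'-'a')/26 + ('b'-'a')/26^2 + ('c'-'a')/26^3
  * = 0 + 1/676 + 2/17576 ~= 0.00159
  *
  * i.e. the bytes are read as successive base-26 digits of a fraction.
  */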
4613 
4614 /*
4615  * Convert a string-type Datum into a palloc'd, null-terminated string.
4616  *
4617  * On failure (e.g., unsupported typid), set *failure to true;
4618  * otherwise, that variable is not changed. (We'll return NULL on failure.)
4619  *
4620  * When using a non-C locale, we must pass the string through strxfrm()
4621  * before continuing, so as to generate correct locale-specific results.
4622  */
4623 static char *
4624 convert_string_datum(Datum value, Oid typid, Oid collid, bool *failure)
4625 {
4626  char *val;
4627 
4628  switch (typid)
4629  {
4630  case CHAROID:
4631  val = (char *) palloc(2);
4632  val[0] = DatumGetChar(value);
4633  val[1] = '\0';
4634  break;
4635  case BPCHAROID:
4636  case VARCHAROID:
4637  case TEXTOID:
4638  val = TextDatumGetCString(value);
4639  break;
4640  case NAMEOID:
4641  {
4642  NameData   *nm = (NameData *) DatumGetPointer(value);
4643 
4644  val = pstrdup(NameStr(*nm));
4645  break;
4646  }
4647  default:
4648  *failure = true;
4649  return NULL;
4650  }
4651 
4652  if (!lc_collate_is_c(collid))
4653  {
4654  char *xfrmstr;
4655  size_t xfrmlen;
4656  size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4657 
4658  /*
4659  * XXX: We could guess at a suitable output buffer size and only call
4660  * strxfrm twice if our guess is too small.
4661  *
4662  * XXX: strxfrm doesn't support UTF-8 encoding on Win32; it can return
4663  * bogus data or set an error. This is not really a problem unless it
4664  * crashes since it will only give an estimation error and nothing
4665  * fatal.
4666  */
4667  xfrmlen = strxfrm(NULL, val, 0);
4668 #ifdef WIN32
4669 
4670  /*
4671  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4672  * of trying to allocate this much memory (and fail), just return the
4673  * original string unmodified as if we were in the C locale.
4674  */
4675  if (xfrmlen == INT_MAX)
4676  return val;
4677 #endif
4678  xfrmstr = (char *) palloc(xfrmlen + 1);
4679  xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4680 
4681  /*
4682  * Some systems (e.g., glibc) can return a smaller value from the
4683  * second call than the first; thus the Assert must be <= not ==.
4684  */
4685  Assert(xfrmlen2 <= xfrmlen);
4686  pfree(val);
4687  val = xfrmstr;
4688  }
4689 
4690  return val;
4691 }
4692 
4693 /*
4694  * Do convert_to_scalar()'s work for any bytea data type.
4695  *
4696  * Very similar to convert_string_to_scalar except we can't assume
4697  * null-termination and therefore pass explicit lengths around.
4698  *
4699  * Also, assumptions about likely "normal" ranges of characters have been
4700  * removed - a data range of 0..255 is always used, for now. (Perhaps
4701  * someday we will add information about actual byte data range to
4702  * pg_statistic.)
4703  */
4704 static void
4705 convert_bytea_to_scalar(Datum value,
4706  double *scaledvalue,
4707  Datum lobound,
4708  double *scaledlobound,
4709  Datum hibound,
4710  double *scaledhibound)
4711 {
4712  bytea *valuep = DatumGetByteaPP(value);
4713  bytea *loboundp = DatumGetByteaPP(lobound);
4714  bytea *hiboundp = DatumGetByteaPP(hibound);
4715  int rangelo,
4716  rangehi,
4717  valuelen = VARSIZE_ANY_EXHDR(valuep),
4718  loboundlen = VARSIZE_ANY_EXHDR(loboundp),
4719  hiboundlen = VARSIZE_ANY_EXHDR(hiboundp),
4720  i,
4721  minlen;
4722  unsigned char *valstr = (unsigned char *) VARDATA_ANY(valuep);
4723  unsigned char *lostr = (unsigned char *) VARDATA_ANY(loboundp);
4724  unsigned char *histr = (unsigned char *) VARDATA_ANY(hiboundp);
4725 
4726  /*
4727  * Assume bytea data is uniformly distributed across all byte values.
4728  */
4729  rangelo = 0;
4730  rangehi = 255;
4731 
4732  /*
4733  * Now strip any common prefix of the three strings.
4734  */
4735  minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4736  for (i = 0; i < minlen; i++)
4737  {
4738  if (*lostr != *histr || *lostr != *valstr)
4739  break;
4740  lostr++, histr++, valstr++;
4741  loboundlen--, hiboundlen--, valuelen--;
4742  }
4743 
4744  /*
4745  * Now we can do the conversions.
4746  */
4747  *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4748  *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4749  *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4750 }
4751 
4752 static double
4753 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4754  int rangelo, int rangehi)
4755 {
4756  double num,
4757  denom,
4758  base;
4759 
4760  if (valuelen <= 0)
4761  return 0.0; /* empty string has scalar value 0 */
4762 
4763  /*
4764  * Since base is 256, need not consider more than about 10 chars (even
4765  * this many seems like overkill)
4766  */
4767  if (valuelen > 10)
4768  valuelen = 10;
4769 
4770  /* Convert initial characters to fraction */
4771  base = rangehi - rangelo + 1;
4772  num = 0.0;
4773  denom = base;
4774  while (valuelen-- > 0)
4775  {
4776  int ch = *value++;
4777 
4778  if (ch < rangelo)
4779  ch = rangelo - 1;
4780  else if (ch > rangehi)
4781  ch = rangehi + 1;
4782  num += ((double) (ch - rangelo)) / denom;
4783  denom *= base;
4784  }
4785 
4786  return num;
4787 }
4788 
4789 /*
4790  * Do convert_to_scalar()'s work for any timevalue data type.
4791  *
4792  * On failure (e.g., unsupported typid), set *failure to true;
4793  * otherwise, that variable is not changed.
4794  */
4795 static double
4796 convert_timevalue_to_scalar(Datum value, Oid typid, bool *failure)
4797 {
4798  switch (typid)
4799  {
4800  case TIMESTAMPOID:
4801  return DatumGetTimestamp(value);
4802  case TIMESTAMPTZOID:
4803  return DatumGetTimestampTz(value);
4804  case DATEOID:
4805  return date2timestamp_no_overflow(DatumGetDateADT(value));
4806  case INTERVALOID:
4807  {
4808  Interval   *interval = DatumGetIntervalP(value);
4809 
4810  /*
4811  * Convert the month part of Interval to days using assumed
4812  * average month length of 365.25/12.0 days. Not too
4813  * accurate, but plenty good enough for our purposes.
4814  */
4815  return interval->time + interval->day * (double) USECS_PER_DAY +
4816  interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4817  }
4818  case TIMEOID:
4819  return DatumGetTimeADT(value);
4820  case TIMETZOID:
4821  {
4822  TimeTzADT *timetz = DatumGetTimeTzADTP(value);
4823 
4824  /* use GMT-equivalent time */
4825  return (double) (timetz->time + (timetz->zone * 1000000.0));
4826  }
4827  }
4828 
4829  *failure = true;
4830  return 0;
4831 }
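 /*
  * Interval example (values assumed): a 1-month interval converts to
  * 365.25/12 = 30.4375 days, i.e. 30.4375 * USECS_PER_DAY ~= 2.6298e+12
  * microseconds, putting it on the same scale as Timestamp values.
  */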
4832 
4833 
4834 /*
4835  * get_restriction_variable
4836  * Examine the args of a restriction clause to see if it's of the
4837  * form (variable op pseudoconstant) or (pseudoconstant op variable),
4838  * where "variable" could be either a Var or an expression in vars of a
4839  * single relation. If so, extract information about the variable,
4840  * and also indicate which side it was on and the other argument.
4841  *
4842  * Inputs:
4843  * root: the planner info
4844  * args: clause argument list
4845  * varRelid: see specs for restriction selectivity functions
4846  *
4847  * Outputs: (these are valid only if true is returned)
4848  * *vardata: gets information about variable (see examine_variable)
4849  * *other: gets other clause argument, aggressively reduced to a constant
4850  * *varonleft: set true if variable is on the left, false if on the right
4851  *
4852  * Returns true if a variable is identified, otherwise false.
4853  *
4854  * Note: if there are Vars on both sides of the clause, we must fail, because
4855  * callers are expecting that the other side will act like a pseudoconstant.
4856  */
4857 bool
4858 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4859  VariableStatData *vardata, Node **other,
4860  bool *varonleft)
4861 {
4862  Node *left,
4863  *right;
4864  VariableStatData rdata;
4865 
4866  /* Fail if not a binary opclause (probably shouldn't happen) */
4867  if (list_length(args) != 2)
4868  return false;
4869 
4870  left = (Node *) linitial(args);
4871  right = (Node *) lsecond(args);
4872 
4873  /*
4874  * Examine both sides. Note that when varRelid is nonzero, Vars of other
4875  * relations will be treated as pseudoconstants.
4876  */
4877  examine_variable(root, left, varRelid, vardata);
4878  examine_variable(root, right, varRelid, &rdata);
4879 
4880  /*
4881  * If one side is a variable and the other not, we win.
4882  */
4883  if (vardata->rel && rdata.rel == NULL)
4884  {
4885  *varonleft = true;
4886  *other = estimate_expression_value(root, rdata.var);
4887  /* Assume we need no ReleaseVariableStats(rdata) here */
4888  return true;
4889  }
4890 
4891  if (vardata->rel == NULL && rdata.rel)
4892  {
4893  *varonleft = false;
4894  *other = estimate_expression_value(root, vardata->var);
4895  /* Assume we need no ReleaseVariableStats(*vardata) here */
4896  *vardata = rdata;
4897  return true;
4898  }
4899 
4900  /* Oops, clause has wrong structure (probably var op var) */
4901  ReleaseVariableStats(*vardata);
4902  ReleaseVariableStats(rdata);
4903 
4904  return false;
4905 }
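 /*
  * Usage sketch (clause shapes assumed): for a clause like "t.x < 42"
  * the args list is (Var t.x, Const 42), so *varonleft is set true and
  * *other becomes the Const 42; for "42 > t.x" the roles are swapped and
  * *varonleft is false. For "t.x < t.y" with vars on both sides and
  * varRelid = 0 we return false, since neither side acts as a
  * pseudoconstant.
  */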
4906 
4907 /*
4908  * get_join_variables
4909  * Apply examine_variable() to each side of a join clause.
4910  * Also, attempt to identify whether the join clause has the same
4911  * or reversed sense compared to the SpecialJoinInfo.
4912  *
4913  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4914  * or "reversed" if it is "rhs_var OP lhs_var". In complicated cases
4915  * where we can't tell for sure, we default to assuming it's normal.
4916  */
4917 void
4918 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4919  VariableStatData *vardata1, VariableStatData *vardata2,
4920  bool *join_is_reversed)
4921 {
4922  Node *left,
4923  *right;
4924 
4925  if (list_length(args) != 2)
4926  elog(ERROR, "join operator should take two arguments");
4927 
4928  left = (Node *) linitial(args);
4929  right = (Node *) lsecond(args);
4930 
4931  examine_variable(root, left, 0, vardata1);
4932  examine_variable(root, right, 0, vardata2);
4933 
4934  if (vardata1->rel &&
4935  bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4936  *join_is_reversed = true; /* var1 is on RHS */
4937  else if (vardata2->rel &&
4938  bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4939  *join_is_reversed = true; /* var2 is on LHS */
4940  else
4941  *join_is_reversed = false;
4942 }
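 /*
  * Orientation example (join assumed): for "a.x = b.y" with sjinfo
  * having syn_lefthand = {a} and syn_righthand = {b}, the clause is
  * "normal" (*join_is_reversed = false); had it been written
  * "b.y = a.x", vardata1 would fall in the RHS and *join_is_reversed
  * would be set true.
  */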
4943 
4944 /* statext_expressions_load copies the tuple, so just pfree it. */
4945 static void
4946 ReleaseDummy(HeapTuple tuple)
4947 {
4948  pfree(tuple);
4949 }
4950 
4951 /*
4952  * examine_variable
4953  * Try to look up statistical data about an expression.
4954  * Fill in a VariableStatData struct to describe the expression.
4955  *
4956  * Inputs:
4957  * root: the planner info
4958  * node: the expression tree to examine
4959  * varRelid: see specs for restriction selectivity functions
4960  *
4961  * Outputs: *vardata is filled as follows:
4962  * var: the input expression (with any binary relabeling stripped, if
4963  * it is or contains a variable; but otherwise the type is preserved)
4964  * rel: RelOptInfo for relation containing variable; NULL if expression
4965  * contains no Vars (NOTE this could point to a RelOptInfo of a
4966  * subquery, not one in the current query).
4967  * statsTuple: the pg_statistic entry for the variable, if one exists;
4968  * otherwise NULL.
4969  * freefunc: pointer to a function to release statsTuple with.
4970  * vartype: exposed type of the expression; this should always match
4971  * the declared input type of the operator we are estimating for.
4972  * atttype, atttypmod: actual type/typmod of the "var" expression. This is
4973  * commonly the same as the exposed type of the variable argument,
4974  * but can be different in binary-compatible-type cases.
4975  * isunique: true if we were able to match the var to a unique index or a
4976  * single-column DISTINCT clause, implying its values are unique for
4977  * this query. (Caution: this should be trusted for statistical
4978  * purposes only, since we do not check indimmediate nor verify that
4979  * the exact same definition of equality applies.)
4980  * acl_ok: true if current user has permission to read the column(s)
4981  * underlying the pg_statistic entry. This is consulted by
4982  * statistic_proc_security_check().
4983  *
4984  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4985  */
4986 void
4987 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4988  VariableStatData *vardata)
4989 {
4990  Node *basenode;
4991  Relids varnos;
4992  RelOptInfo *onerel;
4993 
4994  /* Make sure we don't return dangling pointers in vardata */
4995  MemSet(vardata, 0, sizeof(VariableStatData));
4996 
4997  /* Save the exposed type of the expression */
4998  vardata->vartype = exprType(node);
4999 
5000  /* Look inside any binary-compatible relabeling */
5001 
5002  if (IsA(node, RelabelType))
5003  basenode = (Node *) ((RelabelType *) node)->arg;
5004  else
5005  basenode = node;
5006 
5007  /* Fast path for a simple Var */
5008 
5009  if (IsA(basenode, Var) &&
5010  (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
5011  {
5012  Var *var = (Var *) basenode;
5013 
5014  /* Set up result fields other than the stats tuple */
5015  vardata->var = basenode; /* return Var without relabeling */
5016  vardata->rel = find_base_rel(root, var->varno);
5017  vardata->atttype = var->vartype;
5018  vardata->atttypmod = var->vartypmod;
5019  vardata->isunique = has_unique_index(vardata->rel, var->varattno);
5020 
5021  /* Try to locate some stats */
5022  examine_simple_variable(root, var, vardata);
5023 
5024  return;
5025  }
5026 
5027  /*
5028  * Okay, it's a more complicated expression. Determine variable
5029  * membership. Note that when varRelid isn't zero, only vars of that
5030  * relation are considered "real" vars.
5031  */
5032  varnos = pull_varnos(root, basenode);
5033 
5034  onerel = NULL;
5035 
5036  switch (bms_membership(varnos))
5037  {
5038  case BMS_EMPTY_SET:
5039  /* No Vars at all ... must be pseudo-constant clause */
5040  break;
5041  case BMS_SINGLETON:
5042  if (varRelid == 0 || bms_is_member(varRelid, varnos))
5043  {
5044  onerel = find_base_rel(root,
5045  (varRelid ? varRelid : bms_singleton_member(varnos)));
5046  vardata->rel = onerel;
5047  node = basenode; /* strip any relabeling */
5048  }
5049  /* else treat it as a constant */
5050  break;
5051  case BMS_MULTIPLE:
5052  if (varRelid == 0)
5053  {
5054  /* treat it as a variable of a join relation */
5055  vardata->rel = find_join_rel(root, varnos);
5056  node = basenode; /* strip any relabeling */
5057  }
5058  else if (bms_is_member(varRelid, varnos))
5059  {
5060  /* ignore the vars belonging to other relations */
5061  vardata->rel = find_base_rel(root, varRelid);
5062  node = basenode; /* strip any relabeling */
5063  /* note: no point in expressional-index search here */
5064  }
5065  /* else treat it as a constant */
5066  break;
5067  }
5068 
5069  bms_free(varnos);
5070 
5071  vardata->var = node;
5072  vardata->atttype = exprType(node);
5073  vardata->atttypmod = exprTypmod(node);
5074 
5075  if (onerel)
5076  {
5077  /*
5078  * We have an expression in vars of a single relation. Try to match
5079  * it to expressional index columns, in hopes of finding some
5080  * statistics.
5081  *
5082  * Note that we consider all index columns including INCLUDE columns,
5083  * since there could be stats for such columns. But the test for
5084  * uniqueness needs to be warier.
5085  *
5086  * XXX it's conceivable that there are multiple matches with different
5087  * index opfamilies; if so, we need to pick one that matches the
5088  * operator we are estimating for. FIXME later.
5089  */
5090  ListCell *ilist;
5091  ListCell *slist;
5092 
5093  foreach(ilist, onerel->indexlist)
5094  {
5095  IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
5096  ListCell *indexpr_item;
5097  int pos;
5098 
5099  indexpr_item = list_head(index->indexprs);
5100  if (indexpr_item == NULL)
5101  continue; /* no expressions here... */
5102 
5103  for (pos = 0; pos < index->ncolumns; pos++)
5104  {
5105  if (index->indexkeys[pos] == 0)
5106  {
5107  Node *indexkey;
5108 
5109  if (indexpr_item == NULL)
5110  elog(ERROR, "too few entries in indexprs list");
5111  indexkey = (Node *) lfirst(indexpr_item);
5112  if (indexkey && IsA(indexkey, RelabelType))
5113  indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5114  if (equal(node, indexkey))
5115  {
5116  /*
5117  * Found a match ... is it a unique index? Tests here
5118  * should match has_unique_index().
5119  */
5120  if (index->unique &&
5121  index->nkeycolumns == 1 &&
5122  pos == 0 &&
5123  (index->indpred == NIL || index->predOK))
5124  vardata->isunique = true;
5125 
5126  /*
5127  * Has it got stats? We only consider stats for
5128  * non-partial indexes, since partial indexes probably
5129  * don't reflect whole-relation statistics; the above
5130  * check for uniqueness is the only info we take from
5131  * a partial index.
5132  *
5133  * An index stats hook, however, must make its own
5134  * decisions about what to do with partial indexes.
5135  */
5136  if (get_index_stats_hook &&
5137  (*get_index_stats_hook) (root, index->indexoid,
5138  pos + 1, vardata))
5139  {
5140  /*
5141  * The hook took control of acquiring a stats
5142  * tuple. If it did supply a tuple, it'd better
5143  * have supplied a freefunc.
5144  */
5145  if (HeapTupleIsValid(vardata->statsTuple) &&
5146  !vardata->freefunc)
5147  elog(ERROR, "no function provided to release variable stats with");
5148  }
5149  else if (index->indpred == NIL)
5150  {
5151  vardata->statsTuple =
5152  SearchSysCache3(STATRELATTINH,
5153  ObjectIdGetDatum(index->indexoid),
5154  Int16GetDatum(pos + 1),
5155  BoolGetDatum(false));
5156  vardata->freefunc = ReleaseSysCache;
5157 
5158  if (HeapTupleIsValid(vardata->statsTuple))
5159  {
5160  /* Get index's table for permission check */
5161  RangeTblEntry *rte;
5162  Oid userid;
5163 
5164  rte = planner_rt_fetch(index->rel->relid, root);
5165  Assert(rte->rtekind == RTE_RELATION);
5166 
5167  /*
5168  * Use checkAsUser if it's set, in case we're
5169  * accessing the table via a view.
5170  */
5171  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5172 
5173  /*
5174  * For simplicity, we insist on the whole
5175  * table being selectable, rather than trying
5176  * to identify which column(s) the index
5177  * depends on. Also require all rows to be
5178  * selectable --- there must be no
5179  * securityQuals from security barrier views
5180  * or RLS policies.
5181  */
5182  vardata->acl_ok =
5183  rte->securityQuals == NIL &&
5184  (pg_class_aclcheck(rte->relid, userid,
5185  ACL_SELECT) == ACLCHECK_OK);
5186 
5187  /*
5188  * If the user doesn't have permissions to
5189  * access an inheritance child relation, check
5190  * the permissions of the table actually
5191  * mentioned in the query, since most likely
5192  * the user does have that permission. Note
5193  * that whole-table select privilege on the
5194  * parent doesn't quite guarantee that the
5195  * user could read all columns of the child.
5196  * But in practice it's unlikely that any
5197  * interesting security violation could result
5198  * from allowing access to the expression
5199  * index's stats, so we allow it anyway. See
5200  * similar code in examine_simple_variable()
5201  * for additional comments.
5202  */
5203  if (!vardata->acl_ok &&
5204  root->append_rel_array != NULL)
5205  {
5206  AppendRelInfo *appinfo;
5207  Index varno = index->rel->relid;
5208 
5209  appinfo = root->append_rel_array[varno];
5210  while (appinfo &&
5211  planner_rt_fetch(appinfo->parent_relid,
5212  root)->rtekind == RTE_RELATION)
5213  {
5214  varno = appinfo->parent_relid;
5215  appinfo = root->append_rel_array[varno];
5216  }
5217  if (varno != index->rel->relid)
5218  {
5219  /* Repeat access check on this rel */
5220  rte = planner_rt_fetch(varno, root);
5221  Assert(rte->rtekind == RTE_RELATION);
5222 
5223  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5224 
5225  vardata->acl_ok =
5226  rte->securityQuals == NIL &&
5227  (pg_class_aclcheck(rte->relid,
5228  userid,
5229  ACL_SELECT) == ACLCHECK_OK);
5230  }
5231  }
5232  }
5233  else
5234  {
5235  /* suppress leakproofness checks later */
5236  vardata->acl_ok = true;
5237  }
5238  }
5239  if (vardata->statsTuple)
5240  break;
5241  }
5242  indexpr_item = lnext(index->indexprs, indexpr_item);
5243  }
5244  }
5245  if (vardata->statsTuple)
5246  break;
5247  }
5248 
5249  /*
5250  * Search extended statistics for one with a matching expression.
5251  * There might be multiple ones, so just grab the first one. In the
5252  * future, we might consider the statistics target (and pick the most
5253  * accurate statistics) and maybe some other parameters.
5254  */
5255  foreach(slist, onerel->statlist)
5256  {
5257  StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist);
5258  RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
5259  ListCell *expr_item;
5260  int pos;
5261 
5262  /*
5263  * Stop once we've found statistics for the expression (either
5264  * from extended stats, or for an index in the preceding loop).
5265  */
5266  if (vardata->statsTuple)
5267  break;
5268 
5269  /* skip stats without per-expression stats */
5270  if (info->kind != STATS_EXT_EXPRESSIONS)
5271  continue;
5272 
5273  pos = 0;
5274  foreach(expr_item, info->exprs)
5275  {
5276  Node *expr = (Node *) lfirst(expr_item);
5277 
5278  Assert(expr);
5279 
5280  /* strip RelabelType before comparing it */
5281  if (expr && IsA(expr, RelabelType))
5282  expr = (Node *) ((RelabelType *) expr)->arg;
5283 
5284  /* found a match, see if we can extract pg_statistic row */
5285  if (equal(node, expr))
5286  {
5287  Oid userid;
5288 
5289  /*
5290  * XXX Not sure if we should cache the tuple somewhere.
5291  * Now we just create a new copy every time.
5292  */
5293  vardata->statsTuple =
5294  statext_expressions_load(info->statOid, rte->inh, pos);
5295 
5296  vardata->freefunc = ReleaseDummy;
5297 
5298  /*
5299  * Use checkAsUser if it's set, in case we're accessing
5300  * the table via a view.
5301  */
5302  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5303 
5304  /*
5305  * For simplicity, we insist on the whole table being
5306  * selectable, rather than trying to identify which
5307  * column(s) the statistics object depends on. Also
5308  * require all rows to be selectable --- there must be no
5309  * securityQuals from security barrier views or RLS
5310  * policies.
5311  */
5312  vardata->acl_ok =
5313  rte->securityQuals == NIL &&
5314  (pg_class_aclcheck(rte->relid, userid,
5315  ACL_SELECT) == ACLCHECK_OK);
5316 
5317  /*
5318  * If the user doesn't have permissions to access an
5319  * inheritance child relation, check the permissions of
5320  * the table actually mentioned in the query, since most
5321  * likely the user does have that permission. Note that
5322  * whole-table select privilege on the parent doesn't
5323  * quite guarantee that the user could read all columns of
5324  * the child. But in practice it's unlikely that any
5325  * interesting security violation could result from
5326  * allowing access to the expression stats, so we allow it
5327  * anyway. See similar code in examine_simple_variable()
5328  * for additional comments.
5329  */
5330  if (!vardata->acl_ok &&
5331  root->append_rel_array != NULL)
5332  {
5333  AppendRelInfo *appinfo;
5334  Index varno = onerel->relid;
5335 
5336  appinfo = root->append_rel_array[varno];
5337  while (appinfo &&
5338  planner_rt_fetch(appinfo->parent_relid,
5339  root)->rtekind == RTE_RELATION)
5340  {
5341  varno = appinfo->parent_relid;
5342  appinfo = root->append_rel_array[varno];
5343  }
5344  if (varno != onerel->relid)
5345  {
5346  /* Repeat access check on this rel */
5347  rte = planner_rt_fetch(varno, root);
5348  Assert(rte->rtekind == RTE_RELATION);
5349 
5350  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5351 
5352  vardata->acl_ok =
5353  rte->securityQuals == NIL &&
5354  (pg_class_aclcheck(rte->relid,
5355  userid,
5356  ACL_SELECT) == ACLCHECK_OK);
5357  }
5358  }
5359 
5360  break;
5361  }
5362 
5363  pos++;
5364  }
5365  }
5366  }
5367 }
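
The inheritance-ACL fallback above appears twice in this function (once for index expressions, once for extended statistics). A minimal sketch of the shared idiom, written as a hypothetical helper that is not part of selfuncs.c (the name is illustrative; callers are assumed to have verified root->append_rel_array != NULL):

static bool
parent_rel_acl_ok(PlannerInfo *root, Index childvarno)
{
    Index        varno = childvarno;
    AppendRelInfo *appinfo = root->append_rel_array[varno];
    RangeTblEntry *rte;
    Oid          userid;

    /* Walk up while the parent is a plain relation (not UNION ALL). */
    while (appinfo &&
           planner_rt_fetch(appinfo->parent_relid,
                            root)->rtekind == RTE_RELATION)
    {
        varno = appinfo->parent_relid;
        appinfo = root->append_rel_array[varno];
    }

    if (varno == childvarno)
        return false;        /* no relation ancestor; acl_ok stays false */

    /* Repeat the whole-table access check on the ancestor rel. */
    rte = planner_rt_fetch(varno, root);
    Assert(rte->rtekind == RTE_RELATION);
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    return rte->securityQuals == NIL &&
        pg_class_aclcheck(rte->relid, userid, ACL_SELECT) == ACLCHECK_OK;
}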
5368 
5369 /*
5370  * examine_simple_variable
5371  * Handle a simple Var for examine_variable
5372  *
5373  * This is split out as a subroutine so that we can recurse to deal with
5374  * Vars referencing subqueries.
5375  *
5376  * We already filled in all the fields of *vardata except for the stats tuple.
5377  */
5378 static void
5379 examine_simple_variable(PlannerInfo *root, Var *var,
5380  VariableStatData *vardata)
5381 {
5382  RangeTblEntry *rte = root->simple_rte_array[var->varno];
5383 
5384  Assert(IsA(rte, RangeTblEntry));
5385 
5386  if (get_relation_stats_hook &&
5387  (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
5388  {
5389  /*
5390  * The hook took control of acquiring a stats tuple. If it did supply
5391  * a tuple, it'd better have supplied a freefunc.
5392  */
5393  if (HeapTupleIsValid(vardata->statsTuple) &&
5394  !vardata->freefunc)
5395  elog(ERROR, "no function provided to release variable stats with");
5396  }
5397  else if (rte->rtekind == RTE_RELATION)
5398  {
5399  /*
5400  * Plain table or parent of an inheritance appendrel, so look up the
5401  * column in pg_statistic
5402  */
5403  vardata->statsTuple = SearchSysCache3(STATRELATTINH,
5404  ObjectIdGetDatum(rte->relid),
5405  Int16GetDatum(var->varattno),
5406  BoolGetDatum(rte->inh));
5407  vardata->freefunc = ReleaseSysCache;
5408 
5409  if (HeapTupleIsValid(vardata->statsTuple))
5410  {
5411  Oid userid;
5412 
5413  /*
5414  * Check if user has permission to read this column. We require
5415  * all rows to be accessible, so there must be no securityQuals
5416  * from security barrier views or RLS policies. Use checkAsUser
5417  * if it's set, in case we're accessing the table via a view.
5418  */
5419  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5420 
5421  vardata->acl_ok =
5422  rte->securityQuals == NIL &&
5423  ((pg_class_aclcheck(rte->relid, userid,
5424  ACL_SELECT) == ACLCHECK_OK) ||
5425  (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
5426  ACL_SELECT) == ACLCHECK_OK));
5427 
5428  /*
5429  * If the user doesn't have permissions to access an inheritance
5430  * child relation or specifically this attribute, check the
5431  * permissions of the table/column actually mentioned in the
5432  * query, since most likely the user does have that permission
5433  * (else the query will fail at runtime), and if the user can read
5434  * the column there then he can get the values of the child table
5435  * too. To do that, we must find out which of the root parent's
5436  * attributes the child relation's attribute corresponds to.
5437  */
5438  if (!vardata->acl_ok && var->varattno > 0 &&
5439  root->append_rel_array != NULL)
5440  {
5441  AppendRelInfo *appinfo;
5442  Index varno = var->varno;
5443  int varattno = var->varattno;
5444  bool found = false;
5445 
5446  appinfo = root->append_rel_array[varno];
5447 
5448  /*
5449  * Partitions are mapped to their immediate parent, not the
5450  * root parent, so must be ready to walk up multiple
5451  * AppendRelInfos. But stop if we hit a parent that is not
5452  * RTE_RELATION --- that's a flattened UNION ALL subquery, not
5453  * an inheritance parent.
5454  */
5455  while (appinfo &&
5456  planner_rt_fetch(appinfo->parent_relid,
5457  root)->rtekind == RTE_RELATION)
5458  {
5459  int parent_varattno;
5460 
5461  found = false;
5462  if (varattno <= 0 || varattno > appinfo->num_child_cols)
5463  break; /* safety check */
5464  parent_varattno = appinfo->parent_colnos[varattno - 1];
5465  if (parent_varattno == 0)
5466  break; /* Var is local to child */
5467 
5468  varno = appinfo->parent_relid;
5469  varattno = parent_varattno;
5470  found = true;
5471 
5472  /* If the parent is itself a child, continue up. */
5473  appinfo = root->append_rel_array[varno];
5474  }
5475 
5476  /*
5477  * In rare cases, the Var may be local to the child table, in
5478  * which case, we've got to live with having no access to this
5479  * column's stats.
5480  */
5481  if (!found)
5482  return;
5483 
5484  /* Repeat the access check on this parent rel & column */
5485  rte = planner_rt_fetch(varno, root);
5486  Assert(rte->rtekind == RTE_RELATION);
5487 
5488  userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5489 
5490  vardata->acl_ok =
5491  rte->securityQuals == NIL &&
5492  ((pg_class_aclcheck(rte->relid, userid,
5493  ACL_SELECT) == ACLCHECK_OK) ||
5494  (pg_attribute_aclcheck(rte->relid, varattno, userid,
5495  ACL_SELECT) == ACLCHECK_OK));
5496  }
5497  }
5498  else
5499  {
5500  /* suppress any possible leakproofness checks later */
5501  vardata->acl_ok = true;
5502  }
5503  }
5504  else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
5505  {
5506  /*
5507  * Plain subquery (not one that was converted to an appendrel).
5508  */
5509  Query *subquery = rte->subquery;
5510  RelOptInfo *rel;
5511  TargetEntry *ste;
5512 
5513  /*
5514  * Punt if it's a whole-row var rather than a plain column reference.
5515  */
5516  if (var->varattno == InvalidAttrNumber)
5517  return;
5518 
5519  /*
5520  * Punt if subquery uses set operations or GROUP BY, as these will
5521  * mash underlying columns' stats beyond recognition. (Set ops are
5522  * particularly nasty; if we forged ahead, we would return stats
5523  * relevant to only the leftmost subselect...) DISTINCT is also
5524  * problematic, but we check that later because there is a possibility
5525  * of learning something even with it.
5526  */
5527  if (subquery->setOperations ||
5528  subquery->groupClause ||
5529  subquery->groupingSets)
5530  return;
5531 
5532  /*
5533  * OK, fetch RelOptInfo for subquery. Note that we don't change the
5534  * rel returned in vardata, since caller expects it to be a rel of the
5535  * caller's query level. Because we might already be recursing, we
5536  * can't use that rel pointer either, but have to look up the Var's
5537  * rel afresh.
5538  */
5539  rel = find_base_rel(root, var->varno);
5540 
5541  /* If the subquery hasn't been planned yet, we have to punt */
5542  if (rel->subroot == NULL)
5543  return;
5544  Assert(IsA(rel->subroot, PlannerInfo));
5545 
5546  /*
5547  * Switch our attention to the subquery as mangled by the planner. It
5548  * was okay to look at the pre-planning version for the tests above,
5549  * but now we need a Var that will refer to the subroot's live
5550  * RelOptInfos. For instance, if any subquery pullup happened during
5551  * planning, Vars in the targetlist might have gotten replaced, and we
5552  * need to see the replacement expressions.
5553  */
5554  subquery = rel->subroot->parse;
5555  Assert(IsA(subquery, Query));
5556 
5557  /* Get the subquery output expression referenced by the upper Var */
5558  ste = get_tle_by_resno(subquery->targetList, var->varattno);
5559  if (ste == NULL || ste->resjunk)
5560  elog(ERROR, "subquery %s does not have attribute %d",
5561  rte->eref->aliasname, var->varattno);
5562  var = (Var *) ste->expr;
5563 
5564  /*
5565  * If subquery uses DISTINCT, we can't make use of any stats for the
5566  * variable ... but, if it's the only DISTINCT column, we are entitled
5567  * to consider it unique. We do the test this way so that it works
5568  * for cases involving DISTINCT ON.
5569  */
5570  if (subquery->distinctClause)
5571  {
5572  if (list_length(subquery->distinctClause) == 1 &&
5573  targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
5574  vardata->isunique = true;
5575  /* cannot go further */
5576  return;
5577  }
5578 
5579  /*
5580  * If the sub-query originated from a view with the security_barrier
5581  * attribute, we must not look at the variable's statistics, though it
5582  * seems all right to notice the existence of a DISTINCT clause. So
5583  * stop here.
5584  *
5585  * This is probably a harsher restriction than necessary; it's
5586  * certainly OK for the selectivity estimator (which is a C function,
5587  * and therefore omnipotent anyway) to look at the statistics. But
5588  * many selectivity estimators will happily *invoke the operator
5589  * function* to try to work out a good estimate - and that's not OK.
5590  * So for now, don't dig down for stats.
5591  */
5592  if (rte->security_barrier)
5593  return;
5594 
5595  /* Can only handle a simple Var of subquery's query level */
5596  if (var && IsA(var, Var) &&
5597  var->varlevelsup == 0)
5598  {
5599  /*
5600  * OK, recurse into the subquery. Note that the original setting
5601  * of vardata->isunique (which will surely be false) is left
5602  * unchanged in this situation. That's what we want, since even
5603  * if the underlying column is unique, the subquery may have
5604  * joined to other tables in a way that creates duplicates.
5605  */
5606  examine_simple_variable(rel->subroot, var, vardata);
5607  }
5608  }
5609  else
5610  {
5611  /*
5612  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
5613  * won't see RTE_JOIN here because join alias Vars have already been
5614  * flattened.) There's not much we can do with function outputs, but
5615  * maybe someday try to be smarter about VALUES and/or CTEs.
5616  */
5617  }
5618 }
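
The attribute-number translation step in the parent-walk loop above can be read in isolation. The following hypothetical helper (not in selfuncs.c) shows how a child column number maps through an AppendRelInfo to its immediate parent:

static AttrNumber
child_attno_to_parent(AppendRelInfo *appinfo, AttrNumber child_attno)
{
    /* System columns and out-of-range attnos cannot be translated. */
    if (child_attno <= 0 || child_attno > appinfo->num_child_cols)
        return 0;
    /* A zero result means the column is local to the child. */
    return appinfo->parent_colnos[child_attno - 1];
}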
5619 
5620 /*
5621  * Check whether it is permitted to call func_oid passing some of the
5622  * pg_statistic data in vardata. We allow this either if the user has SELECT
5623  * privileges on the table or column underlying the pg_statistic data or if
5624  * the function is marked leak-proof.
5625  */
5626 bool
5627 statistic_proc_security_check(VariableStatData *vardata, Oid func_oid)
5628 {
5629  if (vardata->acl_ok)
5630  return true;
5631 
5632  if (!OidIsValid(func_oid))
5633  return false;
5634 
5635  if (get_func_leakproof(func_oid))
5636  return true;
5637 
5638  ereport(DEBUG2,
5639  (errmsg_internal("not using statistics because function \"%s\" is not leak-proof",
5640  get_func_name(func_oid))));
5641  return false;
5642 }
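
A sketch of the intended call pattern, modeled on get_variable_range below; the function name and its fallback return value are illustrative, not part of selfuncs.c:

static Selectivity
estimator_guard_example(VariableStatData *vardata, Oid opno)
{
    Oid opfuncoid = get_opcode(opno);

    if (!statistic_proc_security_check(vardata, opfuncoid))
        return DEFAULT_EQ_SEL;  /* don't expose stats through the function */

    /* Safe to fmgr_info(opfuncoid, ...) and call it on sampled values. */
    return 0.0;                 /* real code would compute from the stats */
}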
5643 
5644 /*
5645  * get_variable_numdistinct
5646  * Estimate the number of distinct values of a variable.
5647  *
5648  * vardata: results of examine_variable
5649  * *isdefault: set to true if the result is a default rather than based on
5650  * anything meaningful.
5651  *
5652  * NB: be careful to produce a positive integral result, since callers may
5653  * compare the result to exact integer counts, or might divide by it.
5654  */
5655 double
5656 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
5657 {
5658  double stadistinct;
5659  double stanullfrac = 0.0;
5660  double ntuples;
5661 
5662  *isdefault = false;
5663 
5664  /*
5665  * Determine the stadistinct value to use. There are cases where we can
5666  * get an estimate even without a pg_statistic entry, or can get a better
5667  * value than is in pg_statistic. Grab stanullfrac too if we can find it
5668  * (otherwise, assume no nulls, for lack of any better idea).
5669  */
5670  if (HeapTupleIsValid(vardata->statsTuple))
5671  {
5672  /* Use the pg_statistic entry */
5673  Form_pg_statistic stats;
5674 
5675  stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
5676  stadistinct = stats->stadistinct;
5677  stanullfrac = stats->stanullfrac;
5678  }
5679  else if (vardata->vartype == BOOLOID)
5680  {
5681  /*
5682  * Special-case boolean columns: presumably, two distinct values.
5683  *
5684  * Are there any other datatypes we should wire in special estimates
5685  * for?
5686  */
5687  stadistinct = 2.0;
5688  }
5689  else if (vardata->rel && vardata->rel->rtekind == RTE_VALUES)
5690  {
5691  /*
5692  * If the Var represents a column of a VALUES RTE, assume it's unique.
5693  * This could of course be very wrong, but it should tend to be true
5694  * in well-written queries. We could consider examining the VALUES'
5695  * contents to get some real statistics; but that only works if the
5696  * entries are all constants, and it would be pretty expensive anyway.
5697  */
5698  stadistinct = -1.0; /* unique (and all non null) */
5699  }
5700  else
5701  {
5702  /*
5703  * We don't keep statistics for system columns, but in some cases we
5704  * can infer distinctness anyway.
5705  */
5706  if (vardata->var && IsA(vardata->var, Var))
5707  {
5708  switch (((Var *) vardata->var)->varattno)
5709  {
5710  case SelfItemPointerAttributeNumber:
5711  stadistinct = -1.0; /* unique (and all non null) */
5712  break;
5713  case TableOidAttributeNumber:
5714  stadistinct = 1.0; /* only 1 value */
5715  break;
5716  default:
5717  stadistinct = 0.0; /* means "unknown" */
5718  break;
5719  }
5720  }
5721  else
5722  stadistinct = 0.0; /* means "unknown" */
5723 
5724  /*
5725  * XXX consider using estimate_num_groups on expressions?
5726  */
5727  }
5728 
5729  /*
5730  * If there is a unique index or DISTINCT clause for the variable, assume
5731  * it is unique no matter what pg_statistic says; the statistics could be
5732  * out of date, or we might have found a partial unique index that proves
5733  * the var is unique for this query. However, we'd better still believe
5734  * the null-fraction statistic.
5735  */
5736  if (vardata->isunique)
5737  stadistinct = -1.0 * (1.0 - stanullfrac);
5738 
5739  /*
5740  * If we had an absolute estimate, use that.
5741  */
5742  if (stadistinct > 0.0)
5743  return clamp_row_est(stadistinct);
5744 
5745  /*
5746  * Otherwise we need to get the relation size; punt if not available.
5747  */
5748  if (vardata->rel == NULL)
5749  {
5750  *isdefault = true;
5751  return DEFAULT_NUM_DISTINCT;
5752  }
5753  ntuples = vardata->rel->tuples;
5754  if (ntuples <= 0.0)
5755  {
5756  *isdefault = true;
5757  return DEFAULT_NUM_DISTINCT;
5758  }
5759 
5760  /*
5761  * If we had a relative estimate, use that.
5762  */
5763  if (stadistinct < 0.0)
5764  return clamp_row_est(-stadistinct * ntuples);
5765 
5766  /*
5767  * With no data, estimate ndistinct = ntuples if the table is small, else
5768  * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5769  * that the behavior isn't discontinuous.
5770  */
5771  if (ntuples < DEFAULT_NUM_DISTINCT)
5772  return clamp_row_est(ntuples);
5773 
5774  *isdefault = true;
5775  return DEFAULT_NUM_DISTINCT;
5776 }
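
A worked example of the stadistinct conventions applied above (illustrative only): positive values are absolute distinct counts, negative values are the negated fraction of the row count, and zero means unknown.

static void
stadistinct_worked_example(void)
{
    double ntuples = 50000.0;
    double stadistinct;

    /* Relative estimate: -0.2 means 20% of rows are distinct. */
    stadistinct = -0.2;
    Assert(clamp_row_est(-stadistinct * ntuples) == 10000.0);

    /* isunique with stanullfrac = 0.1: -1.0 * (1.0 - 0.1) = -0.9. */
    stadistinct = -1.0 * (1.0 - 0.1);
    Assert(clamp_row_est(-stadistinct * ntuples) == 45000.0);
}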
5777 
5778 /*
5779  * get_variable_range
5780  * Estimate the minimum and maximum value of the specified variable.
5781  * If successful, store values in *min and *max, and return true.
5782  * If no data available, return false.
5783  *
5784  * sortop is the "<" comparison operator to use. This should generally
5785  * be "<" not ">", as only the former is likely to be found in pg_statistic.
5786  * The collation must be specified too.
5787  */
5788 static bool
5789 get_variable_range(PlannerInfo *root, VariableStatData *vardata,
5790  Oid sortop, Oid collation,
5791  Datum *min, Datum *max)
5792 {
5793  Datum tmin = 0;
5794  Datum tmax = 0;
5795  bool have_data = false;
5796  int16 typLen;
5797  bool typByVal;
5798  Oid opfuncoid;
5799  FmgrInfo opproc;
5800  AttStatsSlot sslot;
5801 
5802  /*
5803  * XXX It's very tempting to try to use the actual column min and max, if
5804  * we can get them relatively-cheaply with an index probe. However, since
5805  * this function is called many times during join planning, that could
5806  * have unpleasant effects on planning speed. Need more investigation
5807  * before enabling this.
5808  */
5809 #ifdef NOT_USED
5810  if (get_actual_variable_range(root, vardata, sortop, collation, min, max))
5811  return true;
5812 #endif
5813 
5814  if (!HeapTupleIsValid(vardata->statsTuple))
5815  {
5816  /* no stats available, so default result */
5817  return false;
5818  }
5819 
5820  /*
5821  * If we can't apply the sortop to the stats data, just fail. In
5822  * principle, if there's a histogram and no MCVs, we could return the
5823  * histogram endpoints without ever applying the sortop ... but it's
5824  * probably not worth trying, because whatever the caller wants to do with
5825  * the endpoints would likely fail the security check too.
5826  */
5827  if (!statistic_proc_security_check(vardata,
5828  (opfuncoid = get_opcode(sortop))))
5829  return false;
5830 
5831  opproc.fn_oid = InvalidOid; /* mark this as not looked up yet */
5832 
5833  get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5834 
5835  /*
5836  * If there is a histogram with the ordering we want, grab the first and
5837  * last values.
5838  */
5839  if (get_attstatsslot(&sslot, vardata->statsTuple,
5840  STATISTIC_KIND_HISTOGRAM, sortop,
5841  ATTSTATSSLOT_VALUES))
5842  {
5843  if (sslot.stacoll == collation && sslot.nvalues > 0)
5844  {
5845  tmin = datumCopy(sslot.values[0], typByVal, typLen);
5846  tmax = datumCopy(sslot.values[sslot.nvalues - 1], typByVal, typLen);
5847  have_data = true;
5848  }
5849  free_attstatsslot(&sslot);
5850  }
5851 
5852  /*
5853  * Otherwise, if there is a histogram with some other ordering, scan it
5854  * and get the min and max values according to the ordering we want. This
5855  * of course may not find values that are really extremal according to our
5856  * ordering, but it beats ignoring available data.
5857  */
5858  if (!have_data &&
5859  get_attstatsslot(&sslot, vardata->statsTuple,
5860  STATISTIC_KIND_HISTOGRAM, InvalidOid,
5861  ATTSTATSSLOT_VALUES))
5862  {
5863  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5864  collation, typLen, typByVal,
5865  &tmin, &tmax, &have_data);
5866  free_attstatsslot(&sslot);
5867  }
5868 
5869  /*
5870  * If we have most-common-values info, look for extreme MCVs. This is
5871  * needed even if we also have a histogram, since the histogram excludes
5872  * the MCVs. However, if we *only* have MCVs and no histogram, we should
5873  * be pretty wary of deciding that that is a full representation of the
5874  * data. Proceed only if the MCVs represent the whole table (to within
5875  * roundoff error).
5876  */
5877  if (get_attstatsslot(&sslot, vardata->statsTuple,
5878  STATISTIC_KIND_MCV, InvalidOid,
5879  have_data ? ATTSTATSSLOT_VALUES :
5880  (ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)))
5881  {
5882  bool use_mcvs = have_data;
5883 
5884  if (!have_data)
5885  {
5886  double sumcommon = 0.0;
5887  double nullfrac;
5888  int i;
5889 
5890  for (i = 0; i < sslot.nnumbers; i++)
5891  sumcommon += sslot.numbers[i];
5892  nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata->statsTuple))->stanullfrac;
5893  if (sumcommon + nullfrac > 0.99999)
5894  use_mcvs = true;
5895  }
5896 
5897  if (use_mcvs)
5898  get_stats_slot_range(&sslot, opfuncoid, &opproc,
5899  collation, typLen, typByVal,
5900  &tmin, &tmax, &have_data);
5901  free_attstatsslot(&sslot);
5902  }
5903 
5904  *min = tmin;
5905  *max = tmax;
5906  return have_data;
5907 }
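
A numeric illustration of the MCV-coverage test above, using hypothetical frequencies rather than anything from selfuncs.c:

static bool
mcv_covers_table_example(void)
{
    double mcv_freqs[] = {0.5, 0.3, 0.15};  /* hypothetical MCV frequencies */
    double nullfrac = 0.05;                 /* hypothetical stanullfrac */
    double sumcommon = 0.0;

    for (int i = 0; i < 3; i++)
        sumcommon += mcv_freqs[i];

    /* 0.95 + 0.05 = 1.0 > 0.99999: the MCVs plus nulls cover all rows. */
    return (sumcommon + nullfrac > 0.99999);
}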
5908 
5909 /*
5910  * get_stats_slot_range: scan sslot for min/max values
5911  *
5912  * Subroutine for get_variable_range: update min/max/have_data according
5913  * to what we find in the statistics array.
5914  */
5915 static void
5916 get_stats_slot_range(AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc,
5917  Oid collation, int16 typLen, bool typByVal,
5918  Datum *min, Datum *max, bool *p_have_data)
5919 {
5920  Datum tmin = *min;
5921  Datum tmax = *max;
5922  bool have_data = *p_have_data;
5923  bool found_tmin = false;
5924  bool found_tmax = false;
5925 
5926  /* Look up the comparison function, if we didn't already do so */
5927  if (opproc->fn_oid != opfuncoid)
5928  fmgr_info(opfuncoid, opproc);
5929 
5930  /* Scan all the slot's values */
5931  for (int i = 0; i < sslot->nvalues; i++)
5932  {
5933  if (!have_data)
5934  {
5935  tmin = tmax = sslot->values[i];
5936  found_tmin = found_tmax = true;
5937  *p_have_data = have_data = true;
5938  continue;
5939  }
5940  if (DatumGetBool(FunctionCall2Coll(opproc,
5941  collation,
5942  sslot->values[i], tmin)))
5943  {
5944  tmin = sslot->values[i];
5945  found_tmin = true;
5946  }
5947  if (DatumGetBool(FunctionCall2Coll(opproc,
5948  collation,
5949  tmax, sslot->values[i])))
5950  {
5951  tmax = sslot->values[i];
5952  found_tmax = true;
5953  }
5954  }
5955 
5956  /*
5957  * Copy the slot's values, if we found new extreme values.
5958  */
5959  if (found_tmin)
5960  *min = datumCopy(tmin, typByVal, typLen);
5961  if (found_tmax)
5962  *max = datumCopy(tmax, typByVal, typLen);
5963 }
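
Usage sketch mirroring the calls in get_variable_range above (sslot, opfuncoid, and the other variables are as declared there); the InvalidOid sentinel lets repeated calls share one fmgr_info lookup:

FmgrInfo opproc;

opproc.fn_oid = InvalidOid;     /* sentinel: comparison fn not looked up yet */

/* The first call performs fmgr_info(opfuncoid, &opproc) internally ... */
get_stats_slot_range(&sslot, opfuncoid, &opproc, collation,
                     typLen, typByVal, &tmin, &tmax, &have_data);
/* ... and a later call for the MCV slot reuses the cached lookup. */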
5964 
5965 
5966 /*
5967  * get_actual_variable_range
5968  * Attempt to identify the current *actual* minimum and/or maximum
5969  * of the specified variable, by looking for a suitable btree index
5970  * and fetching its low and/or high values.
5971  * If successful, store values in *min and *max, and return true.
5972  * (Either pointer can be NULL if that endpoint isn't needed.)
5973  * If no data available, return false.
5974  *
5975  * sortop is the "<" comparison operator to use.
5976  * collation is the required collation.
5977  */
5978 static bool
5979 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
5980  Oid sortop, Oid collation,
5981  Datum *min, Datum *max)
5982 {
5983  bool have_data = false;
5984  RelOptInfo *rel = vardata->rel;
5985  RangeTblEntry *rte;
5986  ListCell *lc;
5987 
5988  /* No hope if no relation or it doesn't have indexes */
5989  if (rel == NULL || rel->indexlist == NIL)
5990  return false;
5991  /* If it has indexes it must be a plain relation */
5992  rte = root->simple_rte_array[rel->relid];
5993  Assert(rte->rtekind == RTE_RELATION);
5994 
5995  /* Search through the indexes to see if any match our problem */
5996  foreach(lc, rel->indexlist)
5997  {
5998  IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5999  ScanDirection indexscandir;
6000 
6001  /* Ignore non-btree indexes */
6002  if (index->relam != BTREE_AM_OID)
6003  continue;
6004 
6005  /*
6006  * Ignore partial indexes --- we only want stats that cover the entire
6007  * relation.
6008  */
6009  if (index->indpred != NIL)
6010  continue;
6011 
6012  /*
6013  * The index list might include hypothetical indexes inserted by a
6014  * get_relation_info hook --- don't try to access them.
6015  */
6016  if (index->hypothetical)
6017  continue;
6018 
6019  /*
6020  * The first index column must match the desired variable, sortop, and
6021  * collation --- but we can use a descending-order index.
6022  */
6023  if (collation != index->indexcollations[0])
6024  continue; /* test first 'cause it's cheapest */
6025  if (!match_index_to_operand(vardata->var, 0, index))
6026  continue;
6027  switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
6028  {
6029  case BTLessStrategyNumber:
6030  if (index->reverse_sort[0])
6031  indexscandir = BackwardScanDirection;
6032  else
6033  indexscandir = ForwardScanDirection;
6034  break;
6035  case BTGreaterStrategyNumber:
6036  if (index->reverse_sort[0])
6037  indexscandir = ForwardScanDirection;
6038  else
6039  indexscandir = BackwardScanDirection;
6040  break;
6041  default:
6042  /* index doesn't match the sortop */
6043  continue;
6044  }
6045 
6046  /*
6047  * Found a suitable index to extract data from. Set up some data that
6048  * can be used by both invocations of get_actual_variable_endpoint.
6049  */
6050  {
6051  MemoryContext tmpcontext;
6052  MemoryContext oldcontext;
6053  Relation heapRel;
6054  Relation indexRel;
6055  TupleTableSlot *slot;
6056