#define LOG2(x)  (log(x) / 0.693147180559945)
#define APPEND_CPU_COST_MULTIPLIER 0.5
#define MAXIMUM_ROWCOUNT 1e100
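/*
 * LOG2(x) is the base-2 logarithm computed via the natural log (ln 2 is
 * approximately 0.693147180559945); the sort and heap costs below charge
 * roughly N * LOG2(N) comparisons.  APPEND_CPU_COST_MULTIPLIER scales
 * cpu_tuple_cost for the per-tuple overhead of Append/MergeAppend nodes,
 * and MAXIMUM_ROWCOUNT is the ceiling to which row-count estimates are
 * clamped.
 */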
Cost *rescan_startup_cost, Cost *rescan_total_cost);
List **restrictlist);
int parallel_workers);
static double page_size(double tuples, int width);
else if (nrows <= 1.0)
return (int32) tuple_width;
return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
Cost startup_cost = 0;
double spc_seq_page_cost;
disk_run_cost = spc_seq_page_cost * baserel->pages;
startup_cost += qpqual_cost.startup;
cpu_run_cost = cpu_per_tuple * baserel->tuples;
startup_cost += path->pathtarget->cost.startup;
cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
cpu_run_cost /= parallel_divisor;
path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
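/*
 * Sequential scan costing, as in the fragment above: the disk component is
 * one sequential page fetch per heap page, the CPU component is the
 * per-tuple qual cost plus cpu_tuple_cost for every tuple, and for partial
 * paths the CPU component is divided by the parallel divisor since the
 * workers share that work.
 */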
Cost startup_cost = 0;
double spc_seq_page_cost, spc_random_page_cost,
&spc_random_page_cost,
spc_random_page_cost : spc_seq_page_cost;
run_cost += spc_page_cost * baserel->pages;
startup_cost += qpqual_cost.startup;
run_cost += cpu_per_tuple * baserel->tuples;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
Cost startup_cost = 0;
int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
Cost startup_cost = 0;
Cost comparison_cost;
startup_cost += comparison_cost * N * logN;
run_cost += path->path.rows * comparison_cost * logN;
path->path.total_cost = (startup_cost + run_cost + input_total_cost);
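/*
 * Gather Merge keeps a binary heap over its N input streams (one per
 * parallel worker plus the leader), so building the heap costs about
 * N * log2(N) comparisons at startup and each returned row pays roughly
 * log2(N) comparisons, with comparison_cost derived from cpu_operator_cost.
 */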
bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
Cost startup_cost = 0;
Cost cpu_run_cost = 0;
Cost indexStartupCost;
double indexCorrelation,
double spc_seq_page_cost, spc_random_page_cost;
double tuples_fetched;
double pages_fetched;
double rand_heap_pages;
if (path->path.param_info)
amcostestimate(root, path, loop_count,
&indexStartupCost, &indexTotalCost,
&indexSelectivity, &indexCorrelation,
startup_cost += indexStartupCost;
run_cost += indexTotalCost - indexStartupCost;
&spc_random_page_cost,
(double) index->pages,
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
rand_heap_pages = pages_fetched;
max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
(double) index->pages,
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
(double) index->pages,
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
rand_heap_pages = pages_fetched;
max_IO_cost = pages_fetched * spc_random_page_cost;
pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
if (pages_fetched > 0)
min_IO_cost = spc_random_page_cost;
if (pages_fetched > 1)
min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
rand_heap_pages = -1;
csquared = indexCorrelation * indexCorrelation;
run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
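/*
 * Heap-access cost for an index scan is interpolated between two bounds:
 * max_IO_cost assumes the index order is completely uncorrelated with the
 * heap order (every fetch is a random page read, with index_pages_fetched()
 * limiting repeat fetches across loop_count repetitions), while min_IO_cost
 * assumes perfect correlation (one random read plus sequential reads of the
 * remaining pages).  The square of the index correlation, csquared, picks
 * the point between the two bounds.
 */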
startup_cost += qpqual_cost.startup;
cpu_run_cost += cpu_per_tuple * tuples_fetched;
startup_cost += path->path.pathtarget->cost.startup;
cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
cpu_run_cost /= parallel_divisor;
run_cost += cpu_run_cost;
foreach(lc, qual_clauses)
if (rinfo->pseudoconstant)
result = lappend(result, rinfo);
double pages_fetched;
T = (pages > 1) ? (double) pages : 1.0;
total_pages = root->total_table_pages + index_pages;
total_pages = Max(total_pages, 1.0);
(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
if (pages_fetched >= T)
pages_fetched = ceil(pages_fetched);
lim = (2.0 * T * b) / (2.0 * T - b);
if (tuples_fetched <= lim)
(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
b + (tuples_fetched - lim) * (T - b) / T;
pages_fetched = ceil(pages_fetched);
return pages_fetched;
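/*
 * The branches above implement the Mackert-Lohman approximation for the
 * number of distinct heap pages touched when fetching N tuples from a
 * relation of T pages with an effective cache of b pages:
 *
 *    pages_fetched = min(2TN / (2T + N), T)          when T <= b
 *    pages_fetched = 2TN / (2T + N)                  when T > b, N <= lim
 *    pages_fetched = b + (N - lim) * (T - b) / T     when T > b, N > lim
 *
 * where lim = 2Tb / (2T - b) and N = tuples_fetched.
 */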
Path *bitmapqual, double loop_count)
Cost startup_cost = 0;
Cost indexTotalCost;
double tuples_fetched;
double pages_fetched;
double spc_seq_page_cost, spc_random_page_cost;
loop_count, &indexTotalCost,
startup_cost += indexTotalCost;
T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
&spc_random_page_cost,
&spc_seq_page_cost);
if (pages_fetched >= 2.0)
cost_per_page = spc_random_page_cost -
(spc_random_page_cost - spc_seq_page_cost) * sqrt(pages_fetched / T);
cost_per_page = spc_random_page_cost;
run_cost += pages_fetched * cost_per_page;
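/*
 * For a bitmap heap scan the pages come back in physical order, so the cost
 * per page slides from spc_random_page_cost toward spc_seq_page_cost as the
 * fraction of the table being read grows; the sqrt(pages_fetched / T)
 * factor gives that interpolation, while a scan touching a single page is
 * charged a pure random fetch.
 */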
startup_cost += qpqual_cost.startup;
cpu_run_cost = cpu_per_tuple * tuples_fetched;
cpu_run_cost /= parallel_divisor;
run_cost += cpu_run_cost;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
*cost = ((IndexPath *) path)->indextotalcost;
*selec = ((IndexPath *) path)->indexselectivity;
totalCost += subCost;
totalCost += subCost;
Cost startup_cost = 0;
double spc_random_page_cost;
foreach(l, tidquals)
&spc_random_page_cost,
run_cost += spc_random_page_cost * ntuples;
run_cost += cpu_per_tuple * ntuples;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
Cost startup_cost = 0;
double spc_random_page_cost;
double spc_seq_page_cost;
pages = ceil(selectivity * baserel->pages);
ntuples = selectivity * baserel->tuples;
nseqpages = pages - 1.0;
&spc_random_page_cost,
&spc_seq_page_cost);
run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
run_cost += cpu_per_tuple * ntuples;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
bool trivial_pathtarget)
if (qpquals == NIL && trivial_pathtarget)
startup_cost = qpqual_cost.startup;
startup_cost += path->path.pathtarget->cost.startup;
run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
Cost startup_cost = 0;
startup_cost += qpqual_cost.startup;
run_cost += cpu_per_tuple * baserel->tuples;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
Cost startup_cost = 0;
startup_cost += qpqual_cost.startup;
run_cost += cpu_per_tuple * baserel->tuples;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
Cost startup_cost = 0;
startup_cost += qpqual_cost.startup;
run_cost += cpu_per_tuple * baserel->tuples;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
Cost startup_cost = 0;
startup_cost += qpqual_cost.startup;
run_cost += cpu_per_tuple * baserel->tuples;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
Cost startup_cost = 0;
startup_cost += qpqual_cost.startup;
run_cost += cpu_per_tuple * baserel->tuples;
Cost startup_cost = 0;
startup_cost += qpqual_cost.startup;
run_cost += cpu_per_tuple * baserel->tuples;
total_rows = nrterm->rows;
total_rows += 10 * rterm->rows;
runion->rows = total_rows;
runion->pathtarget->width = Max(nrterm->pathtarget->width, rterm->pathtarget->width);
double tuples, int width,
Cost comparison_cost, int sort_mem,
double limit_tuples)
double output_bytes;
double output_tuples;
long sort_mem_bytes = sort_mem * 1024L;
if (limit_tuples > 0 && limit_tuples < tuples)
output_tuples = limit_tuples;
output_tuples = tuples;
output_bytes = input_bytes;
if (output_bytes > sort_mem_bytes)
double npages = ceil(input_bytes / BLCKSZ);
double nruns = input_bytes / sort_mem_bytes;
double npageaccesses;
*startup_cost = comparison_cost * tuples * LOG2(tuples);
if (nruns > mergeorder)
log_runs = ceil(log(nruns) / log(mergeorder));
npageaccesses = 2.0 * npages * log_runs;
*startup_cost += npageaccesses *
else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
*startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
*startup_cost = comparison_cost * tuples * LOG2(tuples);
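/*
 * Tuplesort costing, per the branches above: an in-memory quicksort is
 * charged comparison_cost * tuples * LOG2(tuples).  If the data exceed
 * sort_mem the sort goes external, and on top of the comparisons each merge
 * pass rereads and rewrites every page, giving npageaccesses =
 * 2 * npages * log_runs page accesses with log_runs =
 * ceil(log(nruns) / log(mergeorder)).  A bounded (top-K) sort only has to
 * maintain a heap of about 2 * output_tuples entries, hence the
 * LOG2(2.0 * output_tuples) term.
 */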
int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
double input_tuples, int width, Cost comparison_cost, int sort_mem,
double limit_tuples)
input_run_cost = input_total_cost - input_startup_cost;
double group_tuples,
Cost group_startup_cost,
group_input_run_cost;
bool unknown_varno = false;
if (input_tuples < 2.0)
foreach(l, pathkeys)
unknown_varno = true;
group_tuples = input_tuples / input_groups;
group_input_run_cost = input_run_cost / input_groups;
group_tuples, width, comparison_cost, sort_mem,
startup_cost = group_startup_cost + input_startup_cost + group_input_run_cost;
run_cost = group_run_cost + (group_run_cost + group_startup_cost) * (input_groups - 1) + group_input_run_cost * (input_groups - 1);
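/*
 * Incremental sort splits the input into input_groups groups that share a
 * prefix of the requested sort key and sorts each group separately.  Only
 * the first group (its sort plus its share of the input run cost) must be
 * paid before the first row can be returned, so it goes into startup_cost;
 * the remaining (input_groups - 1) group sorts and their input are charged
 * to run_cost, as in the formula above.
 */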
path->rows = input_tuples;
List *pathkeys, int input_disabled_nodes,
Cost input_cost, double tuples, int width,
Cost comparison_cost, int sort_mem,
double limit_tuples)
comparison_cost, sort_mem,
startup_cost += input_cost;
path->rows = tuples;
arrlen = Min(parallel_workers, numpaths);
foreach(cell, subpaths)
if (path_index == arrlen)
costarr[path_index++] = subpath->total_cost;
min_index = arrlen - 1;
if (path_index++ == numpaths)
costarr[min_index] += subpath->total_cost;
for (int i = 0; i < arrlen; i++)
if (costarr[i] < costarr[min_index])
for (int i = 0; i < arrlen; i++)
if (costarr[i] > costarr[max_index])
return costarr[max_index];
if (pathkeys == NIL)
else if (i < apath->path.parallel_workers)
if (i < apath->first_partial_path)
double subpath_parallel_divisor;
List *pathkeys, int n_streams,
int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
Cost startup_cost = 0;
Cost comparison_cost;
N = (n_streams < 2) ? 2.0 : (double) n_streams;
startup_cost += comparison_cost * N * logN;
run_cost += tuples * comparison_cost * logN;
path->startup_cost = startup_cost + input_startup_cost;
path->total_cost = startup_cost + run_cost + input_total_cost;
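/*
 * MergeAppend likewise keeps a binary heap over its n_streams inputs
 * (clamped to at least 2): building the heap costs N * log2(N) comparisons
 * and each row passed through costs about log2(N) comparisons, added on top
 * of the inputs' own startup and total costs.
 */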
int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
double tuples, int width)
Cost startup_cost = input_startup_cost;
Cost run_cost = input_total_cost - input_startup_cost;
long work_mem_bytes = work_mem * 1024L;
path->rows = tuples;
if (nbytes > work_mem_bytes)
double npages = ceil(nbytes / BLCKSZ);
Cost *rescan_startup_cost, Cost *rescan_total_cost)
double calls = mpath->calls;
int width = mpath->subpath->pathtarget->width;
double hash_mem_bytes;
double est_entry_bytes;
double est_cache_entries;
est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
hit_ratio = ((calls - ndistinct) / calls) * (est_cache_entries / Max(ndistinct, est_cache_entries));
Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
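/*
 * Memoize cache estimates: evict_ratio is the fraction of distinct
 * parameter values that cannot all be kept in the cache at once, and
 * hit_ratio estimates the fraction of calls served from the cache by
 * assuming each of the ndistinct values must be looked up (and miss) at
 * least once, scaled down when the cache cannot hold every distinct value.
 */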
startup_cost = input_startup_cost * (1.0 - hit_ratio);
*rescan_startup_cost = startup_cost;
*rescan_total_cost = total_cost;
int numGroupCols, double numGroups,
Cost input_startup_cost, Cost input_total_cost,
double input_tuples, double input_width)
double output_tuples;
if (aggcosts == NULL)
aggcosts = &dummy_aggcosts;
startup_cost = input_total_cost;
startup_cost = input_startup_cost;
total_cost = input_total_cost;
output_tuples = numGroups;
startup_cost = input_total_cost;
total_cost = startup_cost;
output_tuples = numGroups;
double pages_written = 0.0;
double pages_read = 0.0;
double hashentrysize;
uint64 ngroups_limit;
&ngroups_limit, &num_partitions);
nbatches = Max((numGroups * hashentrysize) / mem_limit, numGroups / ngroups_limit);
nbatches = Max(ceil(nbatches), 1.0);
num_partitions = Max(num_partitions, 2);
depth = ceil(log(nbatches) / log(num_partitions));
pages_written = pages_read = pages * depth;
pages_written *= 2.0;
startup_cost += spill_cost;
total_cost += spill_cost;
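/*
 * Hash aggregation spill costing: when the hash table exceeds its memory
 * limit, the input is split into nbatches batches and recursively
 * repartitioned num_partitions ways, giving
 * depth = ceil(log(nbatches) / log(num_partitions)) passes.  Every input
 * page is expected to be written and read once per pass, and the resulting
 * spill_cost is added to both startup and total cost because all input is
 * consumed before any group can be returned.
 */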
startup_cost += qual_cost.startup;
path->rows = output_tuples;
double input_tuples)
double partition_tuples;
double return_tuples;
double num_partitions;
root->parse->targetList);
partition_tuples = input_tuples / num_partitions;
partition_tuples = input_tuples;
root->parse->targetList);
partition_tuples, NULL,
peer_tuples = partition_tuples / num_groups;
return_tuples = partition_tuples;
return_tuples = 1.0;
return_tuples = partition_tuples;
return_tuples = peer_tuples;
return_tuples = 1.0;
return_tuples = 1.0;
double end_offset_value;
if (endOffset->constisnull)
end_offset_value = 1.0;
partition_tuples / peer_tuples *
return_tuples = end_offset_value + 1.0;
return_tuples = peer_tuples * (end_offset_value + 1.0);
return_tuples = 1.0;
return_tuples = 1.0;
return_tuples = Min(return_tuples + 1.0, partition_tuples);
return_tuples = Min(return_tuples, partition_tuples);
int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
double input_tuples)
double startup_tuples;
startup_cost = input_startup_cost;
total_cost = input_total_cost;
foreach(lc, windowFuncs)
startup_cost += argcosts.startup;
startup_cost += argcosts.startup;
startup_cost += argcosts.startup;
total_cost += wfunccost * input_tuples;
path->rows = input_tuples;
if (startup_tuples > 1.0)
path->startup_cost += (total_cost - startup_cost) / input_tuples * (startup_tuples - 1.0);
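/*
 * WindowAgg usually has to read some number of input rows (startup_tuples,
 * estimated from the partitioning and frame options) before it can emit its
 * first row, so the adjustment above moves a proportional share of the
 * node's run cost into startup_cost.
 */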
int numGroupCols, double numGroups,
int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
double input_tuples)
double output_tuples;
output_tuples = numGroups;
startup_cost = input_startup_cost;
total_cost = input_total_cost;
startup_cost += qual_cost.startup;
path->rows = output_tuples;
Path *outer_path, Path *inner_path,
Cost startup_cost = 0;
double outer_path_rows = outer_path->rows;
Cost inner_rescan_start_cost;
Cost inner_rescan_total_cost;
Cost inner_run_cost;
Cost inner_rescan_run_cost;
&inner_rescan_start_cost,
&inner_rescan_total_cost);
if (outer_path_rows > 1)
run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
run_cost += inner_run_cost;
if (outer_path_rows > 1)
run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
workspace->total_cost = startup_cost + run_cost;
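/*
 * Nested loop: the outer path runs once and the inner path runs once per
 * outer row, so beyond the first inner scan the formulas above charge
 * (outer_path_rows - 1) times the inner path's rescan startup and run
 * costs, which cost_rescan() may make much cheaper than the original scan
 * (for example for materialized or memoized inners).
 */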
double outer_path_rows = outer_path->rows;
double inner_path_rows = inner_path->rows;
if (outer_path_rows <= 0)
outer_path_rows = 1;
if (inner_path_rows <= 0)
inner_path_rows = 1;
if (path->jpath.path.param_info)
path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
path->jpath.path.rows = path->jpath.path.parent->rows;
if (path->jpath.path.parallel_workers > 0)
path->jpath.path.rows =
double outer_matched_rows;
double outer_unmatched_rows;
outer_unmatched_rows = outer_path_rows - outer_matched_rows;
ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
run_cost += inner_run_cost * inner_scan_frac;
if (outer_matched_rows > 1)
run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
run_cost += outer_unmatched_rows * inner_rescan_run_cost / inner_path_rows;
ntuples += outer_unmatched_rows * inner_path_rows;
run_cost += inner_run_cost;
if (outer_unmatched_rows >= 1)
outer_unmatched_rows -= 1;
outer_matched_rows -= 1;
if (outer_matched_rows > 0)
run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
if (outer_unmatched_rows > 0)
run_cost += outer_unmatched_rows * inner_rescan_run_cost;
ntuples = outer_path_rows * inner_path_rows;
startup_cost += restrict_qual_cost.startup;
run_cost += cpu_per_tuple * ntuples;
startup_cost += path->jpath.path.pathtarget->cost.startup;
run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
path->jpath.path.startup_cost = startup_cost;
path->jpath.path.total_cost = startup_cost + run_cost;
Path *outer_path, Path *inner_path,
List *outersortkeys, List *innersortkeys,
Cost startup_cost = 0;
double outer_path_rows = outer_path->rows;
double inner_path_rows = inner_path->rows;
Cost inner_run_cost;
if (outer_path_rows <= 0)
outer_path_rows = 1;
if (inner_path_rows <= 0)
inner_path_rows = 1;
if (mergeclauses && jointype != JOIN_FULL)
opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
elog(ERROR, "left and right pathkeys do not match in mergejoin");
outer_path->parent->relids))
outerstartsel = 0.0;
innerstartsel = 0.0;
outerstartsel = innerstartsel = 0.0;
outerendsel = innerendsel = 1.0;
outer_skip_rows = rint(outer_path_rows * outerstartsel);
inner_skip_rows = rint(inner_path_rows * innerstartsel);
Assert(outer_skip_rows <= outer_rows);
Assert(inner_skip_rows <= inner_rows);
outerstartsel = outer_skip_rows / outer_path_rows;
innerstartsel = inner_skip_rows / inner_path_rows;
outerendsel = outer_rows / outer_path_rows;
innerendsel = inner_rows / inner_path_rows;
Assert(outerstartsel <= outerendsel);
Assert(innerstartsel <= innerendsel);
outer_path->pathtarget->width,
* (outerendsel - outerstartsel);
* (outerendsel - outerstartsel);
inner_path->pathtarget->width,
* (innerendsel - innerstartsel);
* (innerendsel - innerstartsel);
workspace->total_cost = startup_cost + run_cost + inner_run_cost;
double inner_path_rows = inner_path->rows;
double mergejointuples,
if (inner_path_rows <= 0)
inner_path_rows = 1;
if (path->jpath.path.param_info)
path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
path->jpath.path.rows = path->jpath.path.parent->rows;
if (path->jpath.path.parallel_workers > 0)
path->jpath.path.rows =
rescannedtuples = 0;
rescannedtuples = mergejointuples - inner_path_rows;
if (rescannedtuples < 0)
rescannedtuples = 0;
rescanratio = 1.0 + (rescannedtuples / inner_rows);
bare_inner_cost = inner_run_cost * rescanratio;
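/*
 * Merge join may have to rescan portions of the inner input when the outer
 * side contains duplicate merge-key values; rescannedtuples estimates how
 * many inner tuples get re-fetched, and rescanratio inflates the inner
 * per-tuple costs accordingly (1.0 means no rescanning is expected).
 */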
mat_inner_cost = inner_run_cost +
else if (innersortkeys == NIL &&
inner_path->pathtarget->width) >
run_cost += mat_inner_cost;
run_cost += bare_inner_cost;
startup_cost += merge_qual_cost.startup;
startup_cost += merge_qual_cost.per_tuple * (outer_skip_rows + inner_skip_rows * rescanratio);
((outer_rows - outer_skip_rows) + (inner_rows - inner_skip_rows) * rescanratio);
startup_cost += qp_qual_cost.startup;
run_cost += cpu_per_tuple * mergejointuples;
startup_cost += path->jpath.path.pathtarget->cost.startup;
run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
path->jpath.path.startup_cost = startup_cost;
path->jpath.path.total_cost = startup_cost + run_cost;
foreach(lc, rinfo->scansel_cache)
cache->collation == pathkey->pk_eclass->ec_collation &&
cache->collation = pathkey->pk_eclass->ec_collation;
rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
Path *outer_path, Path *inner_path,
Cost startup_cost = 0;
double outer_path_rows = outer_path->rows;
double inner_path_rows = inner_path->rows;
double inner_path_rows_total = inner_path_rows;
size_t space_allowed;
inner_path->pathtarget->width,
double outerpages = page_size(outer_path_rows, outer_path->pathtarget->width);
double innerpages = page_size(inner_path_rows, inner_path->pathtarget->width);
workspace->total_cost = startup_cost + run_cost;
double outer_path_rows = outer_path->rows;
double inner_path_rows = inner_path->rows;
double hashjointuples;
double virtualbuckets;
if (path->jpath.path.param_info)
path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
path->jpath.path.rows = path->jpath.path.parent->rows;
if (path->jpath.path.parallel_workers > 0)
path->jpath.path.rows =
virtualbuckets = (double) numbuckets * (double) numbatches;
innerbucketsize = 1.0 / virtualbuckets;
innerbucketsize = 1.0;
foreach(hcl, hashclauses)
inner_path->parent->relids))
thisbucketsize = restrictinfo->right_bucketsize;
if (thisbucketsize < 0)
&restrictinfo->right_mcvfreq,
&restrictinfo->right_bucketsize);
thisbucketsize = restrictinfo->right_bucketsize;
thismcvfreq = restrictinfo->right_mcvfreq;
inner_path->parent->relids));
thisbucketsize = restrictinfo->left_bucketsize;
if (thisbucketsize < 0)
&restrictinfo->left_mcvfreq,
&restrictinfo->left_bucketsize);
thisbucketsize = restrictinfo->left_bucketsize;
thismcvfreq = restrictinfo->left_mcvfreq;
if (innerbucketsize > thisbucketsize)
innerbucketsize = thisbucketsize;
if (innermcvfreq > thismcvfreq)
innermcvfreq = thismcvfreq;
double outer_matched_rows;
startup_cost += hash_qual_cost.startup;
run_cost += hash_qual_cost.per_tuple * outer_matched_rows * clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
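/*
 * Hash join probe cost for matched outer rows: each such row is expected to
 * be compared against the tuples sharing its hash bucket
 * (inner_path_rows * innerbucketsize), and the trailing 0.5 reflects the
 * assumption that, on average, only about half of a bucket has to be
 * examined before the matching tuple is found.
 */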
(outer_path_rows - outer_matched_rows) *
hashjointuples = outer_path_rows - outer_matched_rows;
hashjointuples = outer_matched_rows;
startup_cost += hash_qual_cost.startup;
run_cost += hash_qual_cost.per_tuple * outer_path_rows *
startup_cost += qp_qual_cost.startup;
run_cost += cpu_per_tuple * hashjointuples;
startup_cost += path->jpath.path.pathtarget->cost.startup;
run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
path->jpath.path.startup_cost = startup_cost;
path->jpath.path.total_cost = startup_cost + run_cost;
Cost plan_run_cost = plan->total_cost - plan->startup_cost;
sp_cost.per_tuple += 0.50 * plan_run_cost;
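/*
 * The 0.50 * plan_run_cost charge above appears to correspond to the
 * cost_subplan() assumption that an ANY/ALL-style sublink which rescans its
 * subplan will, on average, need to read about half of the subplan's output
 * per evaluation.
 */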
Cost *rescan_startup_cost, Cost *rescan_total_cost)
case T_FunctionScan:
*rescan_startup_cost = 0;
if (((HashPath *) path)->num_batches == 1)
*rescan_startup_cost = 0;
*rescan_total_cost = path->total_cost - path->startup_cost;
*rescan_startup_cost = path->startup_cost;
*rescan_total_cost = path->total_cost;
case T_WorkTableScan:
path->pathtarget->width);
long work_mem_bytes = work_mem * 1024L;
if (nbytes > work_mem_bytes)
double npages = ceil(nbytes / BLCKSZ);
*rescan_startup_cost = 0;
*rescan_total_cost = run_cost;
path->pathtarget->width);
long work_mem_bytes = work_mem * 1024L;
if (nbytes > work_mem_bytes)
double npages = ceil(nbytes / BLCKSZ);
*rescan_startup_cost = 0;
*rescan_total_cost = run_cost;
rescan_startup_cost, rescan_total_cost);
*rescan_startup_cost = path->startup_cost;
*rescan_total_cost = path->total_cost;
if (rinfo->eval_cost.startup < 0)
if (rinfo->orclause)
if (rinfo->pseudoconstant)
rinfo->eval_cost = locContext.total;
context->total.startup += rinfo->eval_cost.startup;
context->total.per_tuple += rinfo->eval_cost.per_tuple;
&iofunc, &typioparam);
&iofunc, &typisvarlena);
foreach(lc, rcexpr->opnos)
elog(ERROR, "cannot handle unplanned sub-select");
foreach(l, restrictlist)
joinquals = lappend(joinquals, rinfo);
joinquals = restrictlist;
avgmatch = nselec * innerrel->rows / jselec;
avgmatch = Max(1.0, avgmatch);
Relids joinrelids = joinpath->path.parent->relids;
if (innerpath->param_info == NULL)
case T_IndexOnlyScan:
indexclauses = ((IndexPath *) innerpath)->indexclauses;
case T_BitmapHeapScan:
indexclauses = ((IndexPath *) bmqual)->indexclauses;
foreach(lc, innerpath->param_info->ppi_clauses)
innerpath->parent->relids,
tuples = selec * outer_tuples * inner_tuples;
List *param_clauses)
if (nrows > rel->rows)
List *restrict_clauses)
if (nrows > rel->rows)
foreach(l, restrictlist)
pushedquals = lappend(pushedquals, rinfo);
joinquals = lappend(joinquals, rinfo);
nrows = outer_rows * inner_rows * fkselec * jselec;
nrows = outer_rows * inner_rows * fkselec * jselec;
if (nrows < outer_rows)
nrows = outer_rows * inner_rows * fkselec * jselec;
if (nrows < outer_rows)
if (nrows < inner_rows)
nrows = outer_rows * fkselec * jselec;
nrows = outer_rows * (1.0 - fkselec * jselec);
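/*
 * Join size estimation by join type, per the branches above: an inner join
 * is outer_rows * inner_rows * selectivity; left and full joins clamp the
 * result so it is never smaller than the side(s) whose rows must all
 * appear; a semi join returns at most outer_rows * selectivity; and an anti
 * join returns the outer rows with no match, outer_rows * (1 - selectivity).
 * fkselec folds in the selectivity already implied by matched foreign-key
 * constraints.
 */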
elog(ERROR, "unrecognized join type: %d", (int) jointype);
List **restrictlist)
List *worklist = *restrictlist;
foreach(lc, root->fkey_list)
ref_is_outer = false;
ref_is_outer = true;
if (worklist == *restrictlist)
foreach(cell, worklist)
bool remove_it = false;
for (i = 0; i < fkinfo->nkeys; i++)
if (rinfo->parent_ec)
if (fkinfo->eclass[i] == rinfo->parent_ec)
removedlist = lappend(removedlist, rinfo);
if (removedlist == NIL ||
double ref_tuples = Max(ref_rel->tuples, 1.0);
fkselec *= ref_rel->rows / ref_tuples;
double ref_tuples = Max(ref_rel->tuples, 1.0);
fkselec *= 1.0 / ref_tuples;
for (int i = 0; i < fkinfo->nkeys; i++)
*restrictlist = worklist;
int32 item_width = 0;
Var *var = (Var *) texpr;