#define LOG2(x)  (log(x) / 0.693147180559945)

#define APPEND_CPU_COST_MULTIPLIER 0.5

#define MAXIMUM_ROWCOUNT 1e100
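/*
 * Gloss on the constants above (reader's note, not from the original file):
 * 0.693147180559945 is ln(2), so LOG2(x) computes a base-2 logarithm via the
 * natural log.  MAXIMUM_ROWCOUNT appears to serve as the upper bound applied
 * when row-count estimates are clamped, and APPEND_CPU_COST_MULTIPLIER the
 * fraction of cpu_tuple_cost charged for pushing a row through an Append or
 * MergeAppend node.
 */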
                        Cost *rescan_startup_cost, Cost *rescan_total_cost);

                        List **restrictlist);

                        int parallel_workers);

static double page_size(double tuples, int width);

    else if (nrows <= 1.0)

    return (int32) tuple_width;

    return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
    Cost        startup_cost = 0;
    double      spc_seq_page_cost;

    disk_run_cost = spc_seq_page_cost * baserel->pages;

    startup_cost += qpqual_cost.startup;

    cpu_run_cost = cpu_per_tuple * baserel->tuples;

    startup_cost += path->pathtarget->cost.startup;
    cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;

    cpu_run_cost /= parallel_divisor;

    path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
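/*
 * Gloss (not from the original file): the sequential-scan fragments above
 * charge one sequential page fetch per heap page for I/O, a per-tuple CPU
 * charge (cpu_tuple_cost plus the quals' per-tuple cost) for every tuple,
 * and, for parallel plans, divide the CPU portion by the parallel divisor
 * since the workers share that work.
 */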
    Cost        startup_cost = 0;
    double      spc_seq_page_cost,
                spc_random_page_cost,

                              &spc_random_page_cost,

        spc_random_page_cost : spc_seq_page_cost;

    run_cost += spc_page_cost * baserel->pages;

    startup_cost += qpqual_cost.startup;

    run_cost += cpu_per_tuple * baserel->tuples;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    Cost        startup_cost = 0;

                  int input_disabled_nodes,
                  Cost input_startup_cost, Cost input_total_cost,

    Cost        startup_cost = 0;
    Cost        comparison_cost;

    startup_cost += comparison_cost * N * logN;

    run_cost += path->path.rows * comparison_cost * logN;

    path->path.total_cost = (startup_cost + run_cost + input_total_cost);
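/*
 * Gloss (not from the original file): merging N presorted streams is costed
 * like a binary heap: roughly N * log2(N) comparisons to build the heap at
 * startup and about log2(N) comparisons per row emitted, each charged at
 * comparison_cost.  The same model shows up again in the merge-append
 * fragment further down.
 */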
    bool        indexonly = (path->path.pathtype == T_IndexOnlyScan);
    Cost        startup_cost = 0;
    Cost        cpu_run_cost = 0;
    Cost        indexStartupCost;
    double      indexCorrelation,
    double      spc_seq_page_cost,
                spc_random_page_cost;
    double      tuples_fetched;
    double      pages_fetched;
    double      rand_heap_pages;

    if (path->path.param_info)

    amcostestimate(root, path, loop_count,
                   &indexStartupCost, &indexTotalCost,
                   &indexSelectivity, &indexCorrelation,

    startup_cost += indexStartupCost;
    run_cost += indexTotalCost - indexStartupCost;

                              &spc_random_page_cost,

                                            (double) index->pages,

        pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        rand_heap_pages = pages_fetched;

        max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;

        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

                                            (double) index->pages,

        pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;

                                            (double) index->pages,

        pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        rand_heap_pages = pages_fetched;

        max_IO_cost = pages_fetched * spc_random_page_cost;

        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        if (pages_fetched > 0)
        {
            min_IO_cost = spc_random_page_cost;
            if (pages_fetched > 1)
                min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
        }

        rand_heap_pages = -1;

    csquared = indexCorrelation * indexCorrelation;

    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

    startup_cost += qpqual_cost.startup;

    cpu_run_cost += cpu_per_tuple * tuples_fetched;

    startup_cost += path->path.pathtarget->cost.startup;
    cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;

    cpu_run_cost /= parallel_divisor;

    run_cost += cpu_run_cost;
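/*
 * Gloss (not from the original file): min_IO_cost models a perfectly
 * correlated index (heap pages visited once, largely sequentially) and
 * max_IO_cost an uncorrelated one (a separate fetch per tuple, capped by the
 * Mackert-Lohman page estimate).  The actual I/O charge interpolates between
 * the two using the squared index correlation, as the csquared lines show.
 */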
    foreach(lc, qual_clauses)

        if (rinfo->pseudoconstant)

        result = lappend(result, rinfo);

    double      pages_fetched;

    T = (pages > 1) ? (double) pages : 1.0;

        total_pages = root->total_table_pages + index_pages;
        total_pages = Max(total_pages, 1.0);

            (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        if (pages_fetched >= T)

        pages_fetched = ceil(pages_fetched);

        lim = (2.0 * T * b) / (2.0 * T - b);
        if (tuples_fetched <= lim)

                (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);

                b + (tuples_fetched - lim) * (T - b) / T;

        pages_fetched = ceil(pages_fetched);

    return pages_fetched;
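/*
 * Gloss (not from the original file): the fragments above implement the
 * Mackert-Lohman approximation for the number of distinct pages fetched when
 * N tuples are read from a T-page table with an effective cache of b pages:
 *
 *    PF = min(2TN / (2T + N), T)          when T <= b
 *    PF = 2TN / (2T + N)                  when T > b and N <= lim
 *    PF = b + (N - lim) * (T - b) / T     when T > b and N > lim,
 *                                         where lim = 2Tb / (2T - b)
 */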
                      Path *bitmapqual, double loop_count)

    Cost        startup_cost = 0;
    Cost        indexTotalCost;
    double      tuples_fetched;
    double      pages_fetched;
    double      spc_seq_page_cost,
                spc_random_page_cost;

                                         loop_count, &indexTotalCost,

    startup_cost += indexTotalCost;

    T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

                              &spc_random_page_cost,
                              &spc_seq_page_cost);
    if (pages_fetched >= 2.0)
        cost_per_page = spc_random_page_cost -
            (spc_random_page_cost - spc_seq_page_cost)
            * sqrt(pages_fetched / T);
    else
        cost_per_page = spc_random_page_cost;

    run_cost += pages_fetched * cost_per_page;
    startup_cost += qpqual_cost.startup;

    cpu_run_cost = cpu_per_tuple * tuples_fetched;

        cpu_run_cost /= parallel_divisor;

    run_cost += cpu_run_cost;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;
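/*
 * Gloss (not from the original file): for a bitmap heap scan the per-page
 * fetch cost slides from spc_random_page_cost toward spc_seq_page_cost as
 * the fraction of the table's pages being fetched grows, via the
 * sqrt(pages_fetched / T) interpolation above; a scan touching fewer than
 * two pages is charged the full random-page cost.
 */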
    *cost = ((IndexPath *) path)->indextotalcost;
    *selec = ((IndexPath *) path)->indexselectivity;

        totalCost += subCost;

        totalCost += subCost;

    Cost        startup_cost = 0;
    double      spc_random_page_cost;

    foreach(l, tidquals)

                              &spc_random_page_cost,

    run_cost += spc_random_page_cost * ntuples;

    run_cost += cpu_per_tuple * ntuples;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    Cost        startup_cost = 0;
    double      spc_random_page_cost;
    double      spc_seq_page_cost;

    pages = ceil(selectivity * baserel->pages);

    ntuples = selectivity * baserel->tuples;
    nseqpages = pages - 1.0;

                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;

    run_cost += cpu_per_tuple * ntuples;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;
                      bool trivial_pathtarget)

    if (qpquals == NIL && trivial_pathtarget)

    startup_cost = qpqual_cost.startup;

    startup_cost += path->path.pathtarget->cost.startup;
    run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;

    Cost        startup_cost = 0;

    startup_cost += qpqual_cost.startup;

    run_cost += cpu_per_tuple * baserel->tuples;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    Cost        startup_cost = 0;

    startup_cost += qpqual_cost.startup;

    run_cost += cpu_per_tuple * baserel->tuples;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    Cost        startup_cost = 0;

    startup_cost += qpqual_cost.startup;

    run_cost += cpu_per_tuple * baserel->tuples;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    Cost        startup_cost = 0;

    startup_cost += qpqual_cost.startup;

    run_cost += cpu_per_tuple * baserel->tuples;

    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    Cost        startup_cost = 0;

    startup_cost += qpqual_cost.startup;

    run_cost += cpu_per_tuple * baserel->tuples;

    Cost        startup_cost = 0;

    startup_cost += qpqual_cost.startup;

    run_cost += cpu_per_tuple * baserel->tuples;

    total_rows = nrterm->rows;

    total_rows += 10 * rterm->rows;

    runion->rows = total_rows;
    runion->pathtarget->width = Max(nrterm->pathtarget->width,
                                    rterm->pathtarget->width);
               double tuples, int width,
               Cost comparison_cost, int sort_mem,
               double limit_tuples)

    double      output_bytes;
    double      output_tuples;
    long        sort_mem_bytes = sort_mem * 1024L;

    if (limit_tuples > 0 && limit_tuples < tuples)

        output_tuples = limit_tuples;

        output_tuples = tuples;
        output_bytes = input_bytes;

    if (output_bytes > sort_mem_bytes)

        double      npages = ceil(input_bytes / BLCKSZ);
        double      nruns = input_bytes / sort_mem_bytes;
        double      npageaccesses;

        *startup_cost = comparison_cost * tuples * LOG2(tuples);

        if (nruns > mergeorder)
            log_runs = ceil(log(nruns) / log(mergeorder));

        npageaccesses = 2.0 * npages * log_runs;

        *startup_cost += npageaccesses *

    else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)

        *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);

        *startup_cost = comparison_cost * tuples * LOG2(tuples);
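/*
 * Gloss (not from the original file): the sort model charges
 * comparison_cost * N * log2(N) for sorting N input tuples in memory.  When
 * the data spills to disk it adds I/O for about 2 * npages page accesses per
 * merge pass, with ceil(log(nruns) / log(mergeorder)) passes; a bounded
 * (top-K) heap sort is charged N * log2(2K) comparisons instead.
 */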
                     int input_disabled_nodes,
                     Cost input_startup_cost, Cost input_total_cost,
                     double input_tuples, int width,
                     Cost comparison_cost, int sort_mem,
                     double limit_tuples)

    input_run_cost = input_total_cost - input_startup_cost;
    double      group_tuples,
    Cost        group_startup_cost,
                group_input_run_cost;
    bool        unknown_varno = false;

    if (input_tuples < 2.0)

    foreach(l, pathkeys)

            unknown_varno = true;

    group_tuples = input_tuples / input_groups;
    group_input_run_cost = input_run_cost / input_groups;

                   group_tuples, width, comparison_cost, sort_mem,

    startup_cost = group_startup_cost + input_startup_cost +
        group_input_run_cost;

    run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
        (input_groups - 1) + group_input_run_cost * (input_groups - 1);

    path->rows = input_tuples;

          List *pathkeys, int input_disabled_nodes,
          Cost input_cost, double tuples, int width,
          Cost comparison_cost, int sort_mem,
          double limit_tuples)

                   comparison_cost, sort_mem,

    startup_cost += input_cost;

    path->rows = tuples;

    arrlen = Min(parallel_workers, numpaths);

    foreach(cell, subpaths)

        if (path_index == arrlen)

        costarr[path_index++] = subpath->total_cost;

    min_index = arrlen - 1;

        if (path_index++ == numpaths)

        costarr[min_index] += subpath->total_cost;

    for (int i = 0; i < arrlen; i++)

        if (costarr[i] < costarr[min_index])

    for (int i = 0; i < arrlen; i++)

        if (costarr[i] > costarr[max_index])

    return costarr[max_index];
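/*
 * Gloss (not from the original file): append_nonpartial_cost spreads the
 * non-partial subpaths over the available workers.  The first arrlen
 * subpaths each seed one cost slot, every later subpath is added to the
 * currently cheapest slot, and the function returns the costliest slot,
 * i.e. the estimated share of the longest-running worker.
 */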
    if (pathkeys == NIL)

        else if (i < apath->path.parallel_workers)

            if (i < apath->first_partial_path)

            double      subpath_parallel_divisor;

                  List *pathkeys, int n_streams,
                  int input_disabled_nodes,
                  Cost input_startup_cost, Cost input_total_cost,

    Cost        startup_cost = 0;
    Cost        comparison_cost;

    N = (n_streams < 2) ? 2.0 : (double) n_streams;

    startup_cost += comparison_cost * N * logN;

    run_cost += tuples * comparison_cost * logN;

    path->startup_cost = startup_cost + input_startup_cost;
    path->total_cost = startup_cost + run_cost + input_total_cost;

              int input_disabled_nodes,
              Cost input_startup_cost, Cost input_total_cost,
              double tuples, int width)

    Cost        startup_cost = input_startup_cost;
    Cost        run_cost = input_total_cost - input_startup_cost;
    long        work_mem_bytes = work_mem * 1024L;

    path->rows = tuples;

    if (nbytes > work_mem_bytes)

        double      npages = ceil(nbytes / BLCKSZ);

                     Cost *rescan_startup_cost, Cost *rescan_total_cost)

    double      calls = mpath->calls;
    int         width = mpath->subpath->pathtarget->width;
    double      hash_mem_bytes;
    double      est_entry_bytes;
    double      est_cache_entries;

    est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);

    evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;

    hit_ratio = ((calls - ndistinct) / calls) *
        (est_cache_entries / Max(ndistinct, est_cache_entries));

    Assert(hit_ratio >= 0 && hit_ratio <= 1.0);

    startup_cost = input_startup_cost * (1.0 - hit_ratio);

    *rescan_startup_cost = startup_cost;
    *rescan_total_cost = total_cost;
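/*
 * Gloss (not from the original file): the memoize rescan estimate sizes the
 * cache from hash_mem, then derives an expected hit ratio from the number of
 * distinct key values relative to both the expected number of calls and the
 * number of entries that fit; evict_ratio models how often entries must be
 * discarded when they don't all fit.  The subpath's startup cost is scaled
 * by (1 - hit_ratio), since only cache misses rerun the subplan.
 */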
         int numGroupCols, double numGroups,
         Cost input_startup_cost, Cost input_total_cost,
         double input_tuples, double input_width)

    double      output_tuples;

    if (aggcosts == NULL)

        aggcosts = &dummy_aggcosts;

        startup_cost = input_total_cost;

        startup_cost = input_startup_cost;
        total_cost = input_total_cost;

        output_tuples = numGroups;

        startup_cost = input_total_cost;

        total_cost = startup_cost;

        output_tuples = numGroups;

        double      pages_written = 0.0;
        double      pages_read = 0.0;
        double      hashentrysize;

                            &ngroups_limit, &num_partitions);

        nbatches = Max((numGroups * hashentrysize) / mem_limit,
                       numGroups / ngroups_limit);

        nbatches = Max(ceil(nbatches), 1.0);
        num_partitions = Max(num_partitions, 2);

        depth = ceil(log(nbatches) / log(num_partitions));

        pages_written = pages_read = pages * depth;

            pages_written *= 2.0;

        startup_cost += spill_cost;
        total_cost += spill_cost;

        startup_cost += qual_cost.startup;

    path->rows = output_tuples;
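/*
 * Gloss (not from the original file): when a hashed aggregate is expected to
 * exceed its memory limit, the fragments above estimate the number of spill
 * batches, the partition fan-out per pass, and the resulting recursion depth
 * ceil(log(nbatches) / log(num_partitions)); each input page is then charged
 * as written and re-read once per level of that recursion, and the result is
 * added to both startup and total cost as spill_cost.
 */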
                                  double input_tuples)

    double      partition_tuples;
    double      return_tuples;

        double      num_partitions;

                                  root->parse->targetList);

        partition_tuples = input_tuples / num_partitions;

        partition_tuples = input_tuples;

                                  root->parse->targetList);

                                 partition_tuples, NULL,

        peer_tuples = partition_tuples / num_groups;

            return_tuples = partition_tuples;

            return_tuples = 1.0;

                return_tuples = partition_tuples;

                return_tuples = peer_tuples;

                return_tuples = 1.0;

            return_tuples = 1.0;

        double      end_offset_value;

        if (endOffset->constisnull)

            end_offset_value = 1.0;

                    partition_tuples / peer_tuples *

                return_tuples = end_offset_value + 1.0;

                return_tuples = peer_tuples * (end_offset_value + 1.0);

                return_tuples = 1.0;

            return_tuples = 1.0;

        return_tuples = Min(return_tuples + 1.0, partition_tuples);

        return_tuples = Min(return_tuples, partition_tuples);

               int input_disabled_nodes,
               Cost input_startup_cost, Cost input_total_cost,
               double input_tuples)

    double      startup_tuples;

    startup_cost = input_startup_cost;
    total_cost = input_total_cost;

    foreach(lc, windowFuncs)

        startup_cost += argcosts.startup;

        startup_cost += argcosts.startup;

        startup_cost += argcosts.startup;

        total_cost += wfunccost * input_tuples;

    path->rows = input_tuples;

    if (startup_tuples > 1.0)
        path->startup_cost += (total_cost - startup_cost) / input_tuples *
            (startup_tuples - 1.0);

           int numGroupCols, double numGroups,
           int input_disabled_nodes,
           Cost input_startup_cost, Cost input_total_cost,
           double input_tuples)

    double      output_tuples;

    output_tuples = numGroups;
    startup_cost = input_startup_cost;
    total_cost = input_total_cost;

        startup_cost += qual_cost.startup;

    path->rows = output_tuples;

                      Path *outer_path, Path *inner_path,

    Cost        startup_cost = 0;
    double      outer_path_rows = outer_path->rows;
    Cost        inner_rescan_start_cost;
    Cost        inner_rescan_total_cost;
    Cost        inner_run_cost;
    Cost        inner_rescan_run_cost;

                &inner_rescan_start_cost,
                &inner_rescan_total_cost);

    if (outer_path_rows > 1)
        run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;

    inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;

    run_cost += inner_run_cost;
    if (outer_path_rows > 1)
        run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;

    workspace->total_cost = startup_cost + run_cost;
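/*
 * Gloss (not from the original file): a nestloop's run cost is the outer
 * scan plus one full inner run plus (outer_path_rows - 1) inner rescans; the
 * rescan startup and run costs come from cost_rescan(), which can be cheaper
 * than rerunning the inner path from scratch (e.g. for materialized inner
 * paths).
 */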
    double      outer_path_rows = outer_path->rows;
    double      inner_path_rows = inner_path->rows;

    if (outer_path_rows <= 0)
        outer_path_rows = 1;
    if (inner_path_rows <= 0)
        inner_path_rows = 1;

    if (path->jpath.path.param_info)
        path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;

        path->jpath.path.rows = path->jpath.path.parent->rows;

    if (path->jpath.path.parallel_workers > 0)

        path->jpath.path.rows =

        double      outer_matched_rows;
        double      outer_unmatched_rows;

        outer_unmatched_rows = outer_path_rows - outer_matched_rows;

        ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;

            run_cost += inner_run_cost * inner_scan_frac;
            if (outer_matched_rows > 1)
                run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;

            run_cost += outer_unmatched_rows *
                inner_rescan_run_cost / inner_path_rows;

            ntuples += outer_unmatched_rows * inner_path_rows;

            run_cost += inner_run_cost;
            if (outer_unmatched_rows >= 1)
                outer_unmatched_rows -= 1;

                outer_matched_rows -= 1;

            if (outer_matched_rows > 0)
                run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;

            if (outer_unmatched_rows > 0)
                run_cost += outer_unmatched_rows * inner_rescan_run_cost;

        ntuples = outer_path_rows * inner_path_rows;

    startup_cost += restrict_qual_cost.startup;

    run_cost += cpu_per_tuple * ntuples;

    startup_cost += path->jpath.path.pathtarget->cost.startup;
    run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;

    path->jpath.path.startup_cost = startup_cost;
    path->jpath.path.total_cost = startup_cost + run_cost;
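/*
 * Gloss (not from the original file): for SEMI/ANTI joins (and unique-inner
 * cases) the code splits the outer side into matched and unmatched rows.
 * Matched rows are assumed to stop the inner scan at the first join match,
 * so they pay only inner_scan_frac of an inner scan; unmatched rows pay
 * either a full inner rescan or, when all the join quals are indexed, just a
 * small fraction of one.
 */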
                       Path *outer_path, Path *inner_path,
                       List *outersortkeys, List *innersortkeys,

    Cost        startup_cost = 0;
    double      outer_path_rows = outer_path->rows;
    double      inner_path_rows = inner_path->rows;
    Cost        inner_run_cost;

    if (outer_path_rows <= 0)
        outer_path_rows = 1;
    if (inner_path_rows <= 0)
        inner_path_rows = 1;

    if (mergeclauses && jointype != JOIN_FULL)

        opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
        ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;

            opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||

            elog(ERROR, "left and right pathkeys do not match in mergejoin");

                              outer_path->parent->relids))

            outerstartsel = 0.0;

            innerstartsel = 0.0;

        outerstartsel = innerstartsel = 0.0;
        outerendsel = innerendsel = 1.0;

    outer_skip_rows = rint(outer_path_rows * outerstartsel);
    inner_skip_rows = rint(inner_path_rows * innerstartsel);

    Assert(outer_skip_rows <= outer_rows);
    Assert(inner_skip_rows <= inner_rows);

    outerstartsel = outer_skip_rows / outer_path_rows;
    innerstartsel = inner_skip_rows / inner_path_rows;
    outerendsel = outer_rows / outer_path_rows;
    innerendsel = inner_rows / inner_path_rows;

    Assert(outerstartsel <= outerendsel);
    Assert(innerstartsel <= innerendsel);

        bool        use_incremental_sort = false;

            if (presorted_keys > 0)
                use_incremental_sort = true;

        if (!use_incremental_sort)

                  outer_path->pathtarget->width,

                  outer_path->pathtarget->width,

            * (outerendsel - outerstartsel);

            * (outerendsel - outerstartsel);

                  inner_path->pathtarget->width,

            * (innerendsel - innerstartsel);

            * (innerendsel - innerstartsel);

    workspace->total_cost = startup_cost + run_cost + inner_run_cost;
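/*
 * Gloss (not from the original file): a mergejoin may read only part of each
 * input.  mergejoinscansel() supplies start and end selectivities for the
 * first merge clause, outer/inner_skip_rows are the rows skipped before the
 * join begins (charged to startup), and the (endsel - startsel) factors
 * above scale each input's run cost to the fraction the merge is expected to
 * touch.
 */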
    double      inner_path_rows = inner_path->rows;
    double      mergejointuples,

    if (inner_path_rows <= 0)
        inner_path_rows = 1;

    if (path->jpath.path.param_info)
        path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;

        path->jpath.path.rows = path->jpath.path.parent->rows;

    if (path->jpath.path.parallel_workers > 0)

        path->jpath.path.rows =

        rescannedtuples = 0;

        rescannedtuples = mergejointuples - inner_path_rows;

        if (rescannedtuples < 0)
            rescannedtuples = 0;

    rescanratio = 1.0 + (rescannedtuples / inner_rows);

    bare_inner_cost = inner_run_cost * rescanratio;

    mat_inner_cost = inner_run_cost +

    else if (innersortkeys == NIL &&

                                 inner_path->pathtarget->width) >

        run_cost += mat_inner_cost;

        run_cost += bare_inner_cost;

    startup_cost += merge_qual_cost.startup;
    startup_cost += merge_qual_cost.per_tuple *
        (outer_skip_rows + inner_skip_rows * rescanratio);

        ((outer_rows - outer_skip_rows) +
         (inner_rows - inner_skip_rows) * rescanratio);

    startup_cost += qp_qual_cost.startup;

    run_cost += cpu_per_tuple * mergejointuples;

    startup_cost += path->jpath.path.pathtarget->cost.startup;
    run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;

    path->jpath.path.startup_cost = startup_cost;
    path->jpath.path.total_cost = startup_cost + run_cost;
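/*
 * Gloss (not from the original file): rescannedtuples estimates how many
 * inner tuples must be re-fetched because of duplicate outer keys
 * (mergejointuples - inner_path_rows, clamped at zero), and rescanratio
 * inflates the inner-side charges accordingly.  Whether the inner side is
 * materialized (mat_inner_cost) or rescanned bare (bare_inner_cost) depends
 * on which is cheaper plus the sort-key and memory checks sketched in the
 * fragments above.
 */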
    foreach(lc, rinfo->scansel_cache)

            cache->collation == pathkey->pk_eclass->ec_collation &&

    cache->collation = pathkey->pk_eclass->ec_collation;

    rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);

                      Path *outer_path, Path *inner_path,

    Cost        startup_cost = 0;
    double      outer_path_rows = outer_path->rows;
    double      inner_path_rows = inner_path->rows;
    double      inner_path_rows_total = inner_path_rows;
    size_t      space_allowed;

                            inner_path->pathtarget->width,

        double      outerpages = page_size(outer_path_rows,
                                           outer_path->pathtarget->width);
        double      innerpages = page_size(inner_path_rows,
                                           inner_path->pathtarget->width);

    workspace->total_cost = startup_cost + run_cost;
    double      outer_path_rows = outer_path->rows;
    double      inner_path_rows = inner_path->rows;
    double      hashjointuples;
    double      virtualbuckets;

    if (path->jpath.path.param_info)
        path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;

        path->jpath.path.rows = path->jpath.path.parent->rows;

    if (path->jpath.path.parallel_workers > 0)

        path->jpath.path.rows =

    virtualbuckets = (double) numbuckets * (double) numbatches;

        innerbucketsize = 1.0 / virtualbuckets;

        innerbucketsize = 1.0;

        foreach(hcl, hashclauses)

                                 inner_path->parent->relids))

                thisbucketsize = restrictinfo->right_bucketsize;
                if (thisbucketsize < 0)

                                        &restrictinfo->right_mcvfreq,
                                        &restrictinfo->right_bucketsize);
                    thisbucketsize = restrictinfo->right_bucketsize;

                thismcvfreq = restrictinfo->right_mcvfreq;

                                        inner_path->parent->relids));

                thisbucketsize = restrictinfo->left_bucketsize;
                if (thisbucketsize < 0)

                                        &restrictinfo->left_mcvfreq,
                                        &restrictinfo->left_bucketsize);
                    thisbucketsize = restrictinfo->left_bucketsize;

                thismcvfreq = restrictinfo->left_mcvfreq;

            if (innerbucketsize > thisbucketsize)
                innerbucketsize = thisbucketsize;
            if (innermcvfreq > thismcvfreq)
                innermcvfreq = thismcvfreq;

        double      outer_matched_rows;

        startup_cost += hash_qual_cost.startup;
        run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
            clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;

            (outer_path_rows - outer_matched_rows) *

            hashjointuples = outer_path_rows - outer_matched_rows;

            hashjointuples = outer_matched_rows;

        startup_cost += hash_qual_cost.startup;
        run_cost += hash_qual_cost.per_tuple * outer_path_rows *

    startup_cost += qp_qual_cost.startup;

    run_cost += cpu_per_tuple * hashjointuples;

    startup_cost += path->jpath.path.pathtarget->cost.startup;
    run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;

    path->jpath.path.startup_cost = startup_cost;
    path->jpath.path.total_cost = startup_cost + run_cost;
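/*
 * Gloss (not from the original file): innerbucketsize ends up as the
 * smallest per-clause bucket-fraction estimate, since each extra hash clause
 * can only narrow which inner tuples share a bucket.  Hash-qual evaluation
 * is then charged per outer tuple against about inner_path_rows *
 * innerbucketsize candidates, and for the matched-outer portion of
 * SEMI/ANTI (or unique-inner) joins that figure is halved on the assumption
 * that the bucket scan stops at the first match.
 */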
    Cost        plan_run_cost = plan->total_cost - plan->startup_cost;

        sp_cost.per_tuple += 0.50 * plan_run_cost;

            Cost *rescan_startup_cost, Cost *rescan_total_cost)

        case T_FunctionScan:

            *rescan_startup_cost = 0;

            if (((HashPath *) path)->num_batches == 1)

                *rescan_startup_cost = 0;
                *rescan_total_cost = path->total_cost - path->startup_cost;

                *rescan_startup_cost = path->startup_cost;
                *rescan_total_cost = path->total_cost;

        case T_WorkTableScan:

                                                      path->pathtarget->width);
                long        work_mem_bytes = work_mem * 1024L;

                if (nbytes > work_mem_bytes)

                    double      npages = ceil(nbytes / BLCKSZ);

                *rescan_startup_cost = 0;
                *rescan_total_cost = run_cost;

                                                      path->pathtarget->width);
                long        work_mem_bytes = work_mem * 1024L;

                if (nbytes > work_mem_bytes)

                    double      npages = ceil(nbytes / BLCKSZ);

                *rescan_startup_cost = 0;
                *rescan_total_cost = run_cost;

                        rescan_startup_cost, rescan_total_cost);

            *rescan_startup_cost = path->startup_cost;
            *rescan_total_cost = path->total_cost;
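/*
 * Gloss (not from the original file): cost_rescan gives cheaper estimates
 * where a restart really is cheaper: a function scan's materialized output
 * and a single-batch hash join's hash table don't have to be rebuilt, and
 * nodes that buffered their output are re-read at a small per-tuple CPU
 * charge plus page I/O only if the buffered data exceeds work_mem.  Anything
 * else simply reuses the path's own startup and total costs.
 */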
    *cost = context.total;

    *cost = context.total;

        if (rinfo->eval_cost.startup < 0)

            if (rinfo->orclause)

            if (rinfo->pseudoconstant)

            rinfo->eval_cost = locContext.total;

                          &iofunc, &typioparam);

                        &iofunc, &typisvarlena);

        foreach(lc, rcexpr->opnos)

            elog(ERROR, "cannot handle unplanned sub-select");

        foreach(l, restrictlist)

                joinquals = lappend(joinquals, rinfo);

        joinquals = restrictlist;

        avgmatch = nselec * innerrel->rows / jselec;

        avgmatch = Max(1.0, avgmatch);

    Relids      joinrelids = joinpath->path.parent->relids;

    if (innerpath->param_info == NULL)

        case T_IndexOnlyScan:
            indexclauses = ((IndexPath *) innerpath)->indexclauses;

        case T_BitmapHeapScan:

                indexclauses = ((IndexPath *) bmqual)->indexclauses;

    foreach(lc, innerpath->param_info->ppi_clauses)

                                   innerpath->parent->relids,

    tuples = selec * outer_tuples * inner_tuples;

                                List *param_clauses)

    if (nrows > rel->rows)

                               List *restrict_clauses)

    if (nrows > rel->rows)

        foreach(l, restrictlist)

                pushedquals = lappend(pushedquals, rinfo);

                joinquals = lappend(joinquals, rinfo);

            nrows = outer_rows * inner_rows * fkselec * jselec;

            nrows = outer_rows * inner_rows * fkselec * jselec;
            if (nrows < outer_rows)

            nrows = outer_rows * inner_rows * fkselec * jselec;
            if (nrows < outer_rows)

            if (nrows < inner_rows)

            nrows = outer_rows * fkselec * jselec;

            nrows = outer_rows * (1.0 - fkselec * jselec);

            elog(ERROR, "unrecognized join type: %d", (int) jointype);