nodeHash.c
1 /*-------------------------------------------------------------------------
2  *
3  * nodeHash.c
4  * Routines to hash relations for hashjoin
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/executor/nodeHash.c
12  *
13  * See note on parallelism in nodeHashjoin.c.
14  *
15  *-------------------------------------------------------------------------
16  */
17 /*
18  * INTERFACE ROUTINES
19  * MultiExecHash - generate an in-memory hash table of the relation
20  * ExecInitHash - initialize node and subnodes
21  * ExecEndHash - shutdown node and subnodes
22  */
23 
24 #include "postgres.h"
25 
26 #include <math.h>
27 #include <limits.h>
28 
29 #include "access/htup_details.h"
30 #include "access/parallel.h"
31 #include "catalog/pg_statistic.h"
32 #include "commands/tablespace.h"
33 #include "executor/execdebug.h"
34 #include "executor/hashjoin.h"
35 #include "executor/nodeHash.h"
36 #include "executor/nodeHashjoin.h"
37 #include "miscadmin.h"
38 #include "pgstat.h"
39 #include "port/atomics.h"
40 #include "port/pg_bitutils.h"
41 #include "utils/dynahash.h"
42 #include "utils/guc.h"
43 #include "utils/lsyscache.h"
44 #include "utils/memutils.h"
45 #include "utils/syscache.h"
46 
47 static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
48 static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
49 static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
50 static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
51 static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
52  int mcvsToUse);
53 static void ExecHashSkewTableInsert(HashJoinTable hashtable,
54  TupleTableSlot *slot,
55  uint32 hashvalue,
56  int bucketNumber);
57 static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
58 
59 static void *dense_alloc(HashJoinTable hashtable, Size size);
60 static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
61  size_t size,
62  dsa_pointer *shared);
63 static void MultiExecPrivateHash(HashState *node);
64 static void MultiExecParallelHash(HashState *node);
65 static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
66  int bucketno);
67 static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
68  HashJoinTuple tuple);
69 static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
70  HashJoinTuple tuple,
71  dsa_pointer tuple_shared);
72 static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
73 static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
74 static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
75 static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
76 static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
77  dsa_pointer *shared);
78 static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
79  int batchno,
80  size_t size);
81 static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
82 static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
83 
84 
85 /* ----------------------------------------------------------------
86  * ExecHash
87  *
88  * stub for pro forma compliance
89  * ----------------------------------------------------------------
90  */
91 static TupleTableSlot *
92 ExecHash(PlanState *pstate)
93 {
94  elog(ERROR, "Hash node does not support ExecProcNode call convention");
95  return NULL;
96 }
97 
98 /* ----------------------------------------------------------------
99  * MultiExecHash
100  *
101  * build hash table for hashjoin, doing partitioning if more
102  * than one batch is required.
103  * ----------------------------------------------------------------
104  */
105 Node *
106 MultiExecHash(HashState *node)
107 {
108  /* must provide our own instrumentation support */
109  if (node->ps.instrument)
110  InstrStartNode(node->ps.instrument);
111 
112  if (node->parallel_state != NULL)
113  MultiExecParallelHash(node);
114  else
115  MultiExecPrivateHash(node);
116 
117  /* must provide our own instrumentation support */
118  if (node->ps.instrument)
119  InstrStopNode(node->ps.instrument, hashtable->partialTuples);
120 
121  /*
122  * We do not return the hash table directly because it's not a subtype of
123  * Node, and so would violate the MultiExecProcNode API. Instead, our
124  * parent Hashjoin node is expected to know how to fish it out of our node
125  * state. Ugly but not really worth cleaning up, since Hashjoin knows
126  * quite a bit more about Hash besides that.
127  */
128  return NULL;
129 }
130 
131 /* ----------------------------------------------------------------
132  * MultiExecPrivateHash
133  *
134  * parallel-oblivious version, building a backend-private
135  * hash table and (if necessary) batch files.
136  * ----------------------------------------------------------------
137  */
138 static void
139 MultiExecPrivateHash(HashState *node)
140 {
141  PlanState *outerNode;
142  List *hashkeys;
143  HashJoinTable hashtable;
144  TupleTableSlot *slot;
145  ExprContext *econtext;
146  uint32 hashvalue;
147 
148  /*
149  * get state info from node
150  */
151  outerNode = outerPlanState(node);
152  hashtable = node->hashtable;
153 
154  /*
155  * set expression context
156  */
157  hashkeys = node->hashkeys;
158  econtext = node->ps.ps_ExprContext;
159 
160  /*
161  * Get all tuples from the node below the Hash node and insert into the
162  * hash table (or temp files).
163  */
164  for (;;)
165  {
166  slot = ExecProcNode(outerNode);
167  if (TupIsNull(slot))
168  break;
169  /* We have to compute the hash value */
170  econtext->ecxt_outertuple = slot;
171  if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
172  false, hashtable->keepNulls,
173  &hashvalue))
174  {
175  int bucketNumber;
176 
177  bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
178  if (bucketNumber != INVALID_SKEW_BUCKET_NO)
179  {
180  /* It's a skew tuple, so put it into that hash table */
181  ExecHashSkewTableInsert(hashtable, slot, hashvalue,
182  bucketNumber);
183  hashtable->skewTuples += 1;
184  }
185  else
186  {
187  /* Not subject to skew optimization, so insert normally */
188  ExecHashTableInsert(hashtable, slot, hashvalue);
189  }
190  hashtable->totalTuples += 1;
191  }
192  }
193 
194  /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
195  if (hashtable->nbuckets != hashtable->nbuckets_optimal)
196  ExecHashIncreaseNumBuckets(hashtable);
197 
198  /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
199  hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
200  if (hashtable->spaceUsed > hashtable->spacePeak)
201  hashtable->spacePeak = hashtable->spaceUsed;
202 
203  hashtable->partialTuples = hashtable->totalTuples;
204 }
205 
206 /* ----------------------------------------------------------------
207  * MultiExecParallelHash
208  *
209  * parallel-aware version, building a shared hash table and
210  * (if necessary) batch files using the combined effort of
211  * a set of co-operating backends.
212  * ----------------------------------------------------------------
213  */
214 static void
215 MultiExecParallelHash(HashState *node)
216 {
217  ParallelHashJoinState *pstate;
218  PlanState *outerNode;
219  List *hashkeys;
220  HashJoinTable hashtable;
221  TupleTableSlot *slot;
222  ExprContext *econtext;
223  uint32 hashvalue;
224  Barrier *build_barrier;
225  int i;
226 
227  /*
228  * get state info from node
229  */
230  outerNode = outerPlanState(node);
231  hashtable = node->hashtable;
232 
233  /*
234  * set expression context
235  */
236  hashkeys = node->hashkeys;
237  econtext = node->ps.ps_ExprContext;
238 
239  /*
240  * Synchronize the parallel hash table build. At this stage we know that
241  * the shared hash table has been or is being set up by
242  * ExecHashTableCreate(), but we don't know if our peers have returned
243  * from there or are here in MultiExecParallelHash(), and if so how far
244  * through they are. To find out, we check the build_barrier phase then
245  * and jump to the right step in the build algorithm.
246  */
247  pstate = hashtable->parallel_state;
248  build_barrier = &pstate->build_barrier;
249  Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
250  switch (BarrierPhase(build_barrier))
251  {
252  case PHJ_BUILD_ALLOCATE:
253 
254  /*
255  * Either I just allocated the initial hash table in
256  * ExecHashTableCreate(), or someone else is doing that. Either
257  * way, wait for everyone to arrive here so we can proceed.
258  */
259  BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
260  /* Fall through. */
261 
262  case PHJ_BUILD_HASH_INNER:
263 
264  /*
265  * It's time to begin hashing, or if we just arrived here then
266  * hashing is already underway, so join in that effort. While
267  * hashing we have to be prepared to help increase the number of
268  * batches or buckets at any time, and if we arrived here when
269  * that was already underway we'll have to help complete that work
270  * immediately so that it's safe to access batches and buckets
271  * below.
272  */
273  if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
274  PHJ_GROW_BATCHES_ELECT)
275  ExecParallelHashIncreaseNumBatches(hashtable);
276  if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
277  PHJ_GROW_BUCKETS_ELECT)
278  ExecParallelHashIncreaseNumBuckets(hashtable);
279  ExecParallelHashEnsureBatchAccessors(hashtable);
280  ExecParallelHashTableSetCurrentBatch(hashtable, 0);
281  for (;;)
282  {
283  slot = ExecProcNode(outerNode);
284  if (TupIsNull(slot))
285  break;
286  econtext->ecxt_outertuple = slot;
287  if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
288  false, hashtable->keepNulls,
289  &hashvalue))
290  ExecParallelHashTableInsert(hashtable, slot, hashvalue);
291  hashtable->partialTuples++;
292  }
293 
294  /*
295  * Make sure that any tuples we wrote to disk are visible to
296  * others before anyone tries to load them.
297  */
298  for (i = 0; i < hashtable->nbatch; ++i)
299  sts_end_write(hashtable->batches[i].inner_tuples);
300 
301  /*
302  * Update shared counters. We need an accurate total tuple count
303  * to control the empty table optimization.
304  */
305  ExecParallelHashMergeCounters(hashtable);
306 
307  BarrierDetach(&pstate->grow_buckets_barrier);
308  BarrierDetach(&pstate->grow_batches_barrier);
309 
310  /*
311  * Wait for everyone to finish building and flushing files and
312  * counters.
313  */
314  if (BarrierArriveAndWait(build_barrier,
315  WAIT_EVENT_HASH_BUILD_HASH_INNER))
316  {
317  /*
318  * Elect one backend to disable any further growth. Batches
319  * are now fixed. While building them we made sure they'd fit
320  * in our memory budget when we load them back in later (or we
321  * tried to do that and gave up because we detected extreme
322  * skew).
323  */
324  pstate->growth = PHJ_GROWTH_DISABLED;
325  }
326  }
327 
328  /*
329  * We're not yet attached to a batch. We all agree on the dimensions and
330  * number of inner tuples (for the empty table optimization).
331  */
332  hashtable->curbatch = -1;
333  hashtable->nbuckets = pstate->nbuckets;
334  hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
335  hashtable->totalTuples = pstate->total_tuples;
336 
337  /*
338  * Unless we're completely done and the batch state has been freed, make
339  * sure we have accessors.
340  */
341  if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
342  ExecParallelHashEnsureBatchAccessors(hashtable);
343 
344  /*
345  * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
346  * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
347  * there already).
348  */
349  Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
350  BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
351  BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
352 }
353 
354 /* ----------------------------------------------------------------
355  * ExecInitHash
356  *
357  * Init routine for Hash node
358  * ----------------------------------------------------------------
359  */
360 HashState *
361 ExecInitHash(Hash *node, EState *estate, int eflags)
362 {
363  HashState *hashstate;
364 
365  /* check for unsupported flags */
366  Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
367 
368  /*
369  * create state structure
370  */
371  hashstate = makeNode(HashState);
372  hashstate->ps.plan = (Plan *) node;
373  hashstate->ps.state = estate;
374  hashstate->ps.ExecProcNode = ExecHash;
375  hashstate->hashtable = NULL;
376  hashstate->hashkeys = NIL; /* will be set by parent HashJoin */
377 
378  /*
379  * Miscellaneous initialization
380  *
381  * create expression context for node
382  */
383  ExecAssignExprContext(estate, &hashstate->ps);
384 
385  /*
386  * initialize child nodes
387  */
388  outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
389 
390  /*
391  * initialize our result slot and type. No need to build projection
392  * because this node doesn't do projections.
393  */
394  ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
395  hashstate->ps.ps_ProjInfo = NULL;
396 
397  /*
398  * initialize child expressions
399  */
400  Assert(node->plan.qual == NIL);
401  hashstate->hashkeys =
402  ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
403 
404  return hashstate;
405 }
406 
407 /* ---------------------------------------------------------------
408  * ExecEndHash
409  *
410  * clean up routine for Hash node
411  * ----------------------------------------------------------------
412  */
413 void
414 ExecEndHash(HashState *node)
415 {
416  PlanState *outerPlan;
417 
418  /*
419  * free exprcontext
420  */
421  ExecFreeExprContext(&node->ps);
422 
423  /*
424  * shut down the subplan
425  */
426  outerPlan = outerPlanState(node);
427  ExecEndNode(outerPlan);
428 }
429 
430 
431 /* ----------------------------------------------------------------
432  * ExecHashTableCreate
433  *
434  * create an empty hashtable data structure for hashjoin.
435  * ----------------------------------------------------------------
436  */
437 HashJoinTable
438 ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
439 {
440  Hash *node;
441  HashJoinTable hashtable;
442  Plan *outerNode;
443  size_t space_allowed;
444  int nbuckets;
445  int nbatch;
446  double rows;
447  int num_skew_mcvs;
448  int log2_nbuckets;
449  int nkeys;
450  int i;
451  ListCell *ho;
452  ListCell *hc;
453  MemoryContext oldcxt;
454 
455  /*
456  * Get information about the size of the relation to be hashed (it's the
457  * "outer" subtree of this node, but the inner relation of the hashjoin).
458  * Compute the appropriate size of the hash table.
459  */
460  node = (Hash *) state->ps.plan;
461  outerNode = outerPlan(node);
462 
463  /*
464  * If this is a shared hash table with a partial plan, then we can't use
465  * outerNode->plan_rows to estimate its size. We need an estimate of the
466  * total number of rows across all copies of the partial plan.
467  */
468  rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
469 
470  ExecChooseHashTableSize(rows, outerNode->plan_width,
471  OidIsValid(node->skewTable),
472  state->parallel_state != NULL,
473  state->parallel_state != NULL ?
474  state->parallel_state->nparticipants - 1 : 0,
475  &space_allowed,
476  &nbuckets, &nbatch, &num_skew_mcvs);
477 
478  /* nbuckets must be a power of 2 */
479  log2_nbuckets = my_log2(nbuckets);
480  Assert(nbuckets == (1 << log2_nbuckets));
481 
482  /*
483  * Initialize the hash table control block.
484  *
485  * The hashtable control block is just palloc'd from the executor's
486  * per-query memory context. Everything else should be kept inside the
487  * subsidiary hashCxt or batchCxt.
488  */
489  hashtable = palloc_object(HashJoinTableData);
490  hashtable->nbuckets = nbuckets;
491  hashtable->nbuckets_original = nbuckets;
492  hashtable->nbuckets_optimal = nbuckets;
493  hashtable->log2_nbuckets = log2_nbuckets;
494  hashtable->log2_nbuckets_optimal = log2_nbuckets;
495  hashtable->buckets.unshared = NULL;
496  hashtable->keepNulls = keepNulls;
497  hashtable->skewEnabled = false;
498  hashtable->skewBucket = NULL;
499  hashtable->skewBucketLen = 0;
500  hashtable->nSkewBuckets = 0;
501  hashtable->skewBucketNums = NULL;
502  hashtable->nbatch = nbatch;
503  hashtable->curbatch = 0;
504  hashtable->nbatch_original = nbatch;
505  hashtable->nbatch_outstart = nbatch;
506  hashtable->growEnabled = true;
507  hashtable->totalTuples = 0;
508  hashtable->partialTuples = 0;
509  hashtable->skewTuples = 0;
510  hashtable->innerBatchFile = NULL;
511  hashtable->outerBatchFile = NULL;
512  hashtable->spaceUsed = 0;
513  hashtable->spacePeak = 0;
514  hashtable->spaceAllowed = space_allowed;
515  hashtable->spaceUsedSkew = 0;
516  hashtable->spaceAllowedSkew =
517  hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
518  hashtable->chunks = NULL;
519  hashtable->current_chunk = NULL;
520  hashtable->parallel_state = state->parallel_state;
521  hashtable->area = state->ps.state->es_query_dsa;
522  hashtable->batches = NULL;
523 
524 #ifdef HJDEBUG
525  printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
526  hashtable, nbatch, nbuckets);
527 #endif
528 
529  /*
530  * Create temporary memory contexts in which to keep the hashtable working
531  * storage. See notes in executor/hashjoin.h.
532  */
534  "HashTableContext",
536 
537  hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
538  "HashBatchContext",
539  ALLOCSET_DEFAULT_SIZES);
540 
541  /* Allocate data that will live for the life of the hashjoin */
542 
543  oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
544 
545  /*
546  * Get info about the hash functions to be used for each hash key. Also
547  * remember whether the join operators are strict.
548  */
549  nkeys = list_length(hashOperators);
550  hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
551  hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
552  hashtable->hashStrict = palloc_array(bool, nkeys);
553  hashtable->collations = palloc_array(Oid, nkeys);
554  i = 0;
555  forboth(ho, hashOperators, hc, hashCollations)
556  {
557  Oid hashop = lfirst_oid(ho);
558  Oid left_hashfn;
559  Oid right_hashfn;
560 
561  if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
562  elog(ERROR, "could not find hash function for hash operator %u",
563  hashop);
564  fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
565  fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
566  hashtable->hashStrict[i] = op_strict(hashop);
567  hashtable->collations[i] = lfirst_oid(hc);
568  i++;
569  }
570 
571  if (nbatch > 1 && hashtable->parallel_state == NULL)
572  {
573  /*
574  * allocate and initialize the file arrays in hashCxt (not needed for
575  * parallel case which uses shared tuplestores instead of raw files)
576  */
577  hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
578  hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
579  /* The files will not be opened until needed... */
580  /* ... but make sure we have temp tablespaces established for them */
581  PrepareTempTablespaces();
582  }
583 
584  MemoryContextSwitchTo(oldcxt);
585 
586  if (hashtable->parallel_state)
587  {
588  ParallelHashJoinState *pstate = hashtable->parallel_state;
589  Barrier *build_barrier;
590 
591  /*
592  * Attach to the build barrier. The corresponding detach operation is
593  * in ExecHashTableDetach. Note that we won't attach to the
594  * batch_barrier for batch 0 yet. We'll attach later and start it out
595  * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
596  * then loaded while hashing (the standard hybrid hash join
597  * algorithm), and we'll coordinate that using build_barrier.
598  */
599  build_barrier = &pstate->build_barrier;
600  BarrierAttach(build_barrier);
601 
602  /*
603  * So far we have no idea whether there are any other participants,
604  * and if so, what phase they are working on. The only thing we care
605  * about at this point is whether someone has already created the
606  * SharedHashJoinBatch objects and the hash table for batch 0. One
607  * backend will be elected to do that now if necessary.
608  */
609  if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
610  BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
611  {
612  pstate->nbatch = nbatch;
613  pstate->space_allowed = space_allowed;
614  pstate->growth = PHJ_GROWTH_OK;
615 
616  /* Set up the shared state for coordinating batches. */
617  ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
618 
619  /*
620  * Allocate batch 0's hash table up front so we can load it
621  * directly while hashing.
622  */
623  pstate->nbuckets = nbuckets;
624  ExecParallelHashTableAlloc(hashtable, 0);
625  }
626 
627  /*
628  * The next Parallel Hash synchronization point is in
629  * MultiExecParallelHash(), which will progress it all the way to
630  * PHJ_BUILD_RUN. The caller must not return control from this
631  * executor node between now and then.
632  */
633  }
634  else
635  {
636  /*
637  * Prepare context for the first-scan space allocations; allocate the
638  * hashbucket array therein, and set each bucket "empty".
639  */
640  MemoryContextSwitchTo(hashtable->batchCxt);
641 
642  hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
643 
644  /*
645  * Set up for skew optimization, if possible and there's a need for
646  * more than one batch. (In a one-batch join, there's no point in
647  * it.)
648  */
649  if (nbatch > 1)
650  ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
651 
652  MemoryContextSwitchTo(oldcxt);
653  }
654 
655  return hashtable;
656 }
657 
658 
659 /*
660  * Compute appropriate size for hashtable given the estimated size of the
661  * relation to be hashed (number of rows and average row width).
662  *
663  * This is exported so that the planner's costsize.c can use it.
664  */
665 
666 /* Target bucket loading (tuples per bucket) */
667 #define NTUP_PER_BUCKET 1
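/*
 * Rough worked example (illustrative figures only, not taken from the code
 * below): with hash_table_bytes = 4MB, ntuples = 1,000,000 and an assumed
 * ~80 bytes per in-memory entry (tuple overhead + header + width), the
 * inner relation needs ~80MB, so one batch cannot fit.  The multi-batch
 * path then sizes the bucket array for a full hash_mem (about 64K buckets,
 * i.e. 512KB of pointers) and picks nbatch as the next power of 2 of
 * ceil(80MB / (4MB - 512KB)), which comes out to 32 batches.
 */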
668 
669 void
670 ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
671  bool try_combined_hash_mem,
672  int parallel_workers,
673  size_t *space_allowed,
674  int *numbuckets,
675  int *numbatches,
676  int *num_skew_mcvs)
677 {
678  int tupsize;
679  double inner_rel_bytes;
680  size_t hash_table_bytes;
681  size_t bucket_bytes;
682  size_t max_pointers;
683  int nbatch = 1;
684  int nbuckets;
685  double dbuckets;
686 
687  /* Force a plausible relation size if no info */
688  if (ntuples <= 0.0)
689  ntuples = 1000.0;
690 
691  /*
692  * Estimate tupsize based on footprint of tuple in hashtable... note this
693  * does not allow for any palloc overhead. The manipulations of spaceUsed
694  * don't count palloc overhead either.
695  */
696  tupsize = HJTUPLE_OVERHEAD +
697  MAXALIGN(SizeofMinimalTupleHeader) +
698  MAXALIGN(tupwidth);
699  inner_rel_bytes = ntuples * tupsize;
700 
701  /*
702  * Compute in-memory hashtable size limit from GUCs.
703  */
704  hash_table_bytes = get_hash_memory_limit();
705 
706  /*
707  * Parallel Hash tries to use the combined hash_mem of all workers to
708  * avoid the need to batch. If that won't work, it falls back to hash_mem
709  * per worker and tries to process batches in parallel.
710  */
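/*
 * For example (illustrative only): with hash_mem = 4MB and
 * parallel_workers = 4, the combined limit tried first is
 * 4MB * (4 + 1) = 20MB; only if that still requires multiple batches does
 * the code below recurse with try_combined_hash_mem = false.
 */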
711  if (try_combined_hash_mem)
712  {
713  /* Careful, this could overflow size_t */
714  double newlimit;
715 
716  newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
717  newlimit = Min(newlimit, (double) SIZE_MAX);
718  hash_table_bytes = (size_t) newlimit;
719  }
720 
721  *space_allowed = hash_table_bytes;
722 
723  /*
724  * If skew optimization is possible, estimate the number of skew buckets
725  * that will fit in the memory allowed, and decrement the assumed space
726  * available for the main hash table accordingly.
727  *
728  * We make the optimistic assumption that each skew bucket will contain
729  * one inner-relation tuple. If that turns out to be low, we will recover
730  * at runtime by reducing the number of skew buckets.
731  *
732  * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
733  * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
734  * will round up to the next power of 2 and then multiply by 4 to reduce
735  * collisions.
736  */
737  if (useskew)
738  {
739  size_t bytes_per_mcv;
740  size_t skew_mcvs;
741 
742  /*----------
743  * Compute number of MCVs we could hold in hash_table_bytes
744  *
745  * Divisor is:
746  * size of a hash tuple +
747  * worst-case size of skewBucket[] per MCV +
748  * size of skewBucketNums[] entry +
749  * size of skew bucket struct itself
750  *----------
751  */
752  bytes_per_mcv = tupsize +
753  (8 * sizeof(HashSkewBucket *)) +
754  sizeof(int) +
755  SKEW_BUCKET_OVERHEAD;
756  skew_mcvs = hash_table_bytes / bytes_per_mcv;
757 
758  /*
759  * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
760  * not to worry about size_t overflow in the multiplication)
761  */
762  skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
763 
764  /* Now clamp to integer range */
765  skew_mcvs = Min(skew_mcvs, INT_MAX);
766 
767  *num_skew_mcvs = (int) skew_mcvs;
768 
769  /* Reduce hash_table_bytes by the amount needed for the skew table */
770  if (skew_mcvs > 0)
771  hash_table_bytes -= skew_mcvs * bytes_per_mcv;
772  }
773  else
774  *num_skew_mcvs = 0;
775 
776  /*
777  * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
778  * memory is filled, assuming a single batch; but limit the value so that
779  * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
780  * nor MaxAllocSize.
781  *
782  * Note that both nbuckets and nbatch must be powers of 2 to make
783  * ExecHashGetBucketAndBatch fast.
784  */
785  max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
786  max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
787  /* If max_pointers isn't a power of 2, must round it down to one */
788  max_pointers = pg_prevpower2_size_t(max_pointers);
789 
790  /* Also ensure we avoid integer overflow in nbatch and nbuckets */
791  /* (this step is redundant given the current value of MaxAllocSize) */
792  max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
793 
794  dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
795  dbuckets = Min(dbuckets, max_pointers);
796  nbuckets = (int) dbuckets;
797  /* don't let nbuckets be really small, though ... */
798  nbuckets = Max(nbuckets, 1024);
799  /* ... and force it to be a power of 2. */
800  nbuckets = pg_nextpower2_32(nbuckets);
801 
802  /*
803  * If there's not enough space to store the projected number of tuples and
804  * the required bucket headers, we will need multiple batches.
805  */
806  bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
807  if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
808  {
809  /* We'll need multiple batches */
810  size_t sbuckets;
811  double dbatch;
812  int minbatch;
813  size_t bucket_size;
814 
815  /*
816  * If Parallel Hash with combined hash_mem would still need multiple
817  * batches, we'll have to fall back to regular hash_mem budget.
818  */
819  if (try_combined_hash_mem)
820  {
821  ExecChooseHashTableSize(ntuples, tupwidth, useskew,
822  false, parallel_workers,
823  space_allowed,
824  numbuckets,
825  numbatches,
826  num_skew_mcvs);
827  return;
828  }
829 
830  /*
831  * Estimate the number of buckets we'll want to have when hash_mem is
832  * entirely full. Each bucket will contain a bucket pointer plus
833  * NTUP_PER_BUCKET tuples, whose projected size already includes
834  * overhead for the hash code, pointer to the next tuple, etc.
835  */
836  bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
837  if (hash_table_bytes <= bucket_size)
838  sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
839  else
840  sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
841  sbuckets = Min(sbuckets, max_pointers);
842  nbuckets = (int) sbuckets;
843  nbuckets = pg_nextpower2_32(nbuckets);
844  bucket_bytes = nbuckets * sizeof(HashJoinTuple);
845 
846  /*
847  * Buckets are simple pointers to hashjoin tuples, while tupsize
848  * includes the pointer, hash code, and MinimalTupleData. So buckets
849  * should never really exceed 25% of hash_mem (even for
850  * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
851  * 2^N bytes, where we might get more because of doubling. So let's
852  * look for 50% here.
853  */
854  Assert(bucket_bytes <= hash_table_bytes / 2);
855 
856  /* Calculate required number of batches. */
857  dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
858  dbatch = Min(dbatch, max_pointers);
859  minbatch = (int) dbatch;
860  nbatch = pg_nextpower2_32(Max(2, minbatch));
861  }
862 
863  Assert(nbuckets > 0);
864  Assert(nbatch > 0);
865 
866  *numbuckets = nbuckets;
867  *numbatches = nbatch;
868 }
869 
870 
871 /* ----------------------------------------------------------------
872  * ExecHashTableDestroy
873  *
874  * destroy a hash table
875  * ----------------------------------------------------------------
876  */
877 void
878 ExecHashTableDestroy(HashJoinTable hashtable)
879 {
880  int i;
881 
882  /*
883  * Make sure all the temp files are closed. We skip batch 0, since it
884  * can't have any temp files (and the arrays might not even exist if
885  * nbatch is only 1). Parallel hash joins don't use these files.
886  */
887  if (hashtable->innerBatchFile != NULL)
888  {
889  for (i = 1; i < hashtable->nbatch; i++)
890  {
891  if (hashtable->innerBatchFile[i])
892  BufFileClose(hashtable->innerBatchFile[i]);
893  if (hashtable->outerBatchFile[i])
894  BufFileClose(hashtable->outerBatchFile[i]);
895  }
896  }
897 
898  /* Release working memory (batchCxt is a child, so it goes away too) */
899  MemoryContextDelete(hashtable->hashCxt);
900 
901  /* And drop the control block */
902  pfree(hashtable);
903 }
904 
905 /*
906  * ExecHashIncreaseNumBatches
907  * increase the original number of batches in order to reduce
908  * current memory consumption
909  */
910 static void
911 ExecHashIncreaseNumBatches(HashJoinTable hashtable)
912 {
913  int oldnbatch = hashtable->nbatch;
914  int curbatch = hashtable->curbatch;
915  int nbatch;
916  MemoryContext oldcxt;
917  long ninmemory;
918  long nfreed;
919  HashMemoryChunk oldchunks;
920 
921  /* do nothing if we've decided to shut off growth */
922  if (!hashtable->growEnabled)
923  return;
924 
925  /* safety check to avoid overflow */
926  if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
927  return;
928 
929  nbatch = oldnbatch * 2;
930  Assert(nbatch > 1);
931 
932 #ifdef HJDEBUG
933  printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
934  hashtable, nbatch, hashtable->spaceUsed);
935 #endif
936 
937  oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
938 
939  if (hashtable->innerBatchFile == NULL)
940  {
941  /* we had no file arrays before */
942  hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
943  hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
944  /* time to establish the temp tablespaces, too */
945  PrepareTempTablespaces();
946  }
947  else
948  {
949  /* enlarge arrays and zero out added entries */
950  hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
951  hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
952  }
953 
954  MemoryContextSwitchTo(oldcxt);
955 
956  hashtable->nbatch = nbatch;
957 
958  /*
959  * Scan through the existing hash table entries and dump out any that are
960  * no longer of the current batch.
961  */
962  ninmemory = nfreed = 0;
963 
964  /* If we know we need to resize nbuckets, we can do it while rebatching. */
965  if (hashtable->nbuckets_optimal != hashtable->nbuckets)
966  {
967  /* we never decrease the number of buckets */
968  Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
969 
970  hashtable->nbuckets = hashtable->nbuckets_optimal;
971  hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
972 
973  hashtable->buckets.unshared =
974  repalloc_array(hashtable->buckets.unshared,
975  HashJoinTuple, hashtable->nbuckets);
976  }
977 
978  /*
979  * We will scan through the chunks directly, so that we can reset the
980  * buckets now and not have to keep track which tuples in the buckets have
981  * already been processed. We will free the old chunks as we go.
982  */
983  memset(hashtable->buckets.unshared, 0,
984  sizeof(HashJoinTuple) * hashtable->nbuckets);
985  oldchunks = hashtable->chunks;
986  hashtable->chunks = NULL;
987 
988  /* so, let's scan through the old chunks, and all tuples in each chunk */
989  while (oldchunks != NULL)
990  {
991  HashMemoryChunk nextchunk = oldchunks->next.unshared;
992 
993  /* position within the buffer (up to oldchunks->used) */
994  size_t idx = 0;
995 
996  /* process all tuples stored in this chunk (and then free it) */
997  while (idx < oldchunks->used)
998  {
999  HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
1000  MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1001  int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1002  int bucketno;
1003  int batchno;
1004 
1005  ninmemory++;
1006  ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1007  &bucketno, &batchno);
1008 
1009  if (batchno == curbatch)
1010  {
1011  /* keep tuple in memory - copy it into the new chunk */
1012  HashJoinTuple copyTuple;
1013 
1014  copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1015  memcpy(copyTuple, hashTuple, hashTupleSize);
1016 
1017  /* and add it back to the appropriate bucket */
1018  copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1019  hashtable->buckets.unshared[bucketno] = copyTuple;
1020  }
1021  else
1022  {
1023  /* dump it out */
1024  Assert(batchno > curbatch);
1025  ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1026  hashTuple->hashvalue,
1027  &hashtable->innerBatchFile[batchno]);
1028 
1029  hashtable->spaceUsed -= hashTupleSize;
1030  nfreed++;
1031  }
1032 
1033  /* next tuple in this chunk */
1034  idx += MAXALIGN(hashTupleSize);
1035 
1036  /* allow this loop to be cancellable */
1037  CHECK_FOR_INTERRUPTS();
1038  }
1039 
1040  /* we're done with this chunk - free it and proceed to the next one */
1041  pfree(oldchunks);
1042  oldchunks = nextchunk;
1043  }
1044 
1045 #ifdef HJDEBUG
1046  printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1047  hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1048 #endif
1049 
1050  /*
1051  * If we dumped out either all or none of the tuples in the table, disable
1052  * further expansion of nbatch. This situation implies that we have
1053  * enough tuples of identical hashvalues to overflow spaceAllowed.
1054  * Increasing nbatch will not fix it since there's no way to subdivide the
1055  * group any more finely. We have to just gut it out and hope the server
1056  * has enough RAM.
1057  */
1058  if (nfreed == 0 || nfreed == ninmemory)
1059  {
1060  hashtable->growEnabled = false;
1061 #ifdef HJDEBUG
1062  printf("Hashjoin %p: disabling further increase of nbatch\n",
1063  hashtable);
1064 #endif
1065  }
1066 }
1067 
1068 /*
1069  * ExecParallelHashIncreaseNumBatches
1070  * Every participant attached to grow_batches_barrier must run this
1071  * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1072  */
1073 static void
1074 ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1075 {
1076  ParallelHashJoinState *pstate = hashtable->parallel_state;
1077 
1079 
1080  /*
1081  * It's unlikely, but we need to be prepared for new participants to show
1082  * up while we're in the middle of this operation so we need to switch on
1083  * barrier phase here.
1084  */
1085  switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
1086  {
1087  case PHJ_GROW_BATCHES_ELECT:
1088 
1089  /*
1090  * Elect one participant to prepare to grow the number of batches.
1091  * This involves reallocating or resetting the buckets of batch 0
1092  * in preparation for all participants to begin repartitioning the
1093  * tuples.
1094  */
1095  if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1096  WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1097  {
1098  dsa_pointer_atomic *buckets;
1099  ParallelHashJoinBatch *old_batch0;
1100  int new_nbatch;
1101  int i;
1102 
1103  /* Move the old batch out of the way. */
1104  old_batch0 = hashtable->batches[0].shared;
1105  pstate->old_batches = pstate->batches;
1106  pstate->old_nbatch = hashtable->nbatch;
1107  pstate->batches = InvalidDsaPointer;
1108 
1109  /* Free this backend's old accessors. */
1110  ExecParallelHashCloseBatchAccessors(hashtable);
1111 
1112  /* Figure out how many batches to use. */
1113  if (hashtable->nbatch == 1)
1114  {
1115  /*
1116  * We are going from single-batch to multi-batch. We need
1117  * to switch from one large combined memory budget to the
1118  * regular hash_mem budget.
1119  */
1120  pstate->space_allowed = get_hash_memory_limit();
1121 
1122  /*
1123  * The combined hash_mem of all participants wasn't
1124  * enough. Therefore one batch per participant would be
1125  * approximately equivalent and would probably also be
1126  * insufficient. So try two batches per participant,
1127  * rounded up to a power of two.
1128  */
1129  new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1130  }
1131  else
1132  {
1133  /*
1134  * We were already multi-batched. Try doubling the number
1135  * of batches.
1136  */
1137  new_nbatch = hashtable->nbatch * 2;
1138  }
1139 
1140  /* Allocate new larger generation of batches. */
1141  Assert(hashtable->nbatch == pstate->nbatch);
1142  ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1143  Assert(hashtable->nbatch == pstate->nbatch);
1144 
1145  /* Replace or recycle batch 0's bucket array. */
1146  if (pstate->old_nbatch == 1)
1147  {
1148  double dtuples;
1149  double dbuckets;
1150  int new_nbuckets;
1151 
1152  /*
1153  * We probably also need a smaller bucket array. How many
1154  * tuples do we expect per batch, assuming we have only
1155  * half of them so far? Normally we don't need to change
1156  * the bucket array's size, because the size of each batch
1157  * stays the same as we add more batches, but in this
1158  * special case we move from a large batch to many smaller
1159  * batches and it would be wasteful to keep the large
1160  * array.
1161  */
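 /*
 * Illustrative example (hypothetical numbers): if batch 0 has seen
 * 1,000,000 tuples so far and new_nbatch = 8, we expect about
 * (1,000,000 * 2.0) / 8 = 250,000 tuples per final batch, so roughly
 * 250,000 buckets, rounded up to the next power of 2, i.e. 262,144.
 */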
1162  dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1163  dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
1164  dbuckets = Min(dbuckets,
1165  MaxAllocSize / sizeof(dsa_pointer_atomic));
1166  new_nbuckets = (int) dbuckets;
1167  new_nbuckets = Max(new_nbuckets, 1024);
1168  new_nbuckets = pg_nextpower2_32(new_nbuckets);
1169  dsa_free(hashtable->area, old_batch0->buckets);
1170  hashtable->batches[0].shared->buckets =
1171  dsa_allocate(hashtable->area,
1172  sizeof(dsa_pointer_atomic) * new_nbuckets);
1173  buckets = (dsa_pointer_atomic *)
1174  dsa_get_address(hashtable->area,
1175  hashtable->batches[0].shared->buckets);
1176  for (i = 0; i < new_nbuckets; ++i)
1177  dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1178  pstate->nbuckets = new_nbuckets;
1179  }
1180  else
1181  {
1182  /* Recycle the existing bucket array. */
1183  hashtable->batches[0].shared->buckets = old_batch0->buckets;
1184  buckets = (dsa_pointer_atomic *)
1185  dsa_get_address(hashtable->area, old_batch0->buckets);
1186  for (i = 0; i < hashtable->nbuckets; ++i)
1187  dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1188  }
1189 
1190  /* Move all chunks to the work queue for parallel processing. */
1191  pstate->chunk_work_queue = old_batch0->chunks;
1192 
1193  /* Disable further growth temporarily while we're growing. */
1194  pstate->growth = PHJ_GROWTH_DISABLED;
1195  }
1196  else
1197  {
1198  /* All other participants just flush their tuples to disk. */
1199  ExecParallelHashCloseBatchAccessors(hashtable);
1200  }
1201  /* Fall through. */
1202 
1204  /* Wait for the above to be finished. */
1207  /* Fall through. */
1208 
1210  /* Make sure that we have the current dimensions and buckets. */
1211  ExecParallelHashEnsureBatchAccessors(hashtable);
1212  ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1213  /* Then partition, flush counters. */
1214  ExecParallelHashRepartitionFirst(hashtable);
1215  ExecParallelHashRepartitionRest(hashtable);
1216  ExecParallelHashMergeCounters(hashtable);
1217  /* Wait for the above to be finished. */
1220  /* Fall through. */
1221 
1223 
1224  /*
1225  * Elect one participant to clean up and decide whether further
1226  * repartitioning is needed, or should be disabled because it's
1227  * not helping.
1228  */
1231  {
1232  bool space_exhausted = false;
1233  bool extreme_skew_detected = false;
1234 
1235  /* Make sure that we have the current dimensions and buckets. */
1236  ExecParallelHashEnsureBatchAccessors(hashtable);
1237  ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1238 
1239  /* Are any of the new generation of batches exhausted? */
1240  for (int i = 0; i < hashtable->nbatch; ++i)
1241  {
1242  ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
1243 
1244  if (batch->space_exhausted ||
1245  batch->estimated_size > pstate->space_allowed)
1246  {
1247  int parent;
1248 
1249  space_exhausted = true;
1250 
1251  /*
1252  * Did this batch receive ALL of the tuples from its
1253  * parent batch? That would indicate that further
1254  * repartitioning isn't going to help (the hash values
1255  * are probably all the same).
1256  */
1257  parent = i % pstate->old_nbatch;
1258  if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1259  extreme_skew_detected = true;
1260  }
1261  }
1262 
1263  /* Don't keep growing if it's not helping or we'd overflow. */
1264  if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1265  pstate->growth = PHJ_GROWTH_DISABLED;
1266  else if (space_exhausted)
1267  pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1268  else
1269  pstate->growth = PHJ_GROWTH_OK;
1270 
1271  /* Free the old batches in shared memory. */
1272  dsa_free(hashtable->area, pstate->old_batches);
1273  pstate->old_batches = InvalidDsaPointer;
1274  }
1275  /* Fall through. */
1276 
1278  /* Wait for the above to complete. */
1281  }
1282 }
1283 
1284 /*
1285  * Repartition the tuples currently loaded into memory for inner batch 0
1286  * because the number of batches has been increased. Some tuples are retained
1287  * in memory and some are written out to a later batch.
1288  */
1289 static void
1290 ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1291 {
1292  dsa_pointer chunk_shared;
1293  HashMemoryChunk chunk;
1294 
1295  Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1296 
1297  while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1298  {
1299  size_t idx = 0;
1300 
1301  /* Repartition all tuples in this chunk. */
1302  while (idx < chunk->used)
1303  {
1304  HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1305  MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1306  HashJoinTuple copyTuple;
1307  dsa_pointer shared;
1308  int bucketno;
1309  int batchno;
1310 
1311  ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1312  &bucketno, &batchno);
1313 
1314  Assert(batchno < hashtable->nbatch);
1315  if (batchno == 0)
1316  {
1317  /* It still belongs in batch 0. Copy to a new chunk. */
1318  copyTuple =
1319  ExecParallelHashTupleAlloc(hashtable,
1320  HJTUPLE_OVERHEAD + tuple->t_len,
1321  &shared);
1322  copyTuple->hashvalue = hashTuple->hashvalue;
1323  memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1324  ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1325  copyTuple, shared);
1326  }
1327  else
1328  {
1329  size_t tuple_size =
1330  MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1331 
1332  /* It belongs in a later batch. */
1333  hashtable->batches[batchno].estimated_size += tuple_size;
1334  sts_puttuple(hashtable->batches[batchno].inner_tuples,
1335  &hashTuple->hashvalue, tuple);
1336  }
1337 
1338  /* Count this tuple. */
1339  ++hashtable->batches[0].old_ntuples;
1340  ++hashtable->batches[batchno].ntuples;
1341 
1342  idx += MAXALIGN(HJTUPLE_OVERHEAD +
1343  HJTUPLE_MINTUPLE(hashTuple)->t_len);
1344  }
1345 
1346  /* Free this chunk. */
1347  dsa_free(hashtable->area, chunk_shared);
1348 
1349  CHECK_FOR_INTERRUPTS();
1350  }
1351 }
1352 
1353 /*
1354  * Help repartition inner batches 1..n.
1355  */
1356 static void
1357 ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1358 {
1359  ParallelHashJoinState *pstate = hashtable->parallel_state;
1360  int old_nbatch = pstate->old_nbatch;
1361  SharedTuplestoreAccessor **old_inner_tuples;
1362  ParallelHashJoinBatch *old_batches;
1363  int i;
1364 
1365  /* Get our hands on the previous generation of batches. */
1366  old_batches = (ParallelHashJoinBatch *)
1367  dsa_get_address(hashtable->area, pstate->old_batches);
1368  old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
1369  for (i = 1; i < old_nbatch; ++i)
1370  {
1371  ParallelHashJoinBatch *shared =
1372  NthParallelHashJoinBatch(old_batches, i);
1373 
1374  old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1375  ParallelWorkerNumber + 1,
1376  &pstate->fileset);
1377  }
1378 
1379  /* Join in the effort to repartition them. */
1380  for (i = 1; i < old_nbatch; ++i)
1381  {
1382  MinimalTuple tuple;
1383  uint32 hashvalue;
1384 
1385  /* Scan one partition from the previous generation. */
1386  sts_begin_parallel_scan(old_inner_tuples[i]);
1387  while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1388  {
1389  size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1390  int bucketno;
1391  int batchno;
1392 
1393  /* Decide which partition it goes to in the new generation. */
1394  ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1395  &batchno);
1396 
1397  hashtable->batches[batchno].estimated_size += tuple_size;
1398  ++hashtable->batches[batchno].ntuples;
1399  ++hashtable->batches[i].old_ntuples;
1400 
1401  /* Store the tuple in its new batch. */
1402  sts_puttuple(hashtable->batches[batchno].inner_tuples,
1403  &hashvalue, tuple);
1404 
1404 
1405  CHECK_FOR_INTERRUPTS();
1406  }
1407  sts_end_parallel_scan(old_inner_tuples[i]);
1408  }
1409 
1410  pfree(old_inner_tuples);
1411 }
1412 
1413 /*
1414  * Transfer the backend-local per-batch counters to the shared totals.
1415  */
1416 static void
1417 ExecParallelHashMergeCounters(HashJoinTable hashtable)
1418 {
1419  ParallelHashJoinState *pstate = hashtable->parallel_state;
1420  int i;
1421 
1422  LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1423  pstate->total_tuples = 0;
1424  for (i = 0; i < hashtable->nbatch; ++i)
1425  {
1426  ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1427 
1428  batch->shared->size += batch->size;
1429  batch->shared->estimated_size += batch->estimated_size;
1430  batch->shared->ntuples += batch->ntuples;
1431  batch->shared->old_ntuples += batch->old_ntuples;
1432  batch->size = 0;
1433  batch->estimated_size = 0;
1434  batch->ntuples = 0;
1435  batch->old_ntuples = 0;
1436  pstate->total_tuples += batch->shared->ntuples;
1437  }
1438  LWLockRelease(&pstate->lock);
1439 }
1440 
1441 /*
1442  * ExecHashIncreaseNumBuckets
1443  * increase the original number of buckets in order to reduce
1444  * number of tuples per bucket
1445  */
1446 static void
1447 ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1448 {
1449  HashMemoryChunk chunk;
1450 
1451  /* do nothing if not an increase (it's called increase for a reason) */
1452  if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
1453  return;
1454 
1455 #ifdef HJDEBUG
1456  printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1457  hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1458 #endif
1459 
1460  hashtable->nbuckets = hashtable->nbuckets_optimal;
1461  hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1462 
1463  Assert(hashtable->nbuckets > 1);
1464  Assert(hashtable->nbuckets <= (INT_MAX / 2));
1465  Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1466 
1467  /*
1468  * Just reallocate the proper number of buckets - we don't need to walk
1469  * through them - we can walk the dense-allocated chunks (just like in
1470  * ExecHashIncreaseNumBatches, but without all the copying into new
1471  * chunks)
1472  */
1473  hashtable->buckets.unshared =
1474  repalloc_array(hashtable->buckets.unshared,
1475  HashJoinTuple, hashtable->nbuckets);
1476 
1477  memset(hashtable->buckets.unshared, 0,
1478  hashtable->nbuckets * sizeof(HashJoinTuple));
1479 
1480  /* scan through all tuples in all chunks to rebuild the hash table */
1481  for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1482  {
1483  /* process all tuples stored in this chunk */
1484  size_t idx = 0;
1485 
1486  while (idx < chunk->used)
1487  {
1488  HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1489  int bucketno;
1490  int batchno;
1491 
1492  ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1493  &bucketno, &batchno);
1494 
1495  /* add the tuple to the proper bucket */
1496  hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1497  hashtable->buckets.unshared[bucketno] = hashTuple;
1498 
1499  /* advance index past the tuple */
1500  idx += MAXALIGN(HJTUPLE_OVERHEAD +
1501  HJTUPLE_MINTUPLE(hashTuple)->t_len);
1502  }
1503 
1504  /* allow this loop to be cancellable */
1505  CHECK_FOR_INTERRUPTS();
1506  }
1507 }
1508 
1509 static void
1510 ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1511 {
1512  ParallelHashJoinState *pstate = hashtable->parallel_state;
1513  int i;
1514  HashMemoryChunk chunk;
1515  dsa_pointer chunk_s;
1516 
1518 
1519  /*
1520  * It's unlikely, but we need to be prepared for new participants to show
1521  * up while we're in the middle of this operation so we need to switch on
1522  * barrier phase here.
1523  */
1525  {
1527  /* Elect one participant to prepare to increase nbuckets. */
1530  {
1531  size_t size;
1532  dsa_pointer_atomic *buckets;
1533 
1534  /* Double the size of the bucket array. */
1535  pstate->nbuckets *= 2;
1536  size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1537  hashtable->batches[0].shared->size += size / 2;
1538  dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1539  hashtable->batches[0].shared->buckets =
1540  dsa_allocate(hashtable->area, size);
1541  buckets = (dsa_pointer_atomic *)
1542  dsa_get_address(hashtable->area,
1543  hashtable->batches[0].shared->buckets);
1544  for (i = 0; i < pstate->nbuckets; ++i)
1545  dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1546 
1547  /* Put the chunk list onto the work queue. */
1548  pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1549 
1550  /* Clear the flag. */
1551  pstate->growth = PHJ_GROWTH_OK;
1552  }
1553  /* Fall through. */
1554 
1556  /* Wait for the above to complete. */
1559  /* Fall through. */
1560 
1562  /* Reinsert all tuples into the hash table. */
1563  ExecParallelHashEnsureBatchAccessors(hashtable);
1564  ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1565  while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1566  {
1567  size_t idx = 0;
1568 
1569  while (idx < chunk->used)
1570  {
1571  HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1572  dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1573  int bucketno;
1574  int batchno;
1575 
1576  ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1577  &bucketno, &batchno);
1578  Assert(batchno == 0);
1579 
1580  /* add the tuple to the proper bucket */
1581  ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1582  hashTuple, shared);
1583 
1584  /* advance index past the tuple */
1586  HJTUPLE_MINTUPLE(hashTuple)->t_len);
1587  }
1588 
1589  /* allow this loop to be cancellable */
1590  CHECK_FOR_INTERRUPTS();
1591  }
1594  }
1595 }
1596 
1597 /*
1598  * ExecHashTableInsert
1599  * insert a tuple into the hash table depending on the hash value
1600  * it may just go to a temp file for later batches
1601  *
1602  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1603  * tuple; the minimal case in particular is certain to happen while reloading
1604  * tuples from batch files. We could save some cycles in the regular-tuple
1605  * case by not forcing the slot contents into minimal form; not clear if it's
1606  * worth the messiness required.
1607  */
1608 void
1609 ExecHashTableInsert(HashJoinTable hashtable,
1610  TupleTableSlot *slot,
1611  uint32 hashvalue)
1612 {
1613  bool shouldFree;
1614  MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1615  int bucketno;
1616  int batchno;
1617 
1618  ExecHashGetBucketAndBatch(hashtable, hashvalue,
1619  &bucketno, &batchno);
1620 
1621  /*
1622  * decide whether to put the tuple in the hash table or a temp file
1623  */
1624  if (batchno == hashtable->curbatch)
1625  {
1626  /*
1627  * put the tuple in hash table
1628  */
1629  HashJoinTuple hashTuple;
1630  int hashTupleSize;
1631  double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1632 
1633  /* Create the HashJoinTuple */
1634  hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
1635  hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1636 
1637  hashTuple->hashvalue = hashvalue;
1638  memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1639 
1640  /*
1641  * We always reset the tuple-matched flag on insertion. This is okay
1642  * even when reloading a tuple from a batch file, since the tuple
1643  * could not possibly have been matched to an outer tuple before it
1644  * went into the batch file.
1645  */
1646  HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1647 
1648  /* Push it onto the front of the bucket's list */
1649  hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1650  hashtable->buckets.unshared[bucketno] = hashTuple;
1651 
1652  /*
1653  * Increase the (optimal) number of buckets if we just exceeded the
1654  * NTUP_PER_BUCKET threshold, but only when there's still a single
1655  * batch.
1656  */
1657  if (hashtable->nbatch == 1 &&
1658  ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1659  {
1660  /* Guard against integer overflow and alloc size overflow */
1661  if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1662  hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1663  {
1664  hashtable->nbuckets_optimal *= 2;
1665  hashtable->log2_nbuckets_optimal += 1;
1666  }
1667  }
1668 
1669  /* Account for space used, and back off if we've used too much */
1670  hashtable->spaceUsed += hashTupleSize;
1671  if (hashtable->spaceUsed > hashtable->spacePeak)
1672  hashtable->spacePeak = hashtable->spaceUsed;
1673  if (hashtable->spaceUsed +
1674  hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
1675  > hashtable->spaceAllowed)
1676  ExecHashIncreaseNumBatches(hashtable);
1677  }
1678  else
1679  {
1680  /*
1681  * put the tuple into a temp file for later batches
1682  */
1683  Assert(batchno > hashtable->curbatch);
1684  ExecHashJoinSaveTuple(tuple,
1685  hashvalue,
1686  &hashtable->innerBatchFile[batchno]);
1687  }
1688 
1689  if (shouldFree)
1690  heap_free_minimal_tuple(tuple);
1691 }
1692 
1693 /*
1694  * ExecParallelHashTableInsert
1695  * insert a tuple into a shared hash table or shared batch tuplestore
1696  */
1697 void
1698 ExecParallelHashTableInsert(HashJoinTable hashtable,
1699  TupleTableSlot *slot,
1700  uint32 hashvalue)
1701 {
1702  bool shouldFree;
1703  MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1704  dsa_pointer shared;
1705  int bucketno;
1706  int batchno;
1707 
1708 retry:
1709  ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1710 
1711  if (batchno == 0)
1712  {
1713  HashJoinTuple hashTuple;
1714 
1715  /* Try to load it into memory. */
1718  hashTuple = ExecParallelHashTupleAlloc(hashtable,
1719  HJTUPLE_OVERHEAD + tuple->t_len,
1720  &shared);
1721  if (hashTuple == NULL)
1722  goto retry;
1723 
1724  /* Store the hash value in the HashJoinTuple header. */
1725  hashTuple->hashvalue = hashvalue;
1726  memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1727 
1728  /* Push it onto the front of the bucket's list */
1729  ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1730  hashTuple, shared);
1731  }
1732  else
1733  {
1734  size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1735 
1736  Assert(batchno > 0);
1737 
1738  /* Try to preallocate space in the batch if necessary. */
1739  if (hashtable->batches[batchno].preallocated < tuple_size)
1740  {
1741  if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1742  goto retry;
1743  }
1744 
1745  Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1746  hashtable->batches[batchno].preallocated -= tuple_size;
1747  sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1748  tuple);
1749  }
1750  ++hashtable->batches[batchno].ntuples;
1751 
1752  if (shouldFree)
1753  heap_free_minimal_tuple(tuple);
1754 }
1755 
1756 /*
1757  * Insert a tuple into the current hash table. Unlike
1758  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1759  * to other batches or to run out of memory, and should only be called with
1760  * tuples that belong in the current batch once growth has been disabled.
1761  */
1762 void
1763 ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1764  TupleTableSlot *slot,
1765  uint32 hashvalue)
1766 {
1767  bool shouldFree;
1768  MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1769  HashJoinTuple hashTuple;
1770  dsa_pointer shared;
1771  int batchno;
1772  int bucketno;
1773 
1774  ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1775  Assert(batchno == hashtable->curbatch);
1776  hashTuple = ExecParallelHashTupleAlloc(hashtable,
1777  HJTUPLE_OVERHEAD + tuple->t_len,
1778  &shared);
1779  hashTuple->hashvalue = hashvalue;
1780  memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1781  HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1782  ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1783  hashTuple, shared);
1784 
1785  if (shouldFree)
1786  heap_free_minimal_tuple(tuple);
1787 }
1788 
1789 /*
1790  * ExecHashGetHashValue
1791  * Compute the hash value for a tuple
1792  *
1793  * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
1794  * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
1795  * is false (meaning it's the HashJoin's inner node, Hash), econtext,
1796  * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
1797  * being suitable for tuples from the node below the Hash. Conversely, if
1798  * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
1799  * be appropriate for tuples from HashJoin's outer node.
1800  *
1801  * A true result means the tuple's hash value has been successfully computed
1802  * and stored at *hashvalue. A false result means the tuple cannot match
1803  * because it contains a null attribute, and hence it should be discarded
1804  * immediately. (If keep_nulls is true then false is never returned.)
1805  */
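/*
 * Sketch of the combining scheme implemented below (not an additional API):
 * starting from hashkey = 0, each key column first rotates the running
 * value left by one bit and then XORs in that column's hash, i.e.
 *   hashkey = pg_rotate_left32(hashkey, 1) ^ hash_i(keyval_i)
 * so equal values contribute differently depending on their position in
 * the key list, and a NULL key simply leaves the running value unchanged.
 */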
1806 bool
1807 ExecHashGetHashValue(HashJoinTable hashtable,
1808  ExprContext *econtext,
1809  List *hashkeys,
1810  bool outer_tuple,
1811  bool keep_nulls,
1812  uint32 *hashvalue)
1813 {
1814  uint32 hashkey = 0;
1815  FmgrInfo *hashfunctions;
1816  ListCell *hk;
1817  int i = 0;
1818  MemoryContext oldContext;
1819 
1820  /*
1821  * We reset the eval context each time to reclaim any memory leaked in the
1822  * hashkey expressions.
1823  */
1824  ResetExprContext(econtext);
1825 
1826  oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
1827 
1828  if (outer_tuple)
1829  hashfunctions = hashtable->outer_hashfunctions;
1830  else
1831  hashfunctions = hashtable->inner_hashfunctions;
1832 
1833  foreach(hk, hashkeys)
1834  {
1835  ExprState *keyexpr = (ExprState *) lfirst(hk);
1836  Datum keyval;
1837  bool isNull;
1838 
1839  /* combine successive hashkeys by rotating */
1840  hashkey = pg_rotate_left32(hashkey, 1);
1841 
1842  /*
1843  * Get the join attribute value of the tuple
1844  */
1845  keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
1846 
1847  /*
1848  * If the attribute is NULL, and the join operator is strict, then
1849  * this tuple cannot pass the join qual so we can reject it
1850  * immediately (unless we're scanning the outside of an outer join, in
1851  * which case we must not reject it). Otherwise we act like the
1852  * hashcode of NULL is zero (this will support operators that act like
1853  * IS NOT DISTINCT, though not any more-random behavior). We treat
1854  * the hash support function as strict even if the operator is not.
1855  *
1856  * Note: currently, all hashjoinable operators must be strict since
1857  * the hash index AM assumes that. However, it takes so little extra
1858  * code here to allow non-strict that we may as well do it.
1859  */
1860  if (isNull)
1861  {
1862  if (hashtable->hashStrict[i] && !keep_nulls)
1863  {
1864  MemoryContextSwitchTo(oldContext);
1865  return false; /* cannot match */
1866  }
1867  /* else, leave hashkey unmodified, equivalent to hashcode 0 */
1868  }
1869  else
1870  {
1871  /* Compute the hash function */
1872  uint32 hkey;
1873 
1874  hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
1875  hashkey ^= hkey;
1876  }
1877 
1878  i++;
1879  }
1880 
1881  MemoryContextSwitchTo(oldContext);
1882 
1883  *hashvalue = hashkey;
1884  return true;
1885 }
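
A minimal standalone sketch of the key-combination step above (not part of
nodeHash.c; the hypothetical key_hashes[] stands in for the per-key
FunctionCall1Coll() results):

#include <stdint.h>

/* Combine per-key hashes the way ExecHashGetHashValue does: rotate the
 * accumulator left by one bit, then XOR in the next key's hash.  A NULL
 * key contributes nothing, which is equivalent to hashcode 0. */
static uint32_t
combine_key_hashes(const uint32_t *key_hashes, int nkeys)
{
    uint32_t hashkey = 0;

    for (int i = 0; i < nkeys; i++)
    {
        hashkey = (hashkey << 1) | (hashkey >> 31); /* pg_rotate_left32(hashkey, 1) */
        hashkey ^= key_hashes[i];
    }
    return hashkey;
}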
1886 
1887 /*
1888  * ExecHashGetBucketAndBatch
1889  * Determine the bucket number and batch number for a hash value
1890  *
1891  * Note: on-the-fly increases of nbatch must not change the bucket number
1892  * for a given hash code (since we don't move tuples to different hash
1893  * chains), and must only cause the batch number to remain the same or
1894  * increase. Our algorithm is
1895  * bucketno = hashvalue MOD nbuckets
1896  * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1897  * where nbuckets and nbatch are both expected to be powers of 2, so we can
1898  * do the computations by shifting and masking. (This assumes that all hash
1899  * functions are good about randomizing all their output bits, else we are
1900  * likely to have very skewed bucket or batch occupancy.)
1901  *
1902  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1903  * bucket count growth. Once we start batching, the value is fixed and does
1904  * not change over the course of the join (making it possible to compute batch
1905  * number the way we do here).
1906  *
1907  * nbatch is always a power of 2; we increase it only by doubling it. This
1908  * effectively adds one more bit to the top of the batchno. In very large
1909  * joins, we might run out of bits to add, so we do this by rotating the hash
1910  * value. This causes batchno to steal bits from bucketno when the number of
1911  * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1912  * than to lose the ability to divide batches.
1913  */
1914 void
1915 ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1916  uint32 hashvalue,
1917  int *bucketno,
1918  int *batchno)
1919 {
1920  uint32 nbuckets = (uint32) hashtable->nbuckets;
1921  uint32 nbatch = (uint32) hashtable->nbatch;
1922 
1923  if (nbatch > 1)
1924  {
1925  *bucketno = hashvalue & (nbuckets - 1);
1926  *batchno = pg_rotate_right32(hashvalue,
1927  hashtable->log2_nbuckets) & (nbatch - 1);
1928  }
1929  else
1930  {
1931  *bucketno = hashvalue & (nbuckets - 1);
1932  *batchno = 0;
1933  }
1934 }
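
A standalone sketch of the masking-and-rotation arithmetic above (not part of
nodeHash.c; assumes power-of-2 nbuckets and nbatch, as the comment requires):

#include <stdint.h>

static void
get_bucket_and_batch(uint32_t hashvalue, uint32_t nbuckets, int log2_nbuckets,
                     uint32_t nbatch, uint32_t *bucketno, uint32_t *batchno)
{
    /* pg_rotate_right32(hashvalue, log2_nbuckets), written so that it is
     * well defined even when log2_nbuckets == 0 */
    uint32_t rotated = (hashvalue >> log2_nbuckets) |
        (hashvalue << ((32 - log2_nbuckets) & 31));

    *bucketno = hashvalue & (nbuckets - 1); /* hashvalue MOD nbuckets */
    *batchno = rotated & (nbatch - 1);      /* ROR(hashvalue, log2_nbuckets) MOD nbatch */
}

For example, hashvalue 0xDEADBEEF with nbuckets = 1024 (log2_nbuckets = 10) and
nbatch = 4 gives bucketno = 0x2EF and batchno = 3; doubling nbatch to 8 leaves
bucketno untouched while batchno can only become 3 or 7, which is exactly the
"stay the same or increase" guarantee described above.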
1935 
1936 /*
1937  * ExecScanHashBucket
1938  * scan a hash bucket for matches to the current outer tuple
1939  *
1940  * The current outer tuple must be stored in econtext->ecxt_outertuple.
1941  *
1942  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1943  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
1944  * for the latter.
1945  */
1946 bool
1947 ExecScanHashBucket(HashJoinState *hjstate,
1948  ExprContext *econtext)
1949 {
1950  ExprState *hjclauses = hjstate->hashclauses;
1951  HashJoinTable hashtable = hjstate->hj_HashTable;
1952  HashJoinTuple hashTuple = hjstate->hj_CurTuple;
1953  uint32 hashvalue = hjstate->hj_CurHashValue;
1954 
1955  /*
1956  * hj_CurTuple is the address of the tuple last returned from the current
1957  * bucket, or NULL if it's time to start scanning a new bucket.
1958  *
1959  * If the tuple hashed to a skew bucket then scan the skew bucket
1960  * otherwise scan the standard hashtable bucket.
1961  */
1962  if (hashTuple != NULL)
1963  hashTuple = hashTuple->next.unshared;
1964  else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
1965  hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
1966  else
1967  hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
1968 
1969  while (hashTuple != NULL)
1970  {
1971  if (hashTuple->hashvalue == hashvalue)
1972  {
1973  TupleTableSlot *inntuple;
1974 
1975  /* insert hashtable's tuple into exec slot so ExecQual sees it */
1976  inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
1977  hjstate->hj_HashTupleSlot,
1978  false); /* do not pfree */
1979  econtext->ecxt_innertuple = inntuple;
1980 
1981  if (ExecQualAndReset(hjclauses, econtext))
1982  {
1983  hjstate->hj_CurTuple = hashTuple;
1984  return true;
1985  }
1986  }
1987 
1988  hashTuple = hashTuple->next.unshared;
1989  }
1990 
1991  /*
1992  * no match
1993  */
1994  return false;
1995 }
1996 
1997 /*
1998  * ExecParallelScanHashBucket
1999  * scan a hash bucket for matches to the current outer tuple
2000  *
2001  * The current outer tuple must be stored in econtext->ecxt_outertuple.
2002  *
2003  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2004  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2005  * for the latter.
2006  */
2007 bool
2008 ExecParallelScanHashBucket(HashJoinState *hjstate,
2009  ExprContext *econtext)
2010 {
2011  ExprState *hjclauses = hjstate->hashclauses;
2012  HashJoinTable hashtable = hjstate->hj_HashTable;
2013  HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2014  uint32 hashvalue = hjstate->hj_CurHashValue;
2015 
2016  /*
2017  * hj_CurTuple is the address of the tuple last returned from the current
2018  * bucket, or NULL if it's time to start scanning a new bucket.
2019  */
2020  if (hashTuple != NULL)
2021  hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2022  else
2023  hashTuple = ExecParallelHashFirstTuple(hashtable,
2024  hjstate->hj_CurBucketNo);
2025 
2026  while (hashTuple != NULL)
2027  {
2028  if (hashTuple->hashvalue == hashvalue)
2029  {
2030  TupleTableSlot *inntuple;
2031 
2032  /* insert hashtable's tuple into exec slot so ExecQual sees it */
2033  inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2034  hjstate->hj_HashTupleSlot,
2035  false); /* do not pfree */
2036  econtext->ecxt_innertuple = inntuple;
2037 
2038  if (ExecQualAndReset(hjclauses, econtext))
2039  {
2040  hjstate->hj_CurTuple = hashTuple;
2041  return true;
2042  }
2043  }
2044 
2045  hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2046  }
2047 
2048  /*
2049  * no match
2050  */
2051  return false;
2052 }
2053 
2054 /*
2055  * ExecPrepHashTableForUnmatched
2056  * set up for a series of ExecScanHashTableForUnmatched calls
2057  */
2058 void
2059 ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2060 {
2061  /*----------
2062  * During this scan we use the HashJoinState fields as follows:
2063  *
2064  * hj_CurBucketNo: next regular bucket to scan
2065  * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2066  * hj_CurTuple: last tuple returned, or NULL to start next bucket
2067  *----------
2068  */
2069  hjstate->hj_CurBucketNo = 0;
2070  hjstate->hj_CurSkewBucketNo = 0;
2071  hjstate->hj_CurTuple = NULL;
2072 }
2073 
2074 /*
2075  * Decide if this process is allowed to run the unmatched scan. If so, the
2076  * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2077  * Otherwise the batch is detached and false is returned.
2078  */
2079 bool
2080 ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2081 {
2082  HashJoinTable hashtable = hjstate->hj_HashTable;
2083  int curbatch = hashtable->curbatch;
2084  ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2085 
2086  Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2087 
2088  /*
2089  * It would not be deadlock-free to wait on the batch barrier, because it
2090  * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2091  * already emitted tuples. Therefore, we'll hold a wait-free election:
2092  * only one process can continue to the next phase, and all others detach
2093  * from this batch. They can still do any work on other batches, if there
2094  * are any.
2095  */
2096  if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2097  {
2098  /* This process considers the batch to be done. */
2099  hashtable->batches[hashtable->curbatch].done = true;
2100 
2101  /* Make sure any temporary files are closed. */
2102  sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2103  sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2104 
2105  /*
2106  * Track largest batch we've seen, which would normally happen in
2107  * ExecHashTableDetachBatch().
2108  */
2109  hashtable->spacePeak =
2110  Max(hashtable->spacePeak,
2111  batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2112  hashtable->curbatch = -1;
2113  return false;
2114  }
2115 
2116  /* Now we are alone with this batch. */
2117  Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2118  Assert(BarrierParticipants(&batch->batch_barrier) == 1);
2119 
2120  /*
2121  * Has another process decided to give up early and command all processes
2122  * to skip the unmatched scan?
2123  */
2124  if (batch->skip_unmatched)
2125  {
2126  hashtable->batches[hashtable->curbatch].done = true;
2127  ExecHashTableDetachBatch(hashtable);
2128  return false;
2129  }
2130 
2131  /* Now prepare the process local state, just as for non-parallel join. */
2132  ExecPrepHashTableForUnmatched(hjstate);
2133 
2134  return true;
2135 }
2136 
2137 /*
2138  * ExecScanHashTableForUnmatched
2139  * scan the hash table for unmatched inner tuples
2140  *
2141  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2142  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2143  * for the latter.
2144  */
2145 bool
2146 ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2147 {
2148  HashJoinTable hashtable = hjstate->hj_HashTable;
2149  HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2150 
2151  for (;;)
2152  {
2153  /*
2154  * hj_CurTuple is the address of the tuple last returned from the
2155  * current bucket, or NULL if it's time to start scanning a new
2156  * bucket.
2157  */
2158  if (hashTuple != NULL)
2159  hashTuple = hashTuple->next.unshared;
2160  else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2161  {
2162  hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2163  hjstate->hj_CurBucketNo++;
2164  }
2165  else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2166  {
2167  int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2168 
2169  hashTuple = hashtable->skewBucket[j]->tuples;
2170  hjstate->hj_CurSkewBucketNo++;
2171  }
2172  else
2173  break; /* finished all buckets */
2174 
2175  while (hashTuple != NULL)
2176  {
2177  if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2178  {
2179  TupleTableSlot *inntuple;
2180 
2181  /* insert hashtable's tuple into exec slot */
2182  inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2183  hjstate->hj_HashTupleSlot,
2184  false); /* do not pfree */
2185  econtext->ecxt_innertuple = inntuple;
2186 
2187  /*
2188  * Reset temp memory each time; although this function doesn't
2189  * do any qual eval, the caller will, so let's keep it
2190  * parallel to ExecScanHashBucket.
2191  */
2192  ResetExprContext(econtext);
2193 
2194  hjstate->hj_CurTuple = hashTuple;
2195  return true;
2196  }
2197 
2198  hashTuple = hashTuple->next.unshared;
2199  }
2200 
2201  /* allow this loop to be cancellable */
2202  CHECK_FOR_INTERRUPTS();
2203  }
2204 
2205  /*
2206  * no more unmatched tuples
2207  */
2208  return false;
2209 }
2210 
2211 /*
2212  * ExecParallelScanHashTableForUnmatched
2213  * scan the hash table for unmatched inner tuples, in parallel join
2214  *
2215  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2216  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2217  * for the latter.
2218  */
2219 bool
2220 ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2221  ExprContext *econtext)
2222 {
2223  HashJoinTable hashtable = hjstate->hj_HashTable;
2224  HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2225 
2226  for (;;)
2227  {
2228  /*
2229  * hj_CurTuple is the address of the tuple last returned from the
2230  * current bucket, or NULL if it's time to start scanning a new
2231  * bucket.
2232  */
2233  if (hashTuple != NULL)
2234  hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2235  else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2236  hashTuple = ExecParallelHashFirstTuple(hashtable,
2237  hjstate->hj_CurBucketNo++);
2238  else
2239  break; /* finished all buckets */
2240 
2241  while (hashTuple != NULL)
2242  {
2243  if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2244  {
2245  TupleTableSlot *inntuple;
2246 
2247  /* insert hashtable's tuple into exec slot */
2248  inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2249  hjstate->hj_HashTupleSlot,
2250  false); /* do not pfree */
2251  econtext->ecxt_innertuple = inntuple;
2252 
2253  /*
2254  * Reset temp memory each time; although this function doesn't
2255  * do any qual eval, the caller will, so let's keep it
2256  * parallel to ExecScanHashBucket.
2257  */
2258  ResetExprContext(econtext);
2259 
2260  hjstate->hj_CurTuple = hashTuple;
2261  return true;
2262  }
2263 
2264  hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2265  }
2266 
2267  /* allow this loop to be cancellable */
2268  CHECK_FOR_INTERRUPTS();
2269  }
2270 
2271  /*
2272  * no more unmatched tuples
2273  */
2274  return false;
2275 }
2276 
2277 /*
2278  * ExecHashTableReset
2279  *
2280  * reset hash table header for new batch
2281  */
2282 void
2283 ExecHashTableReset(HashJoinTable hashtable)
2284 {
2285  MemoryContext oldcxt;
2286  int nbuckets = hashtable->nbuckets;
2287 
2288  /*
2289  * Release all the hash buckets and tuples acquired in the prior pass, and
2290  * reinitialize the context for a new pass.
2291  */
2292  MemoryContextReset(hashtable->batchCxt);
2293  oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2294 
2295  /* Reallocate and reinitialize the hash bucket headers. */
2296  hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2297 
2298  hashtable->spaceUsed = 0;
2299 
2300  MemoryContextSwitchTo(oldcxt);
2301 
2302  /* Forget the chunks (the memory was freed by the context reset above). */
2303  hashtable->chunks = NULL;
2304 }
2305 
2306 /*
2307  * ExecHashTableResetMatchFlags
2308  * Clear all the HeapTupleHeaderHasMatch flags in the table
2309  */
2310 void
2311 ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2312 {
2313  HashJoinTuple tuple;
2314  int i;
2315 
2316  /* Reset all flags in the main table ... */
2317  for (i = 0; i < hashtable->nbuckets; i++)
2318  {
2319  for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2320  tuple = tuple->next.unshared)
2321  HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2322  }
2323 
2324  /* ... and the same for the skew buckets, if any */
2325  for (i = 0; i < hashtable->nSkewBuckets; i++)
2326  {
2327  int j = hashtable->skewBucketNums[i];
2328  HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2329 
2330  for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
2331  HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2332  }
2333 }
2334 
2335 
2336 void
2337 ExecReScanHash(HashState *node)
2338 {
2339  PlanState *outerPlan = outerPlanState(node);
2340 
2341  /*
2342  * if chgParam of subnode is not null then plan will be re-scanned by
2343  * first ExecProcNode.
2344  */
2345  if (outerPlan->chgParam == NULL)
2346  ExecReScan(outerPlan);
2347 }
2348 
2349 
2350 /*
2351  * ExecHashBuildSkewHash
2352  *
2353  * Set up for skew optimization if we can identify the most common values
2354  * (MCVs) of the outer relation's join key. We make a skew hash bucket
2355  * for the hash value of each MCV, up to the number of slots allowed
2356  * based on available memory.
2357  */
2358 static void
2359 ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
2360 {
2361  HeapTupleData *statsTuple;
2362  AttStatsSlot sslot;
2363 
2364  /* Do nothing if planner didn't identify the outer relation's join key */
2365  if (!OidIsValid(node->skewTable))
2366  return;
2367  /* Also, do nothing if we don't have room for at least one skew bucket */
2368  if (mcvsToUse <= 0)
2369  return;
2370 
2371  /*
2372  * Try to find the MCV statistics for the outer relation's join key.
2373  */
2374  statsTuple = SearchSysCache3(STATRELATTINH,
2375  ObjectIdGetDatum(node->skewTable),
2376  Int16GetDatum(node->skewColumn),
2377  BoolGetDatum(node->skewInherit));
2378  if (!HeapTupleIsValid(statsTuple))
2379  return;
2380 
2381  if (get_attstatsslot(&sslot, statsTuple,
2382  STATISTIC_KIND_MCV, InvalidOid,
2383  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2384  {
2385  double frac;
2386  int nbuckets;
2387  FmgrInfo *hashfunctions;
2388  int i;
2389 
2390  if (mcvsToUse > sslot.nvalues)
2391  mcvsToUse = sslot.nvalues;
2392 
2393  /*
2394  * Calculate the expected fraction of outer relation that will
2395  * participate in the skew optimization. If this isn't at least
2396  * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2397  */
2398  frac = 0;
2399  for (i = 0; i < mcvsToUse; i++)
2400  frac += sslot.numbers[i];
2401  if (frac < SKEW_MIN_OUTER_FRACTION)
2402  {
2403  free_attstatsslot(&sslot);
2404  ReleaseSysCache(statsTuple);
2405  return;
2406  }
2407 
2408  /*
2409  * Okay, set up the skew hashtable.
2410  *
2411  * skewBucket[] is an open addressing hashtable with a power of 2 size
2412  * that is greater than the number of MCV values. (This ensures there
2413  * will be at least one null entry, so searches will always
2414  * terminate.)
2415  *
2416  * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2417  * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2418  * since we limit pg_statistic entries to much less than that.
2419  */
2420  nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2421  /* use two more bits just to help avoid collisions */
2422  nbuckets <<= 2;
2423 
2424  hashtable->skewEnabled = true;
2425  hashtable->skewBucketLen = nbuckets;
2426 
2427  /*
2428  * We allocate the bucket memory in the hashtable's batch context. It
2429  * is only needed during the first batch, and this ensures it will be
2430  * automatically removed once the first batch is done.
2431  */
2432  hashtable->skewBucket = (HashSkewBucket **)
2433  MemoryContextAllocZero(hashtable->batchCxt,
2434  nbuckets * sizeof(HashSkewBucket *));
2435  hashtable->skewBucketNums = (int *)
2436  MemoryContextAllocZero(hashtable->batchCxt,
2437  mcvsToUse * sizeof(int));
2438 
2439  hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2440  + mcvsToUse * sizeof(int);
2441  hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2442  + mcvsToUse * sizeof(int);
2443  if (hashtable->spaceUsed > hashtable->spacePeak)
2444  hashtable->spacePeak = hashtable->spaceUsed;
2445 
2446  /*
2447  * Create a skew bucket for each MCV hash value.
2448  *
2449  * Note: it is very important that we create the buckets in order of
2450  * decreasing MCV frequency. If we have to remove some buckets, they
2451  * must be removed in reverse order of creation (see notes in
2452  * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2453  * be removed first.
2454  */
2455  hashfunctions = hashtable->outer_hashfunctions;
2456 
2457  for (i = 0; i < mcvsToUse; i++)
2458  {
2459  uint32 hashvalue;
2460  int bucket;
2461 
2462  hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
2463  hashtable->collations[0],
2464  sslot.values[i]));
2465 
2466  /*
2467  * While we have not hit a hole in the hashtable and have not hit
2468  * the desired bucket, we have collided with some previous hash
2469  * value, so try the next bucket location. NB: this code must
2470  * match ExecHashGetSkewBucket.
2471  */
2472  bucket = hashvalue & (nbuckets - 1);
2473  while (hashtable->skewBucket[bucket] != NULL &&
2474  hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2475  bucket = (bucket + 1) & (nbuckets - 1);
2476 
2477  /*
2478  * If we found an existing bucket with the same hashvalue, leave
2479  * it alone. It's okay for two MCVs to share a hashvalue.
2480  */
2481  if (hashtable->skewBucket[bucket] != NULL)
2482  continue;
2483 
2484  /* Okay, create a new skew bucket for this hashvalue. */
2485  hashtable->skewBucket[bucket] = (HashSkewBucket *)
2486  MemoryContextAlloc(hashtable->batchCxt,
2487  sizeof(HashSkewBucket));
2488  hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2489  hashtable->skewBucket[bucket]->tuples = NULL;
2490  hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2491  hashtable->nSkewBuckets++;
2492  hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2493  hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
2494  if (hashtable->spaceUsed > hashtable->spacePeak)
2495  hashtable->spacePeak = hashtable->spaceUsed;
2496  }
2497 
2498  free_attstatsslot(&sslot);
2499  }
2500 
2501  ReleaseSysCache(statsTuple);
2502 }
2503 
2504 /*
2505  * ExecHashGetSkewBucket
2506  *
2507  * Returns the index of the skew bucket for this hashvalue,
2508  * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2509  * associated with any active skew bucket.
2510  */
2511 int
2512 ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2513 {
2514  int bucket;
2515 
2516  /*
2517  * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2518  * particular, this happens after the initial batch is done).
2519  */
2520  if (!hashtable->skewEnabled)
2521  return INVALID_SKEW_BUCKET_NO;
2522 
2523  /*
2524  * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2525  */
2526  bucket = hashvalue & (hashtable->skewBucketLen - 1);
2527 
2528  /*
2529  * While we have not hit a hole in the hashtable and have not hit the
2530  * desired bucket, we have collided with some other hash value, so try the
2531  * next bucket location.
2532  */
2533  while (hashtable->skewBucket[bucket] != NULL &&
2534  hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2535  bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2536 
2537  /*
2538  * Found the desired bucket?
2539  */
2540  if (hashtable->skewBucket[bucket] != NULL)
2541  return bucket;
2542 
2543  /*
2544  * There must not be any hashtable entry for this hash value.
2545  */
2546  return INVALID_SKEW_BUCKET_NO;
2547 }
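
The probe above is ordinary open addressing with linear probing over a
power-of-2 table; a standalone sketch of the same lookup (not part of
nodeHash.c; the hypothetical slot_hashes/occupied arrays stand in for
hashtable->skewBucket):

#include <stdbool.h>
#include <stdint.h>

/* Return the slot holding 'hashvalue', or -1 if an empty slot is reached
 * first.  The table is sized so that it is never full (see
 * ExecHashBuildSkewHash), so the scan always terminates. */
static int
probe_skew_slot(const uint32_t *slot_hashes, const bool *occupied,
                int table_len, uint32_t hashvalue)
{
    int bucket = hashvalue & (table_len - 1);

    while (occupied[bucket] && slot_hashes[bucket] != hashvalue)
        bucket = (bucket + 1) & (table_len - 1);

    return occupied[bucket] ? bucket : -1;
}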
2548 
2549 /*
2550  * ExecHashSkewTableInsert
2551  *
2552  * Insert a tuple into the skew hashtable.
2553  *
2554  * This should generally match up with the current-batch case in
2555  * ExecHashTableInsert.
2556  */
2557 static void
2558 ExecHashSkewTableInsert(HashJoinTable hashtable,
2559  TupleTableSlot *slot,
2560  uint32 hashvalue,
2561  int bucketNumber)
2562 {
2563  bool shouldFree;
2564  MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2565  HashJoinTuple hashTuple;
2566  int hashTupleSize;
2567 
2568  /* Create the HashJoinTuple */
2569  hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2570  hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2571  hashTupleSize);
2572  hashTuple->hashvalue = hashvalue;
2573  memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
2574  HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2575 
2576  /* Push it onto the front of the skew bucket's list */
2577  hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
2578  hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2579  Assert(hashTuple != hashTuple->next.unshared);
2580 
2581  /* Account for space used, and back off if we've used too much */
2582  hashtable->spaceUsed += hashTupleSize;
2583  hashtable->spaceUsedSkew += hashTupleSize;
2584  if (hashtable->spaceUsed > hashtable->spacePeak)
2585  hashtable->spacePeak = hashtable->spaceUsed;
2586  while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2587  ExecHashRemoveNextSkewBucket(hashtable);
2588 
2589  /* Check we are not over the total spaceAllowed, either */
2590  if (hashtable->spaceUsed > hashtable->spaceAllowed)
2591  ExecHashIncreaseNumBatches(hashtable);
2592 
2593  if (shouldFree)
2594  heap_free_minimal_tuple(tuple);
2595 }
2596 
2597 /*
2598  * ExecHashRemoveNextSkewBucket
2599  *
2600  * Remove the least valuable skew bucket by pushing its tuples into
2601  * the main hash table.
2602  */
2603 static void
2604 ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2605 {
2606  int bucketToRemove;
2607  HashSkewBucket *bucket;
2608  uint32 hashvalue;
2609  int bucketno;
2610  int batchno;
2611  HashJoinTuple hashTuple;
2612 
2613  /* Locate the bucket to remove */
2614  bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2615  bucket = hashtable->skewBucket[bucketToRemove];
2616 
2617  /*
2618  * Calculate which bucket and batch the tuples belong to in the main
2619  * hashtable. They all have the same hash value, so it's the same for all
2620  * of them. Also note that it's not possible for nbatch to increase while
2621  * we are processing the tuples.
2622  */
2623  hashvalue = bucket->hashvalue;
2624  ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2625 
2626  /* Process all tuples in the bucket */
2627  hashTuple = bucket->tuples;
2628  while (hashTuple != NULL)
2629  {
2630  HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2631  MinimalTuple tuple;
2632  Size tupleSize;
2633 
2634  /*
2635  * This code must agree with ExecHashTableInsert. We do not use
2636  * ExecHashTableInsert directly as ExecHashTableInsert expects a
2637  * TupleTableSlot while we already have HashJoinTuples.
2638  */
2639  tuple = HJTUPLE_MINTUPLE(hashTuple);
2640  tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2641 
2642  /* Decide whether to put the tuple in the hash table or a temp file */
2643  if (batchno == hashtable->curbatch)
2644  {
2645  /* Move the tuple to the main hash table */
2646  HashJoinTuple copyTuple;
2647 
2648  /*
2649  * We must copy the tuple into the dense storage, else it will not
2650  * be found by, eg, ExecHashIncreaseNumBatches.
2651  */
2652  copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2653  memcpy(copyTuple, hashTuple, tupleSize);
2654  pfree(hashTuple);
2655 
2656  copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2657  hashtable->buckets.unshared[bucketno] = copyTuple;
2658 
2659  /* We have reduced skew space, but overall space doesn't change */
2660  hashtable->spaceUsedSkew -= tupleSize;
2661  }
2662  else
2663  {
2664  /* Put the tuple into a temp file for later batches */
2665  Assert(batchno > hashtable->curbatch);
2666  ExecHashJoinSaveTuple(tuple, hashvalue,
2667  &hashtable->innerBatchFile[batchno]);
2668  pfree(hashTuple);
2669  hashtable->spaceUsed -= tupleSize;
2670  hashtable->spaceUsedSkew -= tupleSize;
2671  }
2672 
2673  hashTuple = nextHashTuple;
2674 
2675  /* allow this loop to be cancellable */
2676  CHECK_FOR_INTERRUPTS();
2677  }
2678 
2679  /*
2680  * Free the bucket struct itself and reset the hashtable entry to NULL.
2681  *
2682  * NOTE: this is not nearly as simple as it looks on the surface, because
2683  * of the possibility of collisions in the hashtable. Suppose that hash
2684  * values A and B collide at a particular hashtable entry, and that A was
2685  * entered first so B gets shifted to a different table entry. If we were
2686  * to remove A first then ExecHashGetSkewBucket would mistakenly start
2687  * reporting that B is not in the hashtable, because it would hit the NULL
2688  * before finding B. However, we always remove entries in the reverse
2689  * order of creation, so this failure cannot happen.
2690  */
2691  hashtable->skewBucket[bucketToRemove] = NULL;
2692  hashtable->nSkewBuckets--;
2693  pfree(bucket);
2694  hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2695  hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2696 
2697  /*
2698  * If we have removed all skew buckets then give up on skew optimization.
2699  * Release the arrays since they aren't useful any more.
2700  */
2701  if (hashtable->nSkewBuckets == 0)
2702  {
2703  hashtable->skewEnabled = false;
2704  pfree(hashtable->skewBucket);
2705  pfree(hashtable->skewBucketNums);
2706  hashtable->skewBucket = NULL;
2707  hashtable->skewBucketNums = NULL;
2708  hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2709  hashtable->spaceUsedSkew = 0;
2710  }
2711 }
2712 
2713 /*
2714  * Reserve space in the DSM segment for instrumentation data.
2715  */
2716 void
2717 ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2718 {
2719  size_t size;
2720 
2721  /* don't need this if not instrumenting or no workers */
2722  if (!node->ps.instrument || pcxt->nworkers == 0)
2723  return;
2724 
2725  size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2726  size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2727  shm_toc_estimate_chunk(&pcxt->estimator, size);
2728  shm_toc_estimate_keys(&pcxt->estimator, 1);
2729 }
2730 
2731 /*
2732  * Set up a space in the DSM for all workers to record instrumentation data
2733  * about their hash table.
2734  */
2735 void
2736 ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2737 {
2738  size_t size;
2739 
2740  /* don't need this if not instrumenting or no workers */
2741  if (!node->ps.instrument || pcxt->nworkers == 0)
2742  return;
2743 
2744  size = offsetof(SharedHashInfo, hinstrument) +
2745  pcxt->nworkers * sizeof(HashInstrumentation);
2746  node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2747 
2748  /* Each per-worker area must start out as zeroes. */
2749  memset(node->shared_info, 0, size);
2750 
2751  node->shared_info->num_workers = pcxt->nworkers;
2752  shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2753  node->shared_info);
2754 }
2755 
2756 /*
2757  * Locate the DSM space for hash table instrumentation data that we'll write
2758  * to at shutdown time.
2759  */
2760 void
2761 ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2762 {
2763  SharedHashInfo *shared_info;
2764 
2765  /* don't need this if not instrumenting */
2766  if (!node->ps.instrument)
2767  return;
2768 
2769  /*
2770  * Find our entry in the shared area, and set up a pointer to it so that
2771  * we'll accumulate stats there when shutting down or rebuilding the hash
2772  * table.
2773  */
2774  shared_info = (SharedHashInfo *)
2775  shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2776  node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2777 }
2778 
2779 /*
2780  * Collect EXPLAIN stats if needed, saving them into DSM memory if
2781  * ExecHashInitializeWorker was called, or local storage if not. In the
2782  * parallel case, this must be done in ExecShutdownHash() rather than
2783  * ExecEndHash() because the latter runs after we've detached from the DSM
2784  * segment.
2785  */
2786 void
2787 ExecShutdownHash(HashState *node)
2788 {
2789  /* Allocate save space if EXPLAIN'ing and we didn't do so already */
2790  if (node->ps.instrument && !node->hinstrument)
2791  node->hinstrument = palloc0_object(HashInstrumentation);
2792  /* Now accumulate data for the current (final) hash table */
2793  if (node->hinstrument && node->hashtable)
2794  ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
2795 }
2796 
2797 /*
2798  * Retrieve instrumentation data from workers before the DSM segment is
2799  * detached, so that EXPLAIN can access it.
2800  */
2801 void
2802 ExecHashRetrieveInstrumentation(HashState *node)
2803 {
2804  SharedHashInfo *shared_info = node->shared_info;
2805  size_t size;
2806 
2807  if (shared_info == NULL)
2808  return;
2809 
2810  /* Replace node->shared_info with a copy in backend-local memory. */
2811  size = offsetof(SharedHashInfo, hinstrument) +
2812  shared_info->num_workers * sizeof(HashInstrumentation);
2813  node->shared_info = palloc(size);
2814  memcpy(node->shared_info, shared_info, size);
2815 }
2816 
2817 /*
2818  * Accumulate instrumentation data from 'hashtable' into an
2819  * initially-zeroed HashInstrumentation struct.
2820  *
2821  * This is used to merge information across successive hash table instances
2822  * within a single plan node. We take the maximum values of each interesting
2823  * number. The largest nbuckets and largest nbatch values might have occurred
2824  * in different instances, so there's some risk of confusion from reporting
2825  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2826  * issue if we don't report the largest values. Similarly, we want to report
2827  * the largest spacePeak regardless of whether it happened in the same
2828  * instance as the largest nbuckets or nbatch. All the instances should have
2829  * the same nbuckets_original and nbatch_original; but there's little value
2830  * in depending on that here, so handle them the same way.
2831  */
2832 void
2833 ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2834  HashJoinTable hashtable)
2835 {
2836  instrument->nbuckets = Max(instrument->nbuckets,
2837  hashtable->nbuckets);
2838  instrument->nbuckets_original = Max(instrument->nbuckets_original,
2839  hashtable->nbuckets_original);
2840  instrument->nbatch = Max(instrument->nbatch,
2841  hashtable->nbatch);
2842  instrument->nbatch_original = Max(instrument->nbatch_original,
2843  hashtable->nbatch_original);
2844  instrument->space_peak = Max(instrument->space_peak,
2845  hashtable->spacePeak);
2846 }
2847 
2848 /*
2849  * Allocate 'size' bytes from the currently active HashMemoryChunk
2850  */
2851 static void *
2852 dense_alloc(HashJoinTable hashtable, Size size)
2853 {
2854  HashMemoryChunk newChunk;
2855  char *ptr;
2856 
2857  /* just in case the size is not already aligned properly */
2858  size = MAXALIGN(size);
2859 
2860  /*
2861  * If tuple size is larger than threshold, allocate a separate chunk.
2862  */
2863  if (size > HASH_CHUNK_THRESHOLD)
2864  {
2865  /* allocate new chunk and put it at the beginning of the list */
2866  newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2867  HASH_CHUNK_HEADER_SIZE + size);
2868  newChunk->maxlen = size;
2869  newChunk->used = size;
2870  newChunk->ntuples = 1;
2871 
2872  /*
2873  * Add this chunk to the list after the first existing chunk, so that
2874  * we don't lose the remaining space in the "current" chunk.
2875  */
2876  if (hashtable->chunks != NULL)
2877  {
2878  newChunk->next = hashtable->chunks->next;
2879  hashtable->chunks->next.unshared = newChunk;
2880  }
2881  else
2882  {
2883  newChunk->next.unshared = hashtable->chunks;
2884  hashtable->chunks = newChunk;
2885  }
2886 
2887  return HASH_CHUNK_DATA(newChunk);
2888  }
2889 
2890  /*
2891  * See if we have enough space for it in the current chunk (if any). If
2892  * not, allocate a fresh chunk.
2893  */
2894  if ((hashtable->chunks == NULL) ||
2895  (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2896  {
2897  /* allocate new chunk and put it at the beginning of the list */
2898  newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2899  HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2900 
2901  newChunk->maxlen = HASH_CHUNK_SIZE;
2902  newChunk->used = size;
2903  newChunk->ntuples = 1;
2904 
2905  newChunk->next.unshared = hashtable->chunks;
2906  hashtable->chunks = newChunk;
2907 
2908  return HASH_CHUNK_DATA(newChunk);
2909  }
2910 
2911  /* There is enough space in the current chunk, let's add the tuple */
2912  ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
2913  hashtable->chunks->used += size;
2914  hashtable->chunks->ntuples += 1;
2915 
2916  /* return pointer to the start of the tuple memory */
2917  return ptr;
2918 }
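
dense_alloc is a bump allocator over 32kB chunks (HASH_CHUNK_SIZE); a
simplified standalone sketch of the same pattern (not part of nodeHash.c;
malloc stands in for memory contexts, and the oversized-tuple case and
MAXALIGN rounding are omitted):

#include <stddef.h>
#include <stdlib.h>

#define CHUNK_SIZE (32 * 1024)

typedef struct Chunk
{
    struct Chunk *next;
    size_t maxlen;
    size_t used;
    char data[];    /* tuples are packed densely here */
} Chunk;

static void *
bump_alloc(Chunk **chunks, size_t size)
{
    Chunk *head = *chunks;

    /* Start a fresh chunk when the current one can't fit the request. */
    if (head == NULL || head->maxlen - head->used < size)
    {
        head = malloc(sizeof(Chunk) + CHUNK_SIZE);
        if (head == NULL)
            return NULL;
        head->maxlen = CHUNK_SIZE;
        head->used = 0;
        head->next = *chunks;
        *chunks = head;
    }

    /* Hand out the next 'size' bytes and bump the high-water mark. */
    head->used += size;
    return head->data + head->used - size;
}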
2919 
2920 /*
2921  * Allocate space for a tuple in shared dense storage. This is equivalent to
2922  * dense_alloc but for Parallel Hash using shared memory.
2923  *
2924  * While loading a tuple into shared memory, we might run out of memory and
2925  * decide to repartition, or determine that the load factor is too high and
2926  * decide to expand the bucket array, or discover that another participant has
2927  * commanded us to help do that. Return NULL if number of buckets or batches
2928  * has changed, indicating that the caller must retry (considering the
2929  * possibility that the tuple no longer belongs in the same batch).
2930  */
2931 static HashJoinTuple
2932 ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2933  dsa_pointer *shared)
2934 {
2935  ParallelHashJoinState *pstate = hashtable->parallel_state;
2936  dsa_pointer chunk_shared;
2937  HashMemoryChunk chunk;
2938  Size chunk_size;
2939  HashJoinTuple result;
2940  int curbatch = hashtable->curbatch;
2941 
2942  size = MAXALIGN(size);
2943 
2944  /*
2945  * Fast path: if there is enough space in this backend's current chunk,
2946  * then we can allocate without any locking.
2947  */
2948  chunk = hashtable->current_chunk;
2949  if (chunk != NULL &&
2950  size <= HASH_CHUNK_THRESHOLD &&
2951  chunk->maxlen - chunk->used >= size)
2952  {
2953 
2954  chunk_shared = hashtable->current_chunk_shared;
2955  Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2956  *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
2957  result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
2958  chunk->used += size;
2959 
2960  Assert(chunk->used <= chunk->maxlen);
2961  Assert(result == dsa_get_address(hashtable->area, *shared));
2962 
2963  return result;
2964  }
2965 
2966  /* Slow path: try to allocate a new chunk. */
2967  LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
2968 
2969  /*
2970  * Check if we need to help increase the number of buckets or batches.
2971  */
2972  if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
2973  pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2974  {
2975  ParallelHashGrowth growth = pstate->growth;
2976 
2977  hashtable->current_chunk = NULL;
2978  LWLockRelease(&pstate->lock);
2979 
2980  /* Another participant has commanded us to help grow. */
2981  if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
2982  ExecParallelHashIncreaseNumBatches(hashtable);
2983  else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2984  ExecParallelHashIncreaseNumBuckets(hashtable);
2985 
2986  /* The caller must retry. */
2987  return NULL;
2988  }
2989 
2990  /* Oversized tuples get their own chunk. */
2991  if (size > HASH_CHUNK_THRESHOLD)
2992  chunk_size = size + HASH_CHUNK_HEADER_SIZE;
2993  else
2994  chunk_size = HASH_CHUNK_SIZE;
2995 
2996  /* Check if it's time to grow batches or buckets. */
2997  if (pstate->growth != PHJ_GROWTH_DISABLED)
2998  {
2999  Assert(curbatch == 0);
3000  Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3001 
3002  /*
3003  * Check if our space limit would be exceeded. To avoid choking on
3004  * very large tuples or very low hash_mem setting, we'll always allow
3005  * each backend to allocate at least one chunk.
3006  */
3007  if (hashtable->batches[0].at_least_one_chunk &&
3008  hashtable->batches[0].shared->size +
3009  chunk_size > pstate->space_allowed)
3010  {
3011  pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3012  hashtable->batches[0].shared->space_exhausted = true;
3013  LWLockRelease(&pstate->lock);
3014 
3015  return NULL;
3016  }
3017 
3018  /* Check if our load factor limit would be exceeded. */
3019  if (hashtable->nbatch == 1)
3020  {
3021  hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3022  hashtable->batches[0].ntuples = 0;
3023  /* Guard against integer overflow and alloc size overflow */
3024  if (hashtable->batches[0].shared->ntuples + 1 >
3025  hashtable->nbuckets * NTUP_PER_BUCKET &&
3026  hashtable->nbuckets < (INT_MAX / 2) &&
3027  hashtable->nbuckets * 2 <=
3028  MaxAllocSize / sizeof(dsa_pointer_atomic))
3029  {
3030  pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3031  LWLockRelease(&pstate->lock);
3032 
3033  return NULL;
3034  }
3035  }
3036  }
3037 
3038  /* We are cleared to allocate a new chunk. */
3039  chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3040  hashtable->batches[curbatch].shared->size += chunk_size;
3041  hashtable->batches[curbatch].at_least_one_chunk = true;
3042 
3043  /* Set up the chunk. */
3044  chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3045  *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3046  chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3047  chunk->used = size;
3048 
3049  /*
3050  * Push it onto the list of chunks, so that it can be found if we need to
3051  * increase the number of buckets or batches (batch 0 only) and later for
3052  * freeing the memory (all batches).
3053  */
3054  chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3055  hashtable->batches[curbatch].shared->chunks = chunk_shared;
3056 
3057  if (size <= HASH_CHUNK_THRESHOLD)
3058  {
3059  /*
3060  * Make this the current chunk so that we can use the fast path to
3061  * fill the rest of it up in future calls.
3062  */
3063  hashtable->current_chunk = chunk;
3064  hashtable->current_chunk_shared = chunk_shared;
3065  }
3066  LWLockRelease(&pstate->lock);
3067 
3068  Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3069  result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3070 
3071  return result;
3072 }
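
Structurally this is a common concurrent-allocator shape: each backend
bump-allocates from a private "current chunk" with no locking, and only chunk
refills (together with the growth and space checks) take the shared lock. A
standalone sketch of that shape alone (not part of nodeHash.c; a pthread mutex
stands in for the LWLock, malloc for DSA, and all growth logic is elided;
assumes size <= CHUNK_SIZE):

#include <pthread.h>
#include <stdlib.h>

#define CHUNK_SIZE (32 * 1024)

typedef struct
{
    char *base;     /* this thread's current chunk */
    size_t used;
    size_t maxlen;
} LocalChunk;

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
chunked_alloc(LocalChunk *local, size_t size)
{
    /* Fast path: bump-allocate from the private chunk, no locking. */
    if (local->base != NULL && local->maxlen - local->used >= size)
    {
        void *ptr = local->base + local->used;

        local->used += size;
        return ptr;
    }

    /* Slow path: under the shared lock, account for and install a fresh
     * chunk (the real code checks growth commands and space limits here). */
    pthread_mutex_lock(&alloc_lock);
    local->base = malloc(CHUNK_SIZE);
    if (local->base != NULL)
    {
        local->maxlen = CHUNK_SIZE;
        local->used = size;
    }
    pthread_mutex_unlock(&alloc_lock);

    return local->base;
}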
3073 
3074 /*
3075  * One backend needs to set up the shared batch state including tuplestores.
3076  * Other backends will ensure they have correctly configured accessors by
3077  * calling ExecParallelHashEnsureBatchAccessors().
3078  */
3079 static void
3080 ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3081 {
3082  ParallelHashJoinState *pstate = hashtable->parallel_state;
3083  ParallelHashJoinBatch *batches;
3084  MemoryContext oldcxt;
3085  int i;
3086 
3087  Assert(hashtable->batches == NULL);
3088 
3089  /* Allocate space. */
3090  pstate->batches =
3091  dsa_allocate0(hashtable->area,
3092  EstimateParallelHashJoinBatch(hashtable) * nbatch);
3093  pstate->nbatch = nbatch;
3094  batches = dsa_get_address(hashtable->area, pstate->batches);
3095 
3096  /* Use hash join memory context. */
3097  oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
3098 
3099  /* Allocate this backend's accessor array. */
3100  hashtable->nbatch = nbatch;
3101  hashtable->batches =
3102  palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3103 
3104  /* Set up the shared state, tuplestores and backend-local accessors. */
3105  for (i = 0; i < hashtable->nbatch; ++i)
3106  {
3107  ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3108  ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3109  char name[MAXPGPATH];
3110 
3111  /*
3112  * All members of shared were zero-initialized. We just need to set
3113  * up the Barrier.
3114  */
3115  BarrierInit(&shared->batch_barrier, 0);
3116  if (i == 0)
3117  {
3118  /* Batch 0 doesn't need to be loaded. */
3119  BarrierAttach(&shared->batch_barrier);
3120  while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
3121  BarrierArriveAndWait(&shared->batch_barrier, 0);
3122  BarrierDetach(&shared->batch_barrier);
3123  }
3124 
3125  /* Initialize accessor state. All members were zero-initialized. */
3126  accessor->shared = shared;
3127 
3128  /* Initialize the shared tuplestores. */
3129  snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3130  accessor->inner_tuples =
3131  sts_initialize(ParallelHashJoinBatchInner(shared),
3132  pstate->nparticipants,
3133  ParallelWorkerNumber + 1,
3134  sizeof(uint32),
3135  SHARED_TUPLESTORE_SINGLE_PASS,
3136  &pstate->fileset,
3137  name);
3138  snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3139  accessor->outer_tuples =
3140  sts_initialize(ParallelHashJoinBatchOuter(shared,
3141  pstate->nparticipants),
3142  pstate->nparticipants,
3143  ParallelWorkerNumber + 1,
3144  sizeof(uint32),
3145  SHARED_TUPLESTORE_SINGLE_PASS,
3146  &pstate->fileset,
3147  name);
3148  }
3149 
3150  MemoryContextSwitchTo(oldcxt);
3151 }
3152 
3153 /*
3154  * Free the current set of ParallelHashJoinBatchAccessor objects.
3155  */
3156 static void
3157 ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3158 {
3159  int i;
3160 
3161  for (i = 0; i < hashtable->nbatch; ++i)
3162  {
3163  /* Make sure no files are left open. */
3164  sts_end_write(hashtable->batches[i].inner_tuples);
3165  sts_end_write(hashtable->batches[i].outer_tuples);
3166  sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3167  sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3168  }
3169  pfree(hashtable->batches);
3170  hashtable->batches = NULL;
3171 }
3172 
3173 /*
3174  * Make sure this backend has up-to-date accessors for the current set of
3175  * batches.
3176  */
3177 static void
3178 ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3179 {
3180  ParallelHashJoinState *pstate = hashtable->parallel_state;
3181  ParallelHashJoinBatch *batches;
3182  MemoryContext oldcxt;
3183  int i;
3184 
3185  if (hashtable->batches != NULL)
3186  {
3187  if (hashtable->nbatch == pstate->nbatch)
3188  return;
3189  ExecParallelHashCloseBatchAccessors(hashtable);
3190  }
3191 
3192  /*
3193  * We should never see a state where the batch-tracking array is freed,
3194  * because we should have given up sooner if we join when the build
3195  * barrier has reached the PHJ_BUILD_FREE phase.
3196  */
3197  Assert(DsaPointerIsValid(pstate->batches));
3198 
3199  /* Use hash join memory context. */
3200  oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
3201 
3202  /* Allocate this backend's accessor array. */
3203  hashtable->nbatch = pstate->nbatch;
3204  hashtable->batches =
3205  palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3206 
3207  /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3208  batches = (ParallelHashJoinBatch *)
3209  dsa_get_address(hashtable->area, pstate->batches);
3210 
3211  /* Set up the accessor array and attach to the tuplestores. */
3212  for (i = 0; i < hashtable->nbatch; ++i)
3213  {
3214  ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3215  ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3216 
3217  accessor->shared = shared;
3218  accessor->preallocated = 0;
3219  accessor->done = false;
3220  accessor->outer_eof = false;
3221  accessor->inner_tuples =
3222  sts_attach(ParallelHashJoinBatchInner(shared),
3223  ParallelWorkerNumber + 1,
3224  &pstate->fileset);
3225  accessor->outer_tuples =
3226  sts_attach(ParallelHashJoinBatchOuter(shared,
3227  pstate->nparticipants),
3228  ParallelWorkerNumber + 1,
3229  &pstate->fileset);
3230  }
3231 
3232  MemoryContextSwitchTo(oldcxt);
3233 }
3234 
3235 /*
3236  * Allocate an empty shared memory hash table for a given batch.
3237  */
3238 void
3239 ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3240 {
3241  ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3242  dsa_pointer_atomic *buckets;
3243  int nbuckets = hashtable->parallel_state->nbuckets;
3244  int i;
3245 
3246  batch->buckets =
3247  dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3248  buckets = (dsa_pointer_atomic *)
3249  dsa_get_address(hashtable->area, batch->buckets);
3250  for (i = 0; i < nbuckets; ++i)
3251  dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3252 }
3253 
3254 /*
3255  * If we are currently attached to a shared hash join batch, detach. If we
3256  * are last to detach, clean up.
3257  */
3258 void
3259 ExecHashTableDetachBatch(HashJoinTable hashtable)
3260 {
3261  if (hashtable->parallel_state != NULL &&
3262  hashtable->curbatch >= 0)
3263  {
3264  int curbatch = hashtable->curbatch;
3265  ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
3266  bool attached = true;
3267 
3268  /* Make sure any temporary files are closed. */
3269  sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3270  sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3271 
3272  /* After attaching we always get at least to PHJ_BATCH_PROBE. */
3273  Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
3274  BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3275 
3276  /*
3277  * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3278  * reached the end of it, it means the plan doesn't want any more
3279  * tuples, and it is happy to abandon any tuples buffered in this
3280  * process's subplans. For correctness, we can't allow any process to
3281  * execute the PHJ_BATCH_SCAN phase, because we will never have the
3282  * complete set of match bits. Therefore we skip emitting unmatched
3283  * tuples in all backends (if this is a full/right join), as if those
3284  * tuples were all due to be emitted by this process and it has
3285  * abandoned them too.
3286  */
3287  if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3288  !hashtable->batches[curbatch].outer_eof)
3289  {
3290  /*
3291  * This flag may be written to by multiple backends during
3292  * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3293  * phase so requires no extra locking.
3294  */
3295  batch->skip_unmatched = true;
3296  }
3297 
3298  /*
3299  * Even if we aren't doing a full/right outer join, we'll step through
3300  * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3301  * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3302  */
3303  if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3304  attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3305  if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3306  {
3307  /*
3308  * We are no longer attached to the batch barrier, but we're the
3309  * process that was chosen to free resources and it's safe to
3310  * assert the current phase. The ParallelHashJoinBatch can't go
3311  * away underneath us while we are attached to the build barrier,
3312  * making this access safe.
3313  */
3314  Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3315 
3316  /* Free shared chunks and buckets. */
3317  while (DsaPointerIsValid(batch->chunks))
3318  {
3319  HashMemoryChunk chunk =
3320  dsa_get_address(hashtable->area, batch->chunks);
3321  dsa_pointer next = chunk->next.shared;
3322 
3323  dsa_free(hashtable->area, batch->chunks);
3324  batch->chunks = next;
3325  }
3326  if (DsaPointerIsValid(batch->buckets))
3327  {
3328  dsa_free(hashtable->area, batch->buckets);
3329  batch->buckets = InvalidDsaPointer;
3330  }
3331  }
3332 
3333  /*
3334  * Track the largest batch we've been attached to. Though each
3335  * backend might see a different subset of batches, explain.c will
3336  * scan the results from all backends to find the largest value.
3337  */
3338  hashtable->spacePeak =
3339  Max(hashtable->spacePeak,
3340  batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3341 
3342  /* Remember that we are not attached to a batch. */
3343  hashtable->curbatch = -1;
3344  }
3345 }
3346 
3347 /*
3348  * Detach from all shared resources. If we are last to detach, clean up.
3349  */
3350 void
3351 ExecHashTableDetach(HashJoinTable hashtable)
3352 {
3353  ParallelHashJoinState *pstate = hashtable->parallel_state;
3354 
3355  /*
3356  * If we're involved in a parallel query, we must either have gotten all
3357  * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3358  */
3359  Assert(!pstate ||
3360  BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3361 
3362  if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3363  {
3364  int i;
3365 
3366  /* Make sure any temporary files are closed. */
3367  if (hashtable->batches)
3368  {
3369  for (i = 0; i < hashtable->nbatch; ++i)
3370  {
3371  sts_end_write(hashtable->batches[i].inner_tuples);
3372  sts_end_write(hashtable->batches[i].outer_tuples);
3373  sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3374  sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3375  }
3376  }
3377 
3378  /* If we're last to detach, clean up shared memory. */
3379  if (BarrierArriveAndDetach(&pstate->build_barrier))
3380  {
3381  /*
3382  * Late joining processes will see this state and give up
3383  * immediately.
3384  */
3385  Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3386 
3387  if (DsaPointerIsValid(pstate->batches))
3388  {
3389  dsa_free(hashtable->area, pstate->batches);
3390  pstate->batches = InvalidDsaPointer;
3391  }
3392  }
3393  }
3394  hashtable->parallel_state = NULL;
3395 }
3396 
3397 /*
3398  * Get the first tuple in a given bucket identified by number.
3399  */
3400 static inline HashJoinTuple
3401 ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3402 {
3403  HashJoinTuple tuple;
3404  dsa_pointer p;
3405 
3406  Assert(hashtable->parallel_state);
3407  p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3408  tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3409 
3410  return tuple;
3411 }
3412 
3413 /*
3414  * Get the next tuple in the same bucket as 'tuple'.
3415  */
3416 static inline HashJoinTuple
3417 ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3418 {
3419  HashJoinTuple next;
3420 
3421  Assert(hashtable->parallel_state);
3422  next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3423 
3424  return next;
3425 }
3426 
3427 /*
3428  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3429  */
3430 static inline void
3431 ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3432  HashJoinTuple tuple,
3433  dsa_pointer tuple_shared)
3434 {
3435  for (;;)
3436  {
3437  tuple->next.shared = dsa_pointer_atomic_read(head);
3438  if (dsa_pointer_atomic_compare_exchange(head,
3439  &tuple->next.shared,
3440  tuple_shared))
3441  break;
3442  }
3443 }
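
The loop above is the classic lock-free list push: point the new tuple's
next-link at the observed head, then compare-and-swap the head; if another
backend pushed first, the CAS fails and we retry with the refreshed head. A
standalone C11 sketch of the same idiom (not part of nodeHash.c; plain
pointers stand in for dsa_pointers):

#include <stdatomic.h>

typedef struct Node
{
    struct Node *next;
} Node;

static void
push_node(_Atomic(Node *) *head, Node *node)
{
    Node *old_head = atomic_load(head);

    do
    {
        /* Link the new node in front of the head we last observed. */
        node->next = old_head;
        /* On failure, old_head is refreshed with the current head value. */
    } while (!atomic_compare_exchange_weak(head, &old_head, node));
}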
3444 
3445 /*
3446  * Prepare to work on a given batch.
3447  */
3448 void
3449 ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3450 {
3451  Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3452 
3453  hashtable->curbatch = batchno;
3454  hashtable->buckets.shared = (dsa_pointer_atomic *)
3455  dsa_get_address(hashtable->area,
3456  hashtable->batches[batchno].shared->buckets);
3457  hashtable->nbuckets = hashtable->parallel_state->nbuckets;
3458  hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
3459  hashtable->current_chunk = NULL;
3460  hashtable->current_chunk_shared = InvalidDsaPointer;
3461  hashtable->batches[batchno].at_least_one_chunk = false;
3462 }
3463 
3464 /*
3465  * Take the next available chunk from the queue of chunks being worked on in
3466  * parallel. Return NULL if there are none left. Otherwise return a pointer
3467  * to the chunk, and set *shared to the DSA pointer to the chunk.
3468  */
3469 static HashMemoryChunk
3470 ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3471 {
3472  ParallelHashJoinState *pstate = hashtable->parallel_state;
3473  HashMemoryChunk chunk;
3474 
3475  LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3476  if (DsaPointerIsValid(pstate->chunk_work_queue))
3477  {
3478  *shared = pstate->chunk_work_queue;
3479  chunk = (HashMemoryChunk)
3480  dsa_get_address(hashtable->area, *shared);
3481  pstate->chunk_work_queue = chunk->next.shared;
3482  }
3483  else
3484  chunk = NULL;
3485  LWLockRelease(&pstate->lock);
3486 
3487  return chunk;
3488 }
3489 
3490 /*
3491  * Increase the space preallocated in this backend for a given inner batch by
3492  * at least a given amount. This allows us to track whether a given batch
3493  * would fit in memory when loaded back in. Also increase the number of
3494  * batches or buckets if required.
3495  *
3496  * This maintains a running estimation of how much space will be taken when we
3497  * load the batch back into memory by simulating the way chunks will be handed
3498  * out to workers. It's not perfectly accurate because the tuples will be
3499  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3500  * it should be pretty close. It tends to overestimate by a fraction of a
3501  * chunk per worker since all workers gang up to preallocate during hashing,
3502  * but workers tend to reload batches alone if there are enough to go around,
3503  * leaving fewer partially filled chunks. This effect is bounded by
3504  * nparticipants.
3505  *
3506  * Return false if the number of batches or buckets has changed, and the
3507  * caller should reconsider which batch a given tuple now belongs in and call
3508  * again.
3509  */
3510 static bool
3511 ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3512 {
3513  ParallelHashJoinState *pstate = hashtable->parallel_state;
3514  ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3515  size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3516 
3517  Assert(batchno > 0);
3518  Assert(batchno < hashtable->nbatch);
3519  Assert(size == MAXALIGN(size));
3520 
3521  LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3522 
3523  /* Has another participant commanded us to help grow? */
3524  if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3525  pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3526  {
3527  ParallelHashGrowth growth = pstate->growth;
3528 
3529  LWLockRelease(&pstate->lock);
3530  if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3531  ExecParallelHashIncreaseNumBatches(hashtable);
3532  else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3533  ExecParallelHashIncreaseNumBuckets(hashtable);
3534 
3535  return false;
3536  }
3537 
3538  if (pstate->growth != PHJ_GROWTH_DISABLED &&
3539  batch->at_least_one_chunk &&
3540  (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3541  > pstate->space_allowed))
3542  {
3543  /*
3544  * We have determined that this batch would exceed the space budget if
3545  * loaded into memory. Command all participants to help repartition.
3546  */
3547  batch->shared->space_exhausted = true;
3548  pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3549  LWLockRelease(&pstate->lock);
3550 
3551  return false;
3552  }
3553 
3554  batch->at_least_one_chunk = true;
3555  batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3556  batch->preallocated = want;
3557  LWLockRelease(&pstate->lock);
3558 
3559  return true;
3560 }
3561 
3562 /*
3563  * Calculate the limit on how much memory can be used by Hash and similar
3564  * plan types. This is work_mem times hash_mem_multiplier, and is
3565  * expressed in bytes.
3566  *
3567  * Exported for use by the planner, as well as other hash-like executor
3568  * nodes. This is a rather random place for this, but there is no better
3569  * place.
3570  */
3571 size_t
3572 get_hash_memory_limit(void)
3573 {
3574  double mem_limit;
3575 
3576  /* Do initial calculation in double arithmetic */
3577  mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3578 
3579  /* Clamp in case it doesn't fit in size_t */
3580  mem_limit = Min(mem_limit, (double) SIZE_MAX);
3581 
3582  return (size_t) mem_limit;
3583 }
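
Worked example: work_mem is expressed in kilobytes, so with the current
defaults work_mem = 4096 and hash_mem_multiplier = 2.0 this returns
4096 * 2.0 * 1024.0 = 8388608 bytes, i.e. 8MB of allowed hash memory.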
Definition: hashjoin.h:143
#define PHJ_BUILD_HASH_OUTER
Definition: hashjoin.h:261
#define HJTUPLE_MINTUPLE(hjtup)
Definition: hashjoin.h:80
#define SKEW_BUCKET_OVERHEAD
Definition: hashjoin.h:108
#define PHJ_GROW_BATCHES_DECIDE
Definition: hashjoin.h:277
#define PHJ_GROW_BATCHES_REALLOCATE
Definition: hashjoin.h:275
#define HASH_CHUNK_HEADER_SIZE
Definition: hashjoin.h:140
#define PHJ_GROW_BATCHES_FINISH
Definition: hashjoin.h:278
#define ParallelHashJoinBatchOuter(batch, nparticipants)
Definition: hashjoin.h:176
#define SKEW_HASH_MEM_PERCENT
Definition: hashjoin.h:110
#define PHJ_BUILD_ALLOCATE
Definition: hashjoin.h:259
#define PHJ_GROW_BUCKETS_REALLOCATE
Definition: hashjoin.h:283
#define PHJ_BATCH_FREE
Definition: hashjoin.h:271
#define PHJ_GROW_BATCHES_PHASE(n)
Definition: hashjoin.h:279
#define HASH_CHUNK_SIZE
Definition: hashjoin.h:139
ParallelHashGrowth
Definition: hashjoin.h:220
@ PHJ_GROWTH_NEED_MORE_BUCKETS
Definition: hashjoin.h:224
@ PHJ_GROWTH_OK
Definition: hashjoin.h:222
@ PHJ_GROWTH_NEED_MORE_BATCHES
Definition: hashjoin.h:226
@ PHJ_GROWTH_DISABLED
Definition: hashjoin.h:228
#define PHJ_BUILD_RUN
Definition: hashjoin.h:262
#define INVALID_SKEW_BUCKET_NO
Definition: hashjoin.h:109
#define PHJ_BUILD_ELECT
Definition: hashjoin.h:258
#define EstimateParallelHashJoinBatch(hashtable)
Definition: hashjoin.h:182
void heap_free_minimal_tuple(MinimalTuple mtup)
Definition: heaptuple.c:1427
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define HeapTupleHeaderHasMatch(tup)
Definition: htup_details.h:514
#define SizeofMinimalTupleHeader
Definition: htup_details.h:647
#define HeapTupleHeaderClearMatch(tup)
Definition: htup_details.h:524
void InstrStartNode(Instrumentation *instr)
Definition: instrument.c:68
void InstrStopNode(Instrumentation *instr, double nTuples)
Definition: instrument.c:84
int j
Definition: isn.c:74
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
Assert(fmt[strlen(fmt) - 1] !='\n')
void free_attstatsslot(AttStatsSlot *sslot)
Definition: lsyscache.c:3302
bool op_strict(Oid opno)
Definition: lsyscache.c:1459
bool get_op_hash_functions(Oid opno, RegProcedure *lhs_procno, RegProcedure *rhs_procno)
Definition: lsyscache.c:509
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition: lsyscache.c:3192
#define ATTSTATSSLOT_NUMBERS
Definition: lsyscache.h:43
#define ATTSTATSSLOT_VALUES
Definition: lsyscache.h:42
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
@ LW_EXCLUSIVE
Definition: lwlock.h:115
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:314
void pfree(void *pointer)
Definition: mcxt.c:1436
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1048
MemoryContext CurrentMemoryContext
Definition: mcxt.c:135
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1005
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:387
void * palloc(Size size)
Definition: mcxt.c:1210
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:153
#define MaxAllocSize
Definition: memutils.h:40
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
Definition: nodeHash.c:1447
static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
Definition: nodeHash.c:2604
void ExecParallelHashTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue)
Definition: nodeHash.c:1698
static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
Definition: nodeHash.c:3511
void ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
Definition: nodeHash.c:3449
static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
Definition: nodeHash.c:1510
static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
Definition: nodeHash.c:3178
void ExecHashTableReset(HashJoinTable hashtable)
Definition: nodeHash.c:2283
bool ExecHashGetHashValue(HashJoinTable hashtable, ExprContext *econtext, List *hashkeys, bool outer_tuple, bool keep_nulls, uint32 *hashvalue)
Definition: nodeHash.c:1807
static HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
Definition: nodeHash.c:3401
void ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
Definition: nodeHash.c:2736
bool ExecParallelScanHashBucket(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:2008
static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size, dsa_pointer *shared)
Definition: nodeHash.c:2932
static void * dense_alloc(HashJoinTable hashtable, Size size)
Definition: nodeHash.c:2852
static void MultiExecParallelHash(HashState *node)
Definition: nodeHash.c:215
void ExecHashAccumInstrumentation(HashInstrumentation *instrument, HashJoinTable hashtable)
Definition: nodeHash.c:2833
static void MultiExecPrivateHash(HashState *node)
Definition: nodeHash.c:139
void ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
Definition: nodeHash.c:2761
static void ExecParallelHashPushTuple(dsa_pointer_atomic *head, HashJoinTuple tuple, dsa_pointer tuple_shared)
Definition: nodeHash.c:3431
void ExecHashTableDetachBatch(HashJoinTable hashtable)
Definition: nodeHash.c:3259
void ExecHashEstimate(HashState *node, ParallelContext *pcxt)
Definition: nodeHash.c:2717
HashState * ExecInitHash(Hash *node, EState *estate, int eflags)
Definition: nodeHash.c:361
void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, bool try_combined_hash_mem, int parallel_workers, size_t *space_allowed, int *numbuckets, int *numbatches, int *num_skew_mcvs)
Definition: nodeHash.c:670
void ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
Definition: nodeHash.c:2059
static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
Definition: nodeHash.c:2359
void ExecHashTableDetach(HashJoinTable hashtable)
Definition: nodeHash.c:3351
bool ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:2220
void ExecHashTableDestroy(HashJoinTable hashtable)
Definition: nodeHash.c:878
#define NTUP_PER_BUCKET
Definition: nodeHash.c:667
int ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
Definition: nodeHash.c:2512
static void ExecHashIncreaseNumBatches(HashJoinTable hashtable)
Definition: nodeHash.c:911
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3572
bool ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:2146
static void ExecHashSkewTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue, int bucketNumber)
Definition: nodeHash.c:2558
static void ExecParallelHashRepartitionRest(HashJoinTable hashtable)
Definition: nodeHash.c:1357
static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
Definition: nodeHash.c:3080
void ExecHashTableResetMatchFlags(HashJoinTable hashtable)
Definition: nodeHash.c:2311
static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
Definition: nodeHash.c:3157
static HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
Definition: nodeHash.c:3417
void ExecEndHash(HashState *node)
Definition: nodeHash.c:414
void ExecShutdownHash(HashState *node)
Definition: nodeHash.c:2787
void ExecHashTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue)
Definition: nodeHash.c:1609
void ExecHashGetBucketAndBatch(HashJoinTable hashtable, uint32 hashvalue, int *bucketno, int *batchno)
Definition: nodeHash.c:1915
static void ExecParallelHashMergeCounters(HashJoinTable hashtable)
Definition: nodeHash.c:1417
static TupleTableSlot * ExecHash(PlanState *pstate)
Definition: nodeHash.c:92
void ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
Definition: nodeHash.c:3239
HashJoinTable ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
Definition: nodeHash.c:438
bool ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
Definition: nodeHash.c:2080
void ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue)
Definition: nodeHash.c:1763
static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
Definition: nodeHash.c:3470
void ExecReScanHash(HashState *node)
Definition: nodeHash.c:2337
bool ExecScanHashBucket(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:1947
static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
Definition: nodeHash.c:1290
static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
Definition: nodeHash.c:1074
void ExecHashRetrieveInstrumentation(HashState *node)
Definition: nodeHash.c:2802
Node * MultiExecHash(HashState *node)
Definition: nodeHash.c:106
void ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue, BufFile **fileptr)
#define makeNode(_type_)
Definition: nodes.h:176
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:138
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:110
static uint32 pg_rotate_left32(uint32 word, int n)
Definition: pg_bitutils.h:322
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:185
static uint32 pg_rotate_right32(uint32 word, int n)
Definition: pg_bitutils.h:316
#define pg_nextpower2_size_t
Definition: pg_bitutils.h:335
#define pg_prevpower2_size_t
Definition: pg_bitutils.h:336
#define MAXPGPATH
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:467
#define lfirst_oid(lc)
Definition: pg_list.h:174
#define outerPlan(node)
Definition: plannodes.h:186
#define snprintf
Definition: port.h:238
#define printf(...)
Definition: port.h:244
static uint32 DatumGetUInt32(Datum X)
Definition: postgres.h:222
uintptr_t Datum
Definition: postgres.h:64
static Datum Int16GetDatum(int16 X)
Definition: postgres.h:172
static Datum BoolGetDatum(bool X)
Definition: postgres.h:102
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:252
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
SharedTuplestoreAccessor * sts_initialize(SharedTuplestore *sts, int participants, int my_participant_number, size_t meta_data_size, int flags, SharedFileSet *fileset, const char *name)
MinimalTuple sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
void sts_end_write(SharedTuplestoreAccessor *accessor)
SharedTuplestoreAccessor * sts_attach(SharedTuplestore *sts, int my_participant_number, SharedFileSet *fileset)
void sts_end_parallel_scan(SharedTuplestoreAccessor *accessor)
void sts_puttuple(SharedTuplestoreAccessor *accessor, void *meta_data, MinimalTuple tuple)
void sts_begin_parallel_scan(SharedTuplestoreAccessor *accessor)
#define SHARED_TUPLESTORE_SINGLE_PASS
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
Size add_size(Size s1, Size s2)
Definition: shmem.c:502
Size mul_size(Size s1, Size s2)
Definition: shmem.c:519
Datum * values
Definition: lsyscache.h:53
float4 * numbers
Definition: lsyscache.h:56
MemoryContext ecxt_per_tuple_memory
Definition: execnodes.h:257
TupleTableSlot * ecxt_innertuple
Definition: execnodes.h:251
TupleTableSlot * ecxt_outertuple
Definition: execnodes.h:253
Definition: fmgr.h:57
HashJoinTuple hj_CurTuple
Definition: execnodes.h:2100
int hj_CurSkewBucketNo
Definition: execnodes.h:2099
ExprState * hashclauses
Definition: execnodes.h:2092
uint32 hj_CurHashValue
Definition: execnodes.h:2097
int hj_CurBucketNo
Definition: execnodes.h:2098
HashJoinTable hj_HashTable
Definition: execnodes.h:2096
TupleTableSlot * hj_HashTupleSlot
Definition: execnodes.h:2102
struct HashJoinTupleData ** unshared
Definition: hashjoin.h:300
FmgrInfo * outer_hashfunctions
Definition: hashjoin.h:340
HashMemoryChunk chunks
Definition: hashjoin.h:355
union HashJoinTableData::@97 buckets
ParallelHashJoinBatchAccessor * batches
Definition: hashjoin.h:361
MemoryContext hashCxt
Definition: hashjoin.h:351
double totalTuples
Definition: hashjoin.h:321
double partialTuples
Definition: hashjoin.h:322
ParallelHashJoinState * parallel_state
Definition: hashjoin.h:360
HashMemoryChunk current_chunk
Definition: hashjoin.h:358
bool * hashStrict
Definition: hashjoin.h:342
Size spaceAllowedSkew
Definition: hashjoin.h:349
int * skewBucketNums
Definition: hashjoin.h:311
BufFile ** innerBatchFile
Definition: hashjoin.h:332
int log2_nbuckets_optimal
Definition: hashjoin.h:294
dsa_pointer_atomic * shared
Definition: hashjoin.h:302
dsa_area * area
Definition: hashjoin.h:359
BufFile ** outerBatchFile
Definition: hashjoin.h:333
FmgrInfo * inner_hashfunctions
Definition: hashjoin.h:341
dsa_pointer current_chunk_shared
Definition: hashjoin.h:362
MemoryContext batchCxt
Definition: hashjoin.h:352
double skewTuples
Definition: hashjoin.h:323
HashSkewBucket ** skewBucket
Definition: hashjoin.h:308
union HashJoinTupleData::@95 next
dsa_pointer shared
Definition: hashjoin.h:73
uint32 hashvalue
Definition: hashjoin.h:75
struct HashJoinTupleData * unshared
Definition: hashjoin.h:72
union HashMemoryChunkData::@96 next
struct HashMemoryChunkData * unshared
Definition: hashjoin.h:126
dsa_pointer shared
Definition: hashjoin.h:127
HashJoinTuple tuples
Definition: hashjoin.h:105
uint32 hashvalue
Definition: hashjoin.h:104
struct ParallelHashJoinState * parallel_state
Definition: execnodes.h:2667
HashJoinTable hashtable
Definition: execnodes.h:2648
List * hashkeys
Definition: execnodes.h:2649
SharedHashInfo * shared_info
Definition: execnodes.h:2657
PlanState ps
Definition: execnodes.h:2647
HashInstrumentation * hinstrument
Definition: execnodes.h:2664
AttrNumber skewColumn
Definition: plannodes.h:1208
List * hashkeys
Definition: plannodes.h:1206
Oid skewTable
Definition: plannodes.h:1207
bool skewInherit
Definition: plannodes.h:1209
Cardinality rows_total
Definition: plannodes.h:1211
Plan plan
Definition: plannodes.h:1200
Definition: pg_list.h:54
Definition: nodes.h:129
shm_toc_estimator estimator
Definition: parallel.h:42
shm_toc * toc
Definition: parallel.h:45
SharedTuplestoreAccessor * outer_tuples
Definition: hashjoin.h:210
ParallelHashJoinBatch * shared
Definition: hashjoin.h:198
SharedTuplestoreAccessor * inner_tuples
Definition: hashjoin.h:209
dsa_pointer chunks
Definition: hashjoin.h:156
dsa_pointer buckets
Definition: hashjoin.h:153
Barrier grow_batches_barrier
Definition: hashjoin.h:250
dsa_pointer old_batches
Definition: hashjoin.h:238
dsa_pointer chunk_work_queue
Definition: hashjoin.h:243
Barrier grow_buckets_barrier
Definition: hashjoin.h:251
ParallelHashGrowth growth
Definition: hashjoin.h:242
SharedFileSet fileset
Definition: hashjoin.h:254
dsa_pointer batches
Definition: hashjoin.h:237
Instrumentation * instrument
Definition: execnodes.h:1045
Plan * plan
Definition: execnodes.h:1035
EState * state
Definition: execnodes.h:1037
ExprContext * ps_ExprContext
Definition: execnodes.h:1074
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:1075
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:1041
bool parallel_aware
Definition: plannodes.h:144
List * qual
Definition: plannodes.h:157
int plan_width
Definition: plannodes.h:139
Cardinality plan_rows
Definition: plannodes.h:138
int plan_node_id
Definition: plannodes.h:155
HashInstrumentation hinstrument[FLEXIBLE_ARRAY_MEMBER]
Definition: execnodes.h:2638
Definition: regguts.h:318
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:866
HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3)
Definition: syscache.c:840
@ STATRELATTINH
Definition: syscache.h:97
#define TupIsNull(slot)
Definition: tuptable.h:300
@ WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT
Definition: wait_event.h:108
@ WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE
Definition: wait_event.h:107
@ WAIT_EVENT_HASH_BUILD_ELECT
Definition: wait_event.h:98
@ WAIT_EVENT_HASH_BUILD_HASH_INNER
Definition: wait_event.h:99
@ WAIT_EVENT_HASH_GROW_BATCHES_DECIDE
Definition: wait_event.h:101
@ WAIT_EVENT_HASH_GROW_BATCHES_FINISH
Definition: wait_event.h:103
@ WAIT_EVENT_HASH_GROW_BUCKETS_ELECT
Definition: wait_event.h:106
@ WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE
Definition: wait_event.h:104
@ WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION
Definition: wait_event.h:105
@ WAIT_EVENT_HASH_BUILD_ALLOCATE
Definition: wait_event.h:97
@ WAIT_EVENT_HASH_GROW_BATCHES_ELECT
Definition: wait_event.h:102