dynahash.c
1 /*-------------------------------------------------------------------------
2  *
3  * dynahash.c
4  * dynamic hash tables
5  *
6  * dynahash.c supports both local-to-a-backend hash tables and hash tables in
7  * shared memory. For shared hash tables, it is the caller's responsibility
8  * to provide appropriate access interlocking. The simplest convention is
9  * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
10  * hash_seq_search) need only shared lock, but any update requires exclusive
11  * lock. For heavily-used shared tables, the single-lock approach creates a
12  * concurrency bottleneck, so we also support "partitioned" locking wherein
13  * there are multiple LWLocks guarding distinct subsets of the table. To use
14  * a hash table in partitioned mode, the HASH_PARTITION flag must be given
15  * to hash_create. This prevents any attempt to split buckets on-the-fly.
16  * Therefore, each hash bucket chain operates independently, and no fields
17  * of the hash header change after init except nentries and freeList.
18  * A partitioned table uses spinlocks to guard changes of those fields.
19  * This lets any subset of the hash buckets be treated as a separately
20  * lockable partition. We expect callers to use the low-order bits of a
21  * lookup key's hash value as a partition number --- this will work because
22  * of the way calc_bucket() maps hash values to bucket numbers.
23  *
24  * For hash tables in shared memory, the memory allocator function should
25  * match malloc's semantics of returning NULL on failure. For hash tables
26  * in local memory, we typically use palloc() which will throw error on
27  * failure. The code in this file has to cope with both cases.
28  *
29  * dynahash.c provides support for these types of lookup keys:
30  *
31  * 1. Null-terminated C strings (truncated if necessary to fit in keysize),
32  * compared as though by strcmp(). This is the default behavior.
33  *
34  * 2. Arbitrary binary data of size keysize, compared as though by memcmp().
35  * (Caller must ensure there are no undefined padding bits in the keys!)
36  * This is selected by specifying HASH_BLOBS flag to hash_create.
37  *
38  * 3. More complex key behavior can be selected by specifying user-supplied
39  * hashing, comparison, and/or key-copying functions. At least a hashing
40  * function must be supplied; comparison defaults to memcmp() and key copying
41  * to memcpy() when a user-defined hashing function is selected.
42  *
43  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
44  * Portions Copyright (c) 1994, Regents of the University of California
45  *
46  *
47  * IDENTIFICATION
48  * src/backend/utils/hash/dynahash.c
49  *
50  *-------------------------------------------------------------------------
51  */
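A minimal caller-side sketch of the API described above, assuming a hypothetical entry struct MyEntry whose first field is the uint32 lookup key (none of these names appear in this file):

    /* sketch only: MyEntry and my_key are hypothetical */
    typedef struct MyEntry
    {
        uint32      key;        /* lookup key; must be the first field */
        int         value;      /* caller payload */
    } MyEntry;

    HASHCTL     ctl;
    HTAB       *htab;
    MyEntry    *entry;
    bool        found;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(uint32);
    ctl.entrysize = sizeof(MyEntry);
    htab = hash_create("my hypothetical table", 128, &ctl,
                       HASH_ELEM | HASH_BLOBS);

    entry = (MyEntry *) hash_search(htab, &my_key, HASH_ENTER, &found);
    if (!found)
        entry->value = 0;       /* caller fills non-key fields */

HASH_BLOBS selects the memcmp-style key handling of case 2 above; omitting it would select the C-string behavior of case 1.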
52 
53 /*
54  * Original comments:
55  *
56  * Dynamic hashing, after CACM April 1988 pp 446-457, by Per-Ake Larson.
57  * Coded into C, with minor code improvements, and with hsearch(3) interface,
58  * by ejp@ausmelb.oz, Jul 26, 1988: 13:16;
59  * also, hcreate/hdestroy routines added to simulate hsearch(3).
60  *
61  * These routines simulate hsearch(3) and family, with the important
62  * difference that the hash table is dynamic - can grow indefinitely
63  * beyond its original size (as supplied to hcreate()).
64  *
65  * Performance appears to be comparable to that of hsearch(3).
66  * The 'source-code' options referred to in hsearch(3)'s 'man' page
67  * are not implemented; otherwise functionality is identical.
68  *
69  * Compilation controls:
70  * HASH_DEBUG controls some informative traces, mainly for debugging.
71  * HASH_STATISTICS causes HashAccesses and HashCollisions to be maintained;
72  * when combined with HASH_DEBUG, these are displayed by hdestroy().
73  *
74  * Problems & fixes to ejp@ausmelb.oz. WARNING: relies on pre-processor
75  * concatenation property, in probably unnecessary code 'optimization'.
76  *
77  * Modified margo@postgres.berkeley.edu February 1990
78  * added multiple table interface
79  * Modified by sullivan@postgres.berkeley.edu April 1990
80  * changed ctl structure for shared memory
81  */
82 
83 #include "postgres.h"
84 
85 #include <limits.h>
86 
87 #include "access/xact.h"
88 #include "storage/shmem.h"
89 #include "storage/spin.h"
90 #include "utils/dynahash.h"
91 #include "utils/memutils.h"
92 
93 
94 /*
95  * Constants
96  *
97  * A hash table has a top-level "directory", each of whose entries points
98  * to a "segment" of ssize bucket headers. The maximum number of hash
99  * buckets is thus dsize * ssize (but dsize may be expansible). Of course,
100  * the number of records in the table can be larger, but we don't want a
101  * whole lot of records per bucket or performance goes down.
102  *
103  * In a hash table allocated in shared memory, the directory cannot be
104  * expanded because it must stay at a fixed address. The directory size
105  * should be selected using hash_select_dirsize (and you'd better have
106  * a good idea of the maximum number of entries!). For non-shared hash
107  * tables, the initial directory size can be left at the default.
108  */
109 #define DEF_SEGSIZE 256
110 #define DEF_SEGSIZE_SHIFT 8 /* must be log2(DEF_SEGSIZE) */
111 #define DEF_DIRSIZE 256
112 #define DEF_FFACTOR 1 /* default fill factor */
113 
114 /* Number of freelists to be used for a partitioned hash table. */
115 #define NUM_FREELISTS 32
116 
117 /* A hash bucket is a linked list of HASHELEMENTs */
118 typedef HASHELEMENT *HASHBUCKET;
119 
120 /* A hash segment is an array of bucket headers */
121 typedef HASHBUCKET *HASHSEGMENT;
122 
123 /*
124  * Using an array of FreeListData structs, instead of separate arrays of
125  * mutexes, nentries and freeLists, at least partially prevents sharing one
126  * cache line between different mutexes (see below).
127  */
128 typedef struct
129 {
130  slock_t mutex; /* spinlock */
131  long nentries; /* number of entries */
132  HASHELEMENT *freeList; /* list of free elements */
133 } FreeListData;
134 
135 /*
136  * Header structure for a hash table --- contains all changeable info
137  *
138  * In a shared-memory hash table, the HASHHDR is in shared memory, while
139  * each backend has a local HTAB struct. For a non-shared table, there isn't
140  * any functional difference between HASHHDR and HTAB, but we separate them
141  * anyway to share code between shared and non-shared tables.
142  */
143 struct HASHHDR
144 {
145  /*
146  * The freelist can become a point of contention on high-concurrency hash
147  * tables, so we use an array of freelists, each with its own mutex and
148  * nentries count, instead of just a single one.
149  *
150  * If the hash table is not partitioned, only freeList[0] is used and
151  * spinlocks are not used at all.
152  */
153  FreeListData freeList[NUM_FREELISTS];
154 
155  /* These fields can change, but not in a partitioned table */
156  /* Also, dsize can't change in a shared table, even if unpartitioned */
157  long dsize; /* directory size */
158  long nsegs; /* number of allocated segments (<= dsize) */
159  uint32 max_bucket; /* ID of maximum bucket in use */
160  uint32 high_mask; /* mask to modulo into entire table */
161  uint32 low_mask; /* mask to modulo into lower half of table */
162 
163  /* These fields are fixed at hashtable creation */
164  Size keysize; /* hash key length in bytes */
165  Size entrysize; /* total user element size in bytes */
166  long num_partitions; /* # partitions (must be power of 2), or 0 */
167  long ffactor; /* target fill factor */
168  long max_dsize; /* 'dsize' limit if directory is fixed size */
169  long ssize; /* segment size --- must be power of 2 */
170  int sshift; /* segment shift = log2(ssize) */
171  int nelem_alloc; /* number of entries to allocate at once */
172 
173 #ifdef HASH_STATISTICS
174 
175  /*
176  * Count statistics here. NB: stats code doesn't bother with mutex, so
177  * counts could be corrupted a bit in a partitioned table.
178  */
179  long accesses;
180  long collisions;
181 #endif
182 };
183 
184 #define IS_PARTITIONED(hctl) ((hctl)->num_partitions != 0)
185 
186 #define FREELIST_IDX(hctl, hashcode) \
187  (IS_PARTITIONED(hctl) ? hashcode % NUM_FREELISTS : 0)
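For example, in a partitioned table an entry whose hash value is 42 goes on freeList[42 % 32] = freeList[10]; in an unpartitioned table every entry goes on freeList[0] and the spinlocks are never taken.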
188 
189 /*
190  * Top control structure for a hashtable --- in a shared table, each backend
191  * has its own copy (OK since no fields change at runtime)
192  */
193 struct HTAB
194 {
195  HASHHDR *hctl; /* => shared control information */
196  HASHSEGMENT *dir; /* directory of segment starts */
197  HashValueFunc hash; /* hash function */
198  HashCompareFunc match; /* key comparison function */
199  HashCopyFunc keycopy; /* key copying function */
200  HashAllocFunc alloc; /* memory allocator */
201  MemoryContext hcxt; /* memory context if default allocator used */
202  char *tabname; /* table name (for error messages) */
203  bool isshared; /* true if table is in shared memory */
204  bool isfixed; /* if true, don't enlarge */
205 
206  /* freezing a shared table isn't allowed, so we can keep state here */
207  bool frozen; /* true = no more inserts allowed */
208 
209  /* We keep local copies of these fixed values to reduce contention */
210  Size keysize; /* hash key length in bytes */
211  long ssize; /* segment size --- must be power of 2 */
212  int sshift; /* segment shift = log2(ssize) */
213 };
214 
215 /*
216  * Key (also entry) part of a HASHELEMENT
217  */
218 #define ELEMENTKEY(helem) (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))
219 
220 /*
221  * Obtain element pointer given pointer to key
222  */
223 #define ELEMENT_FROM_KEY(key) \
224  ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))
225 
226 /*
227  * Fast MOD arithmetic, assuming that y is a power of 2 !
228  */
229 #define MOD(x,y) ((x) & ((y)-1))
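For example, MOD(37, 8) is 37 & 7 = 5, the same as 37 % 8. The identity x & (y-1) == x % y holds only because y is a power of 2, which makes y-1 a mask of all the low-order bits.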
230 
231 #if HASH_STATISTICS
232 static long hash_accesses,
233  hash_collisions,
234  hash_expansions;
235 #endif
236 
237 /*
238  * Private function prototypes
239  */
240 static void *DynaHashAlloc(Size size);
241 static HASHSEGMENT seg_alloc(HTAB *hashp);
242 static bool element_alloc(HTAB *hashp, int nelem, int freelist_idx);
243 static bool dir_realloc(HTAB *hashp);
244 static bool expand_table(HTAB *hashp);
245 static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
246 static void hdefault(HTAB *hashp);
247 static int choose_nelem_alloc(Size entrysize);
248 static bool init_htab(HTAB *hashp, long nelem);
249 static void hash_corrupted(HTAB *hashp);
250 static long next_pow2_long(long num);
251 static int next_pow2_int(long num);
252 static void register_seq_scan(HTAB *hashp);
253 static void deregister_seq_scan(HTAB *hashp);
254 static bool has_seq_scans(HTAB *hashp);
255 
256 
257 /*
258  * memory allocation support
259  */
260 static MemoryContext CurrentDynaHashCxt = NULL;
261 
262 static void *
263 DynaHashAlloc(Size size)
264 {
265  Assert(MemoryContextIsValid(CurrentDynaHashCxt));
266  return MemoryContextAlloc(CurrentDynaHashCxt, size);
267 }
268 
269 
270 /*
271  * HashCompareFunc for string keys
272  *
273  * Because we copy keys with strlcpy(), they will be truncated at keysize-1
274  * bytes, so we can only compare that many ... hence strncmp is almost but
275  * not quite the right thing.
276  */
277 static int
278 string_compare(const char *key1, const char *key2, Size keysize)
279 {
280  return strncmp(key1, key2, keysize - 1);
281 }
282 
283 
284 /************************** CREATE ROUTINES **********************/
285 
286 /*
287  * hash_create -- create a new dynamic hash table
288  *
289  * tabname: a name for the table (for debugging purposes)
290  * nelem: maximum number of elements expected
291  * *info: additional table parameters, as indicated by flags
292  * flags: bitmask indicating which parameters to take from *info
293  *
294  * Note: for a shared-memory hashtable, nelem needs to be a pretty good
295  * estimate, since we can't expand the table on the fly. But an unshared
296  * hashtable can be expanded on-the-fly, so it's better for nelem to be
297  * on the small side and let the table grow if it's exceeded. An overly
298  * large nelem will penalize hash_seq_search speed without buying much.
299  */
300 HTAB *
301 hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
302 {
303  HTAB *hashp;
304  HASHHDR *hctl;
305 
306  /*
307  * For shared hash tables, we have a local hash header (HTAB struct) that
308  * we allocate in TopMemoryContext; all else is in shared memory.
309  *
310  * For non-shared hash tables, everything including the hash header is in
311  * a memory context created specially for the hash table --- this makes
312  * hash_destroy very simple. The memory context is made a child of either
313  * a context specified by the caller, or TopMemoryContext if nothing is
314  * specified.
315  */
316  if (flags & HASH_SHARED_MEM)
317  {
318  /* Set up to allocate the hash header */
319  CurrentDynaHashCxt = TopMemoryContext;
320  }
321  else
322  {
323  /* Create the hash table's private memory context */
324  if (flags & HASH_CONTEXT)
325  CurrentDynaHashCxt = info->hcxt;
326  else
327  CurrentDynaHashCxt = TopMemoryContext;
328  CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
329  tabname,
330  ALLOCSET_DEFAULT_SIZES);
331  }
332 
333  /* Initialize the hash header, plus a copy of the table name */
334  hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1);
335  MemSet(hashp, 0, sizeof(HTAB));
336 
337  hashp->tabname = (char *) (hashp + 1);
338  strcpy(hashp->tabname, tabname);
339 
340  /*
341  * Select the appropriate hash function (see comments at head of file).
342  */
343  if (flags & HASH_FUNCTION)
344  hashp->hash = info->hash;
345  else if (flags & HASH_BLOBS)
346  {
347  /* We can optimize hashing for common key sizes */
348  Assert(flags & HASH_ELEM);
349  if (info->keysize == sizeof(uint32))
350  hashp->hash = uint32_hash;
351  else
352  hashp->hash = tag_hash;
353  }
354  else
355  hashp->hash = string_hash; /* default hash function */
356 
357  /*
358  * If you don't specify a match function, it defaults to string_compare if
359  * you used string_hash (either explicitly or by default) and to memcmp
360  * otherwise.
361  *
362  * Note: explicitly specifying string_hash is deprecated, because this
363  * might not work for callers in loadable modules on some platforms due to
364  * referencing a trampoline instead of the string_hash function proper.
365  * Just let it default, eh?
366  */
367  if (flags & HASH_COMPARE)
368  hashp->match = info->match;
369  else if (hashp->hash == string_hash)
370  hashp->match = (HashCompareFunc) string_compare;
371  else
372  hashp->match = memcmp;
373 
374  /*
375  * Similarly, the key-copying function defaults to strlcpy or memcpy.
376  */
377  if (flags & HASH_KEYCOPY)
378  hashp->keycopy = info->keycopy;
379  else if (hashp->hash == string_hash)
380  hashp->keycopy = (HashCopyFunc) strlcpy;
381  else
382  hashp->keycopy = memcpy;
383 
384  /* And select the entry allocation function, too. */
385  if (flags & HASH_ALLOC)
386  hashp->alloc = info->alloc;
387  else
388  hashp->alloc = DynaHashAlloc;
389 
390  if (flags & HASH_SHARED_MEM)
391  {
392  /*
393  * ctl structure and directory are preallocated for shared memory
394  * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
395  * well.
396  */
397  hashp->hctl = info->hctl;
398  hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
399  hashp->hcxt = NULL;
400  hashp->isshared = true;
401 
402  /* hash table already exists, we're just attaching to it */
403  if (flags & HASH_ATTACH)
404  {
405  /* make local copies of some heavily-used values */
406  hctl = hashp->hctl;
407  hashp->keysize = hctl->keysize;
408  hashp->ssize = hctl->ssize;
409  hashp->sshift = hctl->sshift;
410 
411  return hashp;
412  }
413  }
414  else
415  {
416  /* setup hash table defaults */
417  hashp->hctl = NULL;
418  hashp->dir = NULL;
419  hashp->hcxt = CurrentDynaHashCxt;
420  hashp->isshared = false;
421  }
422 
423  if (!hashp->hctl)
424  {
425  hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
426  if (!hashp->hctl)
427  ereport(ERROR,
428  (errcode(ERRCODE_OUT_OF_MEMORY),
429  errmsg("out of memory")));
430  }
431 
432  hashp->frozen = false;
433 
434  hdefault(hashp);
435 
436  hctl = hashp->hctl;
437 
438  if (flags & HASH_PARTITION)
439  {
440  /* Doesn't make sense to partition a local hash table */
441  Assert(flags & HASH_SHARED_MEM);
442 
443  /*
444  * The number of partitions had better be a power of 2. Also, it must
445  * be less than INT_MAX (see init_htab()), so call the int version of
446  * next_pow2.
447  */
448  Assert(info->num_partitions == next_pow2_int(info->num_partitions));
449 
450  hctl->num_partitions = info->num_partitions;
451  }
452 
453  if (flags & HASH_SEGMENT)
454  {
455  hctl->ssize = info->ssize;
456  hctl->sshift = my_log2(info->ssize);
457  /* ssize had better be a power of 2 */
458  Assert(hctl->ssize == (1L << hctl->sshift));
459  }
460  if (flags & HASH_FFACTOR)
461  hctl->ffactor = info->ffactor;
462 
463  /*
464  * SHM hash tables have fixed directory size passed by the caller.
465  */
466  if (flags & HASH_DIRSIZE)
467  {
468  hctl->max_dsize = info->max_dsize;
469  hctl->dsize = info->dsize;
470  }
471 
472  /*
473  * hash table now allocates space for key and data but you have to say how
474  * much space to allocate
475  */
476  if (flags & HASH_ELEM)
477  {
478  Assert(info->entrysize >= info->keysize);
479  hctl->keysize = info->keysize;
480  hctl->entrysize = info->entrysize;
481  }
482 
483  /* make local copies of heavily-used constant fields */
484  hashp->keysize = hctl->keysize;
485  hashp->ssize = hctl->ssize;
486  hashp->sshift = hctl->sshift;
487 
488  /* Build the hash directory structure */
489  if (!init_htab(hashp, nelem))
490  elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname);
491 
492  /*
493  * For a shared hash table, preallocate the requested number of elements.
494  * This reduces problems with run-time out-of-shared-memory conditions.
495  *
496  * For a non-shared hash table, preallocate the requested number of
497  * elements if it's less than our chosen nelem_alloc. This avoids wasting
498  * space if the caller correctly estimates a small table size.
499  */
500  if ((flags & HASH_SHARED_MEM) ||
501  nelem < hctl->nelem_alloc)
502  {
503  int i,
504  freelist_partitions,
505  nelem_alloc,
506  nelem_alloc_first;
507 
508  /*
509  * If the hash table is partitioned, all freeLists have an equal number of
510  * elements. Otherwise, only freeList[0] is used.
511  */
512  if (IS_PARTITIONED(hashp->hctl))
513  freelist_partitions = NUM_FREELISTS;
514  else
515  freelist_partitions = 1;
516 
517  nelem_alloc = nelem / freelist_partitions;
518  if (nelem_alloc == 0)
519  nelem_alloc = 1;
520 
521  /* Make sure all memory will be used */
522  if (nelem_alloc * freelist_partitions < nelem)
523  nelem_alloc_first =
524  nelem - nelem_alloc * (freelist_partitions - 1);
525  else
526  nelem_alloc_first = nelem_alloc;
527 
528  for (i = 0; i < freelist_partitions; i++)
529  {
530  int temp = (i == 0) ? nelem_alloc_first : nelem_alloc;
531 
532  if (!element_alloc(hashp, temp, i))
533  ereport(ERROR,
534  (errcode(ERRCODE_OUT_OF_MEMORY),
535  errmsg("out of memory")));
536  }
537  }
538 
539  if (flags & HASH_FIXED_SIZE)
540  hashp->isfixed = true;
541  return hashp;
542 }
543 
544 /*
545  * Set default HASHHDR parameters.
546  */
547 static void
548 hdefault(HTAB *hashp)
549 {
550  HASHHDR *hctl = hashp->hctl;
551 
552  MemSet(hctl, 0, sizeof(HASHHDR));
553 
554  hctl->dsize = DEF_DIRSIZE;
555  hctl->nsegs = 0;
556 
557  /* rather pointless defaults for key & entry size */
558  hctl->keysize = sizeof(char *);
559  hctl->entrysize = 2 * sizeof(char *);
560 
561  hctl->num_partitions = 0; /* not partitioned */
562 
563  hctl->ffactor = DEF_FFACTOR;
564 
565  /* table has no fixed maximum size */
566  hctl->max_dsize = NO_MAX_DSIZE;
567 
568  hctl->ssize = DEF_SEGSIZE;
569  hctl->sshift = DEF_SEGSIZE_SHIFT;
570 
571 #ifdef HASH_STATISTICS
572  hctl->accesses = hctl->collisions = 0;
573 #endif
574 }
575 
576 /*
577  * Given the user-specified entry size, choose nelem_alloc, ie, how many
578  * elements to add to the hash table when we need more.
579  */
580 static int
581 choose_nelem_alloc(Size entrysize)
582 {
583  int nelem_alloc;
584  Size elementSize;
585  Size allocSize;
586 
587  /* Each element has a HASHELEMENT header plus user data. */
588  /* NB: this had better match element_alloc() */
589  elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
590 
591  /*
592  * The idea here is to choose nelem_alloc at least 32, but round up so
593  * that the allocation request will be a power of 2 or just less. This
594  * makes little difference for hash tables in shared memory, but for hash
595  * tables managed by palloc, the allocation request will be rounded up to
596  * a power of 2 anyway. If we fail to take this into account, we'll waste
597  * as much as half the allocated space.
598  */
599  allocSize = 32 * 4; /* assume elementSize at least 8 */
600  do
601  {
602  allocSize <<= 1;
603  nelem_alloc = allocSize / elementSize;
604  } while (nelem_alloc < 32);
605 
606  return nelem_alloc;
607 }
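To make the loop concrete, assume a typical 64-bit build where MAXALIGN(sizeof(HASHELEMENT)) is 16 and entrysize is 32, so elementSize is 48. allocSize doubles through 256, 512, and 1024 (yielding 5, 10, and 21 elements) and stops at 2048, where 2048 / 48 = 42 >= 32; nelem_alloc is therefore 42 and each batch requests 2016 bytes, just under a power of 2 as intended.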
608 
609 /*
610  * Compute derived fields of hctl and build the initial directory/segment
611  * arrays
612  */
613 static bool
614 init_htab(HTAB *hashp, long nelem)
615 {
616  HASHHDR *hctl = hashp->hctl;
617  HASHSEGMENT *segp;
618  int nbuckets;
619  int nsegs;
620  int i;
621 
622  /*
623  * initialize mutexes if it's a partitioned table
624  */
625  if (IS_PARTITIONED(hctl))
626  for (i = 0; i < NUM_FREELISTS; i++)
627  SpinLockInit(&(hctl->freeList[i].mutex));
628 
629  /*
630  * Divide number of elements by the fill factor to determine a desired
631  * number of buckets. Allocate space for the next greater power of two
632  * number of buckets
633  */
634  nbuckets = next_pow2_int((nelem - 1) / hctl->ffactor + 1);
635 
636  /*
637  * In a partitioned table, nbuckets must be at least equal to
638  * num_partitions; were it less, keys with apparently different partition
639  * numbers would map to the same bucket, breaking partition independence.
640  * (Normally nbuckets will be much bigger; this is just a safety check.)
641  */
642  while (nbuckets < hctl->num_partitions)
643  nbuckets <<= 1;
644 
645  hctl->max_bucket = hctl->low_mask = nbuckets - 1;
646  hctl->high_mask = (nbuckets << 1) - 1;
647 
648  /*
649  * Figure number of directory segments needed, round up to a power of 2
650  */
651  nsegs = (nbuckets - 1) / hctl->ssize + 1;
652  nsegs = next_pow2_int(nsegs);
653 
654  /*
655  * Make sure directory is big enough. If pre-allocated directory is too
656  * small, choke (caller screwed up).
657  */
658  if (nsegs > hctl->dsize)
659  {
660  if (!(hashp->dir))
661  hctl->dsize = nsegs;
662  else
663  return false;
664  }
665 
666  /* Allocate a directory */
667  if (!(hashp->dir))
668  {
669  CurrentDynaHashCxt = hashp->hcxt;
670  hashp->dir = (HASHSEGMENT *)
671  hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
672  if (!hashp->dir)
673  return false;
674  }
675 
676  /* Allocate initial segments */
677  for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
678  {
679  *segp = seg_alloc(hashp);
680  if (*segp == NULL)
681  return false;
682  }
683 
684  /* Choose number of entries to allocate at a time */
685  hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);
686 
687 #if HASH_DEBUG
688  fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n%s%ld\n",
689  "TABLE POINTER ", hashp,
690  "DIRECTORY SIZE ", hctl->dsize,
691  "SEGMENT SIZE ", hctl->ssize,
692  "SEGMENT SHIFT ", hctl->sshift,
693  "FILL FACTOR ", hctl->ffactor,
694  "MAX BUCKET ", hctl->max_bucket,
695  "HIGH MASK ", hctl->high_mask,
696  "LOW MASK ", hctl->low_mask,
697  "NSEGS ", hctl->nsegs,
698  "NENTRIES ", hash_get_num_entries(hctl));
699 #endif
700  return true;
701 }
702 
703 /*
704  * Estimate the space needed for a hashtable containing the given number
705  * of entries of given size.
706  * NOTE: this is used to estimate the footprint of hashtables in shared
707  * memory; therefore it does not count HTAB which is in local memory.
708  * NB: assumes that all hash structure parameters have default values!
709  */
710 Size
711 hash_estimate_size(long num_entries, Size entrysize)
712 {
713  Size size;
714  long nBuckets,
715  nSegments,
716  nDirEntries,
717  nElementAllocs,
718  elementSize,
719  elementAllocCnt;
720 
721  /* estimate number of buckets wanted */
722  nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
723  /* # of segments needed for nBuckets */
724  nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
725  /* directory entries */
726  nDirEntries = DEF_DIRSIZE;
727  while (nDirEntries < nSegments)
728  nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
729 
730  /* fixed control info */
731  size = MAXALIGN(sizeof(HASHHDR)); /* but not HTAB, per above */
732  /* directory */
733  size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
734  /* segments */
735  size = add_size(size, mul_size(nSegments,
736  MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
737  /* elements --- allocated in groups of choose_nelem_alloc() entries */
738  elementAllocCnt = choose_nelem_alloc(entrysize);
739  nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
740  elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
741  size = add_size(size,
742  mul_size(nElementAllocs,
743  mul_size(elementAllocCnt, elementSize)));
744 
745  return size;
746 }
747 
748 /*
749  * Select an appropriate directory size for a hashtable with the given
750  * maximum number of entries.
751  * This is only needed for hashtables in shared memory, whose directories
752  * cannot be expanded dynamically.
753  * NB: assumes that all hash structure parameters have default values!
754  *
755  * XXX this had better agree with the behavior of init_htab()...
756  */
757 long
758 hash_select_dirsize(long num_entries)
759 {
760  long nBuckets,
761  nSegments,
762  nDirEntries;
763 
764  /* estimate number of buckets wanted */
765  nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
766  /* # of segments needed for nBuckets */
767  nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
768  /* directory entries */
769  nDirEntries = DEF_DIRSIZE;
770  while (nDirEntries < nSegments)
771  nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
772 
773  return nDirEntries;
774 }
775 
776 /*
777  * Compute the required initial memory allocation for a shared-memory
778  * hashtable with the given parameters. We need space for the HASHHDR
779  * and for the (non expansible) directory.
780  */
781 Size
782 hash_get_shared_size(HASHCTL *info, int flags)
783 {
784  Assert(flags & HASH_DIRSIZE);
785  Assert(info->dsize == info->max_dsize);
786  return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
787 }
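A hedged sketch of how these sizing routines typically combine for a shared, partitioned table (the entry type, counts, and table name are hypothetical; ShmemInitHash in shmem.c is what normally supplies the HASH_DIRSIZE, HASH_ALLOC, and HASH_SHARED_MEM details, using hash_select_dirsize() and hash_get_shared_size() internally):

    /* at shmem-sizing time, e.g. from an extension's request hook */
    RequestAddinShmemSpace(hash_estimate_size(max_entries, sizeof(MyEntry)));

    /* at shmem-initialization time */
    HASHCTL     ctl;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(uint32);
    ctl.entrysize = sizeof(MyEntry);
    ctl.num_partitions = 16;    /* must be a power of 2 */
    htab = ShmemInitHash("my shared table", init_entries, max_entries,
                         &ctl, HASH_ELEM | HASH_BLOBS | HASH_PARTITION);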
788 
789 
790 /********************** DESTROY ROUTINES ************************/
791 
792 void
793 hash_destroy(HTAB *hashp)
794 {
795  if (hashp != NULL)
796  {
797  /* allocation method must be one we know how to free, too */
798  Assert(hashp->alloc == DynaHashAlloc);
799  /* so this hashtable must have its own context */
800  Assert(hashp->hcxt != NULL);
801 
802  hash_stats("destroy", hashp);
803 
804  /*
805  * Free everything by destroying the hash table's memory context.
806  */
807  MemoryContextDelete(hashp->hcxt);
808  }
809 }
810 
811 void
812 hash_stats(const char *where, HTAB *hashp)
813 {
814 #if HASH_STATISTICS
815  fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
816  where, hashp->hctl->accesses, hashp->hctl->collisions);
817 
818  fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
819  hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
820  hashp->hctl->max_bucket, hashp->hctl->nsegs);
821  fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
822  where, hash_accesses, hash_collisions);
823  fprintf(stderr, "hash_stats: total expansions %ld\n",
824  hash_expansions);
825 #endif
826 }
827 
828 /******************************* SEARCH ROUTINES *****************************/
829 
830 
831 /*
832  * get_hash_value -- exported routine to calculate a key's hash value
833  *
834  * We export this because for partitioned tables, callers need to compute
835  * the partition number (from the low-order bits of the hash value) before
836  * searching.
837  */
838 uint32
839 get_hash_value(HTAB *hashp, const void *keyPtr)
840 {
841  return hashp->hash(keyPtr, hashp->keysize);
842 }
843 
844 /* Convert a hash value to a bucket number */
845 static inline uint32
846 calc_bucket(HASHHDR *hctl, uint32 hash_val)
847 {
848  uint32 bucket;
849 
850  bucket = hash_val & hctl->high_mask;
851  if (bucket > hctl->max_bucket)
852  bucket = bucket & hctl->low_mask;
853 
854  return bucket;
855 }
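As a worked example, suppose the table currently has six buckets, so max_bucket = 5, low_mask = 3, and high_mask = 7. A hash value of 13 gives 13 & 7 = 5, which is in range, so bucket 5 is used; a hash value of 14 gives 14 & 7 = 6, which exceeds max_bucket, so it falls back to 14 & 3 = 2. When expand_table later creates bucket 6, it redistributes exactly those entries of bucket 2 whose masked value is 6, so lookups stay consistent.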
856 
857 /*
858  * hash_search -- look up key in table and perform action
859  * hash_search_with_hash_value -- same, with key's hash value already computed
860  *
861  * action is one of:
862  * HASH_FIND: look up key in table
863  * HASH_ENTER: look up key in table, creating entry if not present
864  * HASH_ENTER_NULL: same, but return NULL if out of memory
865  * HASH_REMOVE: look up key in table, remove entry if present
866  *
867  * Return value is a pointer to the element found/entered/removed if any,
868  * or NULL if no match was found. (NB: in the case of the REMOVE action,
869  * the result is a dangling pointer that shouldn't be dereferenced!)
870  *
871  * HASH_ENTER will normally ereport a generic "out of memory" error if
872  * it is unable to create a new entry. The HASH_ENTER_NULL operation is
873  * the same except it will return NULL if out of memory. Note that
874  * HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
875  * since palloc internally ereports on out-of-memory.
876  *
877  * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an
878  * existing entry in the table, FALSE otherwise. This is needed in the
879  * HASH_ENTER case, but is redundant with the return value otherwise.
880  *
881  * For hash_search_with_hash_value, the hashvalue parameter must have been
882  * calculated with get_hash_value().
883  */
884 void *
885 hash_search(HTAB *hashp,
886  const void *keyPtr,
887  HASHACTION action,
888  bool *foundPtr)
889 {
890  return hash_search_with_hash_value(hashp,
891  keyPtr,
892  hashp->hash(keyPtr, hashp->keysize),
893  action,
894  foundPtr);
895 }
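A hedged sketch of the partitioned-table protocol described in the file header: compute the hash once, lock the partition it selects, then search with the precomputed value (MyPartitionLock, MyEntry, and my_key are placeholders, not part of this file):

    uint32      hashvalue = get_hash_value(htab, &my_key);
    LWLock     *lock = MyPartitionLock(hashvalue);  /* placeholder mapping */
    bool        found;
    MyEntry    *entry;

    LWLockAcquire(lock, LW_SHARED);     /* LW_EXCLUSIVE for updates */
    entry = (MyEntry *) hash_search_with_hash_value(htab, &my_key, hashvalue,
                                                    HASH_FIND, &found);
    /* use entry only while the lock is held */
    LWLockRelease(lock);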
896 
897 void *
898 hash_search_with_hash_value(HTAB *hashp,
899  const void *keyPtr,
900  uint32 hashvalue,
901  HASHACTION action,
902  bool *foundPtr)
903 {
904  HASHHDR *hctl = hashp->hctl;
905  Size keysize;
906  uint32 bucket;
907  long segment_num;
908  long segment_ndx;
909  HASHSEGMENT segp;
910  HASHBUCKET currBucket;
911  HASHBUCKET *prevBucketPtr;
912  HashCompareFunc match;
913  int freelist_idx = FREELIST_IDX(hctl, hashvalue);
914 
915 #if HASH_STATISTICS
916  hash_accesses++;
917  hctl->accesses++;
918 #endif
919 
920  /*
921  * If inserting, check if it is time to split a bucket.
922  *
923  * NOTE: failure to expand table is not a fatal error, it just means we
924  * have to run at higher fill factor than we wanted. However, if we're
925  * using the palloc allocator then it will throw error anyway on
926  * out-of-memory, so we must do this before modifying the table.
927  */
928  if (action == HASH_ENTER || action == HASH_ENTER_NULL)
929  {
930  /*
931  * Can't split if running in partitioned mode, nor if frozen, nor if
932  * table is the subject of any active hash_seq_search scans. Strange
933  * order of these tests is to try to check cheaper conditions first.
934  */
935  if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
936  hctl->freeList[0].nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
937  !has_seq_scans(hashp))
938  (void) expand_table(hashp);
939  }
940 
941  /*
942  * Do the initial lookup
943  */
944  bucket = calc_bucket(hctl, hashvalue);
945 
946  segment_num = bucket >> hashp->sshift;
947  segment_ndx = MOD(bucket, hashp->ssize);
948 
949  segp = hashp->dir[segment_num];
950 
951  if (segp == NULL)
952  hash_corrupted(hashp);
953 
954  prevBucketPtr = &segp[segment_ndx];
955  currBucket = *prevBucketPtr;
956 
957  /*
958  * Follow collision chain looking for matching key
959  */
960  match = hashp->match; /* save one fetch in inner loop */
961  keysize = hashp->keysize; /* ditto */
962 
963  while (currBucket != NULL)
964  {
965  if (currBucket->hashvalue == hashvalue &&
966  match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
967  break;
968  prevBucketPtr = &(currBucket->link);
969  currBucket = *prevBucketPtr;
970 #if HASH_STATISTICS
971  hash_collisions++;
972  hctl->collisions++;
973 #endif
974  }
975 
976  if (foundPtr)
977  *foundPtr = (bool) (currBucket != NULL);
978 
979  /*
980  * OK, now what?
981  */
982  switch (action)
983  {
984  case HASH_FIND:
985  if (currBucket != NULL)
986  return (void *) ELEMENTKEY(currBucket);
987  return NULL;
988 
989  case HASH_REMOVE:
990  if (currBucket != NULL)
991  {
992  /* if partitioned, must lock to touch nentries and freeList */
993  if (IS_PARTITIONED(hctl))
994  SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));
995 
996  Assert(hctl->freeList[freelist_idx].nentries > 0);
997  hctl->freeList[freelist_idx].nentries--;
998 
999  /* remove record from hash bucket's chain. */
1000  *prevBucketPtr = currBucket->link;
1001 
1002  /* add the record to the freelist for this table. */
1003  currBucket->link = hctl->freeList[freelist_idx].freeList;
1004  hctl->freeList[freelist_idx].freeList = currBucket;
1005 
1006  if (IS_PARTITIONED(hctl))
1007  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1008 
1009  /*
1010  * better hope the caller is synchronizing access to this
1011  * element, because someone else is going to reuse it the next
1012  * time something is added to the table
1013  */
1014  return (void *) ELEMENTKEY(currBucket);
1015  }
1016  return NULL;
1017 
1018  case HASH_ENTER_NULL:
1019  /* ENTER_NULL does not work with palloc-based allocator */
1020  Assert(hashp->alloc != DynaHashAlloc);
1021  /* FALL THRU */
1022 
1023  case HASH_ENTER:
1024  /* Return existing element if found, else create one */
1025  if (currBucket != NULL)
1026  return (void *) ELEMENTKEY(currBucket);
1027 
1028  /* disallow inserts if frozen */
1029  if (hashp->frozen)
1030  elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
1031  hashp->tabname);
1032 
1033  currBucket = get_hash_entry(hashp, freelist_idx);
1034  if (currBucket == NULL)
1035  {
1036  /* out of memory */
1037  if (action == HASH_ENTER_NULL)
1038  return NULL;
1039  /* report a generic message */
1040  if (hashp->isshared)
1041  ereport(ERROR,
1042  (errcode(ERRCODE_OUT_OF_MEMORY),
1043  errmsg("out of shared memory")));
1044  else
1045  ereport(ERROR,
1046  (errcode(ERRCODE_OUT_OF_MEMORY),
1047  errmsg("out of memory")));
1048  }
1049 
1050  /* link into hashbucket chain */
1051  *prevBucketPtr = currBucket;
1052  currBucket->link = NULL;
1053 
1054  /* copy key into record */
1055  currBucket->hashvalue = hashvalue;
1056  hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
1057 
1058  /*
1059  * Caller is expected to fill the data field on return. DO NOT
1060  * insert any code that could possibly throw error here, as doing
1061  * so would leave the table entry incomplete and hence corrupt the
1062  * caller's data structure.
1063  */
1064 
1065  return (void *) ELEMENTKEY(currBucket);
1066  }
1067 
1068  elog(ERROR, "unrecognized hash action code: %d", (int) action);
1069 
1070  return NULL; /* keep compiler quiet */
1071 }
1072 
1073 /*
1074  * hash_update_hash_key -- change the hash key of an existing table entry
1075  *
1076  * This is equivalent to removing the entry, making a new entry, and copying
1077  * over its data, except that the entry never goes to the table's freelist.
1078  * Therefore this cannot suffer an out-of-memory failure, even if there are
1079  * other processes operating in other partitions of the hashtable.
1080  *
1081  * Returns TRUE if successful, FALSE if the requested new hash key is already
1082  * present. Throws error if the specified entry pointer isn't actually a
1083  * table member.
1084  *
1085  * NB: currently, there is no special case for old and new hash keys being
1086  * identical, which means we'll report FALSE for that situation. This is
1087  * preferable for existing uses.
1088  *
1089  * NB: for a partitioned hashtable, caller must hold lock on both relevant
1090  * partitions, if the new hash key would belong to a different partition.
1091  */
1092 bool
1093 hash_update_hash_key(HTAB *hashp,
1094  void *existingEntry,
1095  const void *newKeyPtr)
1096 {
1097  HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
1098  HASHHDR *hctl = hashp->hctl;
1099  uint32 newhashvalue;
1100  Size keysize;
1101  uint32 bucket;
1102  uint32 newbucket;
1103  long segment_num;
1104  long segment_ndx;
1105  HASHSEGMENT segp;
1106  HASHBUCKET currBucket;
1107  HASHBUCKET *prevBucketPtr;
1108  HASHBUCKET *oldPrevPtr;
1109  HashCompareFunc match;
1110 
1111 #if HASH_STATISTICS
1112  hash_accesses++;
1113  hctl->accesses++;
1114 #endif
1115 
1116  /* disallow updates if frozen */
1117  if (hashp->frozen)
1118  elog(ERROR, "cannot update in frozen hashtable \"%s\"",
1119  hashp->tabname);
1120 
1121  /*
1122  * Lookup the existing element using its saved hash value. We need to do
1123  * this to be able to unlink it from its hash chain, but as a side benefit
1124  * we can verify the validity of the passed existingEntry pointer.
1125  */
1126  bucket = calc_bucket(hctl, existingElement->hashvalue);
1127 
1128  segment_num = bucket >> hashp->sshift;
1129  segment_ndx = MOD(bucket, hashp->ssize);
1130 
1131  segp = hashp->dir[segment_num];
1132 
1133  if (segp == NULL)
1134  hash_corrupted(hashp);
1135 
1136  prevBucketPtr = &segp[segment_ndx];
1137  currBucket = *prevBucketPtr;
1138 
1139  while (currBucket != NULL)
1140  {
1141  if (currBucket == existingElement)
1142  break;
1143  prevBucketPtr = &(currBucket->link);
1144  currBucket = *prevBucketPtr;
1145  }
1146 
1147  if (currBucket == NULL)
1148  elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"",
1149  hashp->tabname);
1150 
1151  oldPrevPtr = prevBucketPtr;
1152 
1153  /*
1154  * Now perform the equivalent of a HASH_ENTER operation to locate the hash
1155  * chain we want to put the entry into.
1156  */
1157  newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
1158 
1159  newbucket = calc_bucket(hctl, newhashvalue);
1160 
1161  segment_num = newbucket >> hashp->sshift;
1162  segment_ndx = MOD(newbucket, hashp->ssize);
1163 
1164  segp = hashp->dir[segment_num];
1165 
1166  if (segp == NULL)
1167  hash_corrupted(hashp);
1168 
1169  prevBucketPtr = &segp[segment_ndx];
1170  currBucket = *prevBucketPtr;
1171 
1172  /*
1173  * Follow collision chain looking for matching key
1174  */
1175  match = hashp->match; /* save one fetch in inner loop */
1176  keysize = hashp->keysize; /* ditto */
1177 
1178  while (currBucket != NULL)
1179  {
1180  if (currBucket->hashvalue == newhashvalue &&
1181  match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
1182  break;
1183  prevBucketPtr = &(currBucket->link);
1184  currBucket = *prevBucketPtr;
1185 #if HASH_STATISTICS
1186  hash_collisions++;
1187  hctl->collisions++;
1188 #endif
1189  }
1190 
1191  if (currBucket != NULL)
1192  return false; /* collision with an existing entry */
1193 
1194  currBucket = existingElement;
1195 
1196  /*
1197  * If old and new hash values belong to the same bucket, we need not
1198  * change any chain links, and indeed should not since this simplistic
1199  * update will corrupt the list if currBucket is the last element. (We
1200  * cannot fall out earlier, however, since we need to scan the bucket to
1201  * check for duplicate keys.)
1202  */
1203  if (bucket != newbucket)
1204  {
1205  /* OK to remove record from old hash bucket's chain. */
1206  *oldPrevPtr = currBucket->link;
1207 
1208  /* link into new hashbucket chain */
1209  *prevBucketPtr = currBucket;
1210  currBucket->link = NULL;
1211  }
1212 
1213  /* copy new key into record */
1214  currBucket->hashvalue = newhashvalue;
1215  hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);
1216 
1217  /* rest of record is untouched */
1218 
1219  return true;
1220 }
1221 
1222 /*
1223  * create a new entry if possible
1224  */
1225 static HASHBUCKET
1226 get_hash_entry(HTAB *hashp, int freelist_idx)
1227 {
1228  HASHHDR *hctl = hashp->hctl;
1229  HASHBUCKET newElement;
1230  int borrow_from_idx;
1231 
1232  for (;;)
1233  {
1234  /* if partitioned, must lock to touch nentries and freeList */
1235  if (IS_PARTITIONED(hctl))
1236  SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1237 
1238  /* try to get an entry from the freelist */
1239  newElement = hctl->freeList[freelist_idx].freeList;
1240 
1241  if (newElement != NULL)
1242  break;
1243 
1244  if (IS_PARTITIONED(hctl))
1245  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1246 
1247  /* no free elements. allocate another chunk of buckets */
1248  if (!element_alloc(hashp, hctl->nelem_alloc, freelist_idx))
1249  {
1250  if (!IS_PARTITIONED(hctl))
1251  return NULL; /* out of memory */
1252 
1253  /* try to borrow element from another partition */
1254  borrow_from_idx = freelist_idx;
1255  for (;;)
1256  {
1257  borrow_from_idx = (borrow_from_idx + 1) % NUM_FREELISTS;
1258  if (borrow_from_idx == freelist_idx)
1259  break;
1260 
1261  SpinLockAcquire(&(hctl->freeList[borrow_from_idx].mutex));
1262  newElement = hctl->freeList[borrow_from_idx].freeList;
1263 
1264  if (newElement != NULL)
1265  {
1266  hctl->freeList[borrow_from_idx].freeList = newElement->link;
1267  SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1268 
1269  SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1270  hctl->freeList[freelist_idx].nentries++;
1271  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1272 
1273  break;
1274  }
1275 
1276  SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1277  }
1278 
1279  return newElement;
1280  }
1281  }
1282 
1283  /* remove entry from freelist, bump nentries */
1284  hctl->freeList[freelist_idx].freeList = newElement->link;
1285  hctl->freeList[freelist_idx].nentries++;
1286 
1287  if (IS_PARTITIONED(hctl))
1288  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1289 
1290  return newElement;
1291 }
1292 
1293 /*
1294  * hash_get_num_entries -- get the number of entries in a hashtable
1295  */
1296 long
1297 hash_get_num_entries(HTAB *hashp)
1298 {
1299  int i;
1300  long sum = hashp->hctl->freeList[0].nentries;
1301 
1302  /*
1303  * We currently don't bother with the mutex; it's only sensible to call
1304  * this function if you've got lock on all partitions of the table.
1305  */
1306 
1307  if (!IS_PARTITIONED(hashp->hctl))
1308  return sum;
1309 
1310  for (i = 1; i < NUM_FREELISTS; i++)
1311  sum += hashp->hctl->freeList[i].nentries;
1312 
1313  return sum;
1314 }
1315 
1316 /*
1317  * hash_seq_init/_search/_term
1318  * Sequentially search through hash table and return
1319  * all the elements one by one, return NULL when no more.
1320  *
1321  * hash_seq_term should be called if and only if the scan is abandoned before
1322  * completion; if hash_seq_search returns NULL then it has already done the
1323  * end-of-scan cleanup.
1324  *
1325  * NOTE: caller may delete the returned element before continuing the scan.
1326  * However, deleting any other element while the scan is in progress is
1327  * UNDEFINED (it might be the one that curIndex is pointing at!). Also,
1328  * if elements are added to the table while the scan is in progress, it is
1329  * unspecified whether they will be visited by the scan or not.
1330  *
1331  * NOTE: it is possible to use hash_seq_init/hash_seq_search without any
1332  * worry about hash_seq_term cleanup, if the hashtable is first locked against
1333  * further insertions by calling hash_freeze.
1334  *
1335  * NOTE: to use this with a partitioned hashtable, caller had better hold
1336  * at least shared lock on all partitions of the table throughout the scan!
1337  * We can cope with insertions or deletions by our own backend, but *not*
1338  * with concurrent insertions or deletions by another.
1339  */
1340 void
1341 hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
1342 {
1343  status->hashp = hashp;
1344  status->curBucket = 0;
1345  status->curEntry = NULL;
1346  if (!hashp->frozen)
1347  register_seq_scan(hashp);
1348 }
1349 
1350 void *
1351 hash_seq_search(HASH_SEQ_STATUS *status)
1352 {
1353  HTAB *hashp;
1354  HASHHDR *hctl;
1355  uint32 max_bucket;
1356  long ssize;
1357  long segment_num;
1358  long segment_ndx;
1359  HASHSEGMENT segp;
1360  uint32 curBucket;
1361  HASHELEMENT *curElem;
1362 
1363  if ((curElem = status->curEntry) != NULL)
1364  {
1365  /* Continuing scan of curBucket... */
1366  status->curEntry = curElem->link;
1367  if (status->curEntry == NULL) /* end of this bucket */
1368  ++status->curBucket;
1369  return (void *) ELEMENTKEY(curElem);
1370  }
1371 
1372  /*
1373  * Search for next nonempty bucket starting at curBucket.
1374  */
1375  curBucket = status->curBucket;
1376  hashp = status->hashp;
1377  hctl = hashp->hctl;
1378  ssize = hashp->ssize;
1379  max_bucket = hctl->max_bucket;
1380 
1381  if (curBucket > max_bucket)
1382  {
1383  hash_seq_term(status);
1384  return NULL; /* search is done */
1385  }
1386 
1387  /*
1388  * first find the right segment in the table directory.
1389  */
1390  segment_num = curBucket >> hashp->sshift;
1391  segment_ndx = MOD(curBucket, ssize);
1392 
1393  segp = hashp->dir[segment_num];
1394 
1395  /*
1396  * Pick up the first item in this bucket's chain. If chain is not empty
1397  * we can begin searching it. Otherwise we have to advance to find the
1398  * next nonempty bucket. We try to optimize that case since searching a
1399  * near-empty hashtable has to iterate this loop a lot.
1400  */
1401  while ((curElem = segp[segment_ndx]) == NULL)
1402  {
1403  /* empty bucket, advance to next */
1404  if (++curBucket > max_bucket)
1405  {
1406  status->curBucket = curBucket;
1407  hash_seq_term(status);
1408  return NULL; /* search is done */
1409  }
1410  if (++segment_ndx >= ssize)
1411  {
1412  segment_num++;
1413  segment_ndx = 0;
1414  segp = hashp->dir[segment_num];
1415  }
1416  }
1417 
1418  /* Begin scan of curBucket... */
1419  status->curEntry = curElem->link;
1420  if (status->curEntry == NULL) /* end of this bucket */
1421  ++curBucket;
1422  status->curBucket = curBucket;
1423  return (void *) ELEMENTKEY(curElem);
1424 }
1425 
1426 void
1427 hash_seq_term(HASH_SEQ_STATUS *status)
1428 {
1429  if (!status->hashp->frozen)
1430  deregister_seq_scan(status->hashp);
1431 }
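A hedged sketch of the canonical scan loop (MyEntry and should_stop are placeholders):

    HASH_SEQ_STATUS status;
    MyEntry    *entry;

    hash_seq_init(&status, htab);
    while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
    {
        if (should_stop(entry))
        {
            hash_seq_term(&status);     /* needed only when stopping early */
            break;
        }
    }

If the table has first been locked against insertions with hash_freeze, the hash_seq_term call can be skipped even when the loop exits early.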
1432 
1433 /*
1434  * hash_freeze
1435  * Freeze a hashtable against future insertions (deletions are
1436  * still allowed)
1437  *
1438  * The reason for doing this is that by preventing any more bucket splits,
1439  * we no longer need to worry about registering hash_seq_search scans,
1440  * and thus caller need not be careful about ensuring hash_seq_term gets
1441  * called at the right times.
1442  *
1443  * Multiple calls to hash_freeze() are allowed, but you can't freeze a table
1444  * with active scans (since hash_seq_term would then do the wrong thing).
1445  */
1446 void
1447 hash_freeze(HTAB *hashp)
1448 {
1449  if (hashp->isshared)
1450  elog(ERROR, "cannot freeze shared hashtable \"%s\"", hashp->tabname);
1451  if (!hashp->frozen && has_seq_scans(hashp))
1452  elog(ERROR, "cannot freeze hashtable \"%s\" because it has active scans",
1453  hashp->tabname);
1454  hashp->frozen = true;
1455 }
1456 
1457 
1458 /********************************* UTILITIES ************************/
1459 
1460 /*
1461  * Expand the table by adding one more hash bucket.
1462  */
1463 static bool
1464 expand_table(HTAB *hashp)
1465 {
1466  HASHHDR *hctl = hashp->hctl;
1467  HASHSEGMENT old_seg,
1468  new_seg;
1469  long old_bucket,
1470  new_bucket;
1471  long new_segnum,
1472  new_segndx;
1473  long old_segnum,
1474  old_segndx;
1475  HASHBUCKET *oldlink,
1476  *newlink;
1477  HASHBUCKET currElement,
1478  nextElement;
1479 
1480  Assert(!IS_PARTITIONED(hctl));
1481 
1482 #ifdef HASH_STATISTICS
1483  hash_expansions++;
1484 #endif
1485 
1486  new_bucket = hctl->max_bucket + 1;
1487  new_segnum = new_bucket >> hashp->sshift;
1488  new_segndx = MOD(new_bucket, hashp->ssize);
1489 
1490  if (new_segnum >= hctl->nsegs)
1491  {
1492  /* Allocate new segment if necessary -- could fail if dir full */
1493  if (new_segnum >= hctl->dsize)
1494  if (!dir_realloc(hashp))
1495  return false;
1496  if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
1497  return false;
1498  hctl->nsegs++;
1499  }
1500 
1501  /* OK, we created a new bucket */
1502  hctl->max_bucket++;
1503 
1504  /*
1505  * *Before* changing masks, find old bucket corresponding to same hash
1506  * values; values in that bucket may need to be relocated to new bucket.
1507  * Note that new_bucket is certainly larger than low_mask at this point,
1508  * so we can skip the first step of the regular hash mask calc.
1509  */
1510  old_bucket = (new_bucket & hctl->low_mask);
1511 
1512  /*
1513  * If we crossed a power of 2, readjust masks.
1514  */
1515  if ((uint32) new_bucket > hctl->high_mask)
1516  {
1517  hctl->low_mask = hctl->high_mask;
1518  hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
1519  }
1520 
1521  /*
1522  * Relocate records to the new bucket. NOTE: because of the way the hash
1523  * masking is done in calc_bucket, only one old bucket can need to be
1524  * split at this point. With a different way of reducing the hash value,
1525  * that might not be true!
1526  */
1527  old_segnum = old_bucket >> hashp->sshift;
1528  old_segndx = MOD(old_bucket, hashp->ssize);
1529 
1530  old_seg = hashp->dir[old_segnum];
1531  new_seg = hashp->dir[new_segnum];
1532 
1533  oldlink = &old_seg[old_segndx];
1534  newlink = &new_seg[new_segndx];
1535 
1536  for (currElement = *oldlink;
1537  currElement != NULL;
1538  currElement = nextElement)
1539  {
1540  nextElement = currElement->link;
1541  if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
1542  {
1543  *oldlink = currElement;
1544  oldlink = &currElement->link;
1545  }
1546  else
1547  {
1548  *newlink = currElement;
1549  newlink = &currElement->link;
1550  }
1551  }
1552  /* don't forget to terminate the rebuilt hash chains... */
1553  *oldlink = NULL;
1554  *newlink = NULL;
1555 
1556  return true;
1557 }
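Continuing the masks example from calc_bucket: when max_bucket grows from 5 to 6, new_bucket = 6 and old_bucket = 6 & 3 = 2, so only bucket 2's chain is walked; each entry stays in bucket 2 if calc_bucket of its stored hashvalue is still 2, and moves to bucket 6 otherwise. No other chain is touched.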
1558 
1559 
1560 static bool
1561 dir_realloc(HTAB *hashp)
1562 {
1563  HASHSEGMENT *p;
1564  HASHSEGMENT *old_p;
1565  long new_dsize;
1566  long old_dirsize;
1567  long new_dirsize;
1568 
1569  if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
1570  return false;
1571 
1572  /* Reallocate directory */
1573  new_dsize = hashp->hctl->dsize << 1;
1574  old_dirsize = hashp->hctl->dsize * sizeof(HASHSEGMENT);
1575  new_dirsize = new_dsize * sizeof(HASHSEGMENT);
1576 
1577  old_p = hashp->dir;
1578  CurrentDynaHashCxt = hashp->hcxt;
1579  p = (HASHSEGMENT *) hashp->alloc((Size) new_dirsize);
1580 
1581  if (p != NULL)
1582  {
1583  memcpy(p, old_p, old_dirsize);
1584  MemSet(((char *) p) + old_dirsize, 0, new_dirsize - old_dirsize);
1585  hashp->dir = p;
1586  hashp->hctl->dsize = new_dsize;
1587 
1588  /* XXX assume the allocator is palloc, so we know how to free */
1589  Assert(hashp->alloc == DynaHashAlloc);
1590  pfree(old_p);
1591 
1592  return true;
1593  }
1594 
1595  return false;
1596 }
1597 
1598 
1599 static HASHSEGMENT
1600 seg_alloc(HTAB *hashp)
1601 {
1602  HASHSEGMENT segp;
1603 
1604  CurrentDynaHashCxt = hashp->hcxt;
1605  segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);
1606 
1607  if (!segp)
1608  return NULL;
1609 
1610  MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);
1611 
1612  return segp;
1613 }
1614 
1615 /*
1616  * allocate some new elements and link them into the indicated free list
1617  */
1618 static bool
1619 element_alloc(HTAB *hashp, int nelem, int freelist_idx)
1620 {
1621  HASHHDR *hctl = hashp->hctl;
1622  Size elementSize;
1623  HASHELEMENT *firstElement;
1624  HASHELEMENT *tmpElement;
1625  HASHELEMENT *prevElement;
1626  int i;
1627 
1628  if (hashp->isfixed)
1629  return false;
1630 
1631  /* Each element has a HASHELEMENT header plus user data. */
1632  elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
1633 
1634  CurrentDynaHashCxt = hashp->hcxt;
1635  firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
1636 
1637  if (!firstElement)
1638  return false;
1639 
1640  /* prepare to link all the new entries into the freelist */
1641  prevElement = NULL;
1642  tmpElement = firstElement;
1643  for (i = 0; i < nelem; i++)
1644  {
1645  tmpElement->link = prevElement;
1646  prevElement = tmpElement;
1647  tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
1648  }
1649 
1650  /* if partitioned, must lock to touch freeList */
1651  if (IS_PARTITIONED(hctl))
1652  SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1653 
1654  /* freelist could be nonempty if two backends did this concurrently */
1655  firstElement->link = hctl->freeList[freelist_idx].freeList;
1656  hctl->freeList[freelist_idx].freeList = prevElement;
1657 
1658  if (IS_PARTITIONED(hctl))
1659  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1660 
1661  return true;
1662 }
1663 
1664 /* complain when we have detected a corrupted hashtable */
1665 static void
1666 hash_corrupted(HTAB *hashp)
1667 {
1668  /*
1669  * If the corruption is in a shared hashtable, we'd better force a
1670  * systemwide restart. Otherwise, just shut down this one backend.
1671  */
1672  if (hashp->isshared)
1673  elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
1674  else
1675  elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
1676 }
1677 
1678 /* calculate ceil(log base 2) of num */
1679 int
1680 my_log2(long num)
1681 {
1682  int i;
1683  long limit;
1684 
1685  /* guard against too-large input, which would put us into infinite loop */
1686  if (num > LONG_MAX / 2)
1687  num = LONG_MAX / 2;
1688 
1689  for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
1690  ;
1691  return i;
1692 }
1693 
1694 /* calculate first power of 2 >= num, bounded to what will fit in a long */
1695 static long
1696 next_pow2_long(long num)
1697 {
1698  /* my_log2's internal range check is sufficient */
1699  return 1L << my_log2(num);
1700 }
1701 
1702 /* calculate first power of 2 >= num, bounded to what will fit in an int */
1703 static int
1704 next_pow2_int(long num)
1705 {
1706  if (num > INT_MAX / 2)
1707  num = INT_MAX / 2;
1708  return 1 << my_log2(num);
1709 }
1710 
1711 
1712 /************************* SEQ SCAN TRACKING ************************/
1713 
1714 /*
1715  * We track active hash_seq_search scans here. The need for this mechanism
1716  * comes from the fact that a scan will get confused if a bucket split occurs
1717  * while it's in progress: it might visit entries twice, or even miss some
1718  * entirely (if it's partway through the same bucket that splits). Hence
1719  * we want to inhibit bucket splits if there are any active scans on the
1720  * table being inserted into. This is a fairly rare case in current usage,
1721  * so just postponing the split until the next insertion seems sufficient.
1722  *
1723  * Given present usages of the function, only a few scans are likely to be
1724  * open concurrently; so a finite-size stack of open scans seems sufficient,
1725  * and we don't worry that linear search is too slow. Note that we do
1726  * allow multiple scans of the same hashtable to be open concurrently.
1727  *
1728  * This mechanism can support concurrent scan and insertion in a shared
1729  * hashtable if it's the same backend doing both. It would fail otherwise,
1730  * but locking reasons seem to preclude any such scenario anyway, so we don't
1731  * worry.
1732  *
1733  * This arrangement is reasonably robust if a transient hashtable is deleted
1734  * without notifying us. The absolute worst case is we might inhibit splits
1735  * in another table created later at exactly the same address. We will give
1736  * a warning at transaction end for reference leaks, so any bugs leading to
1737  * lack of notification should be easy to catch.
1738  */
1739 
1740 #define MAX_SEQ_SCANS 100
1741 
1742 static HTAB *seq_scan_tables[MAX_SEQ_SCANS]; /* tables being scanned */
1743 static int seq_scan_level[MAX_SEQ_SCANS]; /* subtransaction nest level */
1744 static int num_seq_scans = 0;
1745 
1746 
1747 /* Register a table as having an active hash_seq_search scan */
1748 static void
1749 register_seq_scan(HTAB *hashp)
1750 {
1751  if (num_seq_scans >= MAX_SEQ_SCANS)
1752  elog(ERROR, "too many active hash_seq_search scans, cannot start one on \"%s\"",
1753  hashp->tabname);
1754  seq_scan_tables[num_seq_scans] = hashp;
1755  seq_scan_level[num_seq_scans] = GetCurrentTransactionNestLevel();
1756  num_seq_scans++;
1757 }
1758 
1759 /* Deregister an active scan */
1760 static void
1761 deregister_seq_scan(HTAB *hashp)
1762 {
1763  int i;
1764 
1765  /* Search backward since it's most likely at the stack top */
1766  for (i = num_seq_scans - 1; i >= 0; i--)
1767  {
1768  if (seq_scan_tables[i] == hashp)
1769  {
1770  seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1771  seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1772  num_seq_scans--;
1773  return;
1774  }
1775  }
1776  elog(ERROR, "no hash_seq_search scan for hash table \"%s\"",
1777  hashp->tabname);
1778 }
1779 
1780 /* Check if a table has any active scan */
1781 static bool
1782 has_seq_scans(HTAB *hashp)
1783 {
1784  int i;
1785 
1786  for (i = 0; i < num_seq_scans; i++)
1787  {
1788  if (seq_scan_tables[i] == hashp)
1789  return true;
1790  }
1791  return false;
1792 }
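This check is consumed by the insertion path: before attempting a bucket split, hash_search_with_hash_value verifies, among other conditions, that no scan is open on the table. A simplified sketch of that decision, assuming the surrounding context of that function (the exact condition ordering there may differ):

	/* Simplified sketch of the split decision in hash_search_with_hash_value */
	if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
		hctl->freeList[0].nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
		!has_seq_scans(hashp))
		(void) expand_table(hashp);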
1793 
1794 /* Clean up any open scans at end of transaction */
1795 void
1796 AtEOXact_HashTables(bool isCommit)
1797 {
1798  /*
1799  * During abort cleanup, open scans are expected; just silently clean 'em
1800  * out. An open scan at commit means someone forgot a hash_seq_term()
1801  * call, so complain.
1802  *
1803  * Note: it's tempting to try to print the tabname here, but refrain for
1804  * fear of touching deallocated memory. This isn't a user-facing message
1805  * anyway, so it needn't be pretty.
1806  */
1807  if (isCommit)
1808  {
1809  int i;
1810 
1811  for (i = 0; i < num_seq_scans; i++)
1812  {
1813  elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1814  seq_scan_tables[i]);
1815  }
1816  }
1817  num_seq_scans = 0;
1818 }
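Complementing the full-scan sketch earlier, the pattern this warning catches is an early exit that skips hash_seq_term. A hypothetical caller (entry_matches is an invented predicate) doing it correctly:

#include "postgres.h"
#include "utils/hsearch.h"

extern bool entry_matches(void *entry);	/* hypothetical predicate */

static void
find_first_match(HTAB *htab)
{
	HASH_SEQ_STATUS status;
	void	   *entry;

	hash_seq_init(&status, htab);
	while ((entry = hash_seq_search(&status)) != NULL)
	{
		if (entry_matches(entry))
		{
			hash_seq_term(&status);	/* required when abandoning a scan early */
			return;
		}
	}
	/* no hash_seq_term here: a completed scan deregisters itself */
}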
1819 
1820 /* Clean up any open scans at end of subtransaction */
1821 void
1822 AtEOSubXact_HashTables(bool isCommit, int nestDepth)
1823 {
1824  int i;
1825 
1826  /*
1827  * Search backward to make cleanup easy. Note we must check all entries,
1828  * not only those at the end of the array, because the deletion technique
1829  * doesn't keep them in order.
1830  */
1831  for (i = num_seq_scans - 1; i >= 0; i--)
1832  {
1833  if (seq_scan_level[i] >= nestDepth)
1834  {
1835  if (isCommit)
1836  elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1837  seq_scan_tables[i]);
1838  seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1839  seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1840  num_seq_scans--;
1841  }
1842  }
1843 }
Definition: dynahash.c:158