PostgreSQL Source Code  git master
dynahash.c File Reference
#include "postgres.h"
#include <limits.h>
#include "access/xact.h"
#include "common/hashfn.h"
#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
#include "utils/memutils.h"
Include dependency graph for dynahash.c:

Go to the source code of this file.

Data Structures

struct  FreeListData
 
struct  HASHHDR
 
struct  HTAB
 

Macros

#define DEF_SEGSIZE   256
 
#define DEF_SEGSIZE_SHIFT   8 /* must be log2(DEF_SEGSIZE) */
 
#define DEF_DIRSIZE   256
 
#define NUM_FREELISTS   32
 
#define IS_PARTITIONED(hctl)   ((hctl)->num_partitions != 0)
 
#define FREELIST_IDX(hctl, hashcode)    (IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)
 
#define ELEMENTKEY(helem)   (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))
 
#define ELEMENT_FROM_KEY(key)    ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))
 
#define MOD(x, y)   ((x) & ((y)-1))
 
#define MAX_SEQ_SCANS   100
 

Typedefs

typedef HASHELEMENT * HASHBUCKET
 
typedef HASHBUCKET * HASHSEGMENT
 

Functions

static void * DynaHashAlloc (Size size)
 
static HASHSEGMENT seg_alloc (HTAB *hashp)
 
static bool element_alloc (HTAB *hashp, int nelem, int freelist_idx)
 
static bool dir_realloc (HTAB *hashp)
 
static bool expand_table (HTAB *hashp)
 
static HASHBUCKET get_hash_entry (HTAB *hashp, int freelist_idx)
 
static void hdefault (HTAB *hashp)
 
static int choose_nelem_alloc (Size entrysize)
 
static bool init_htab (HTAB *hashp, long nelem)
 
static void hash_corrupted (HTAB *hashp) pg_attribute_noreturn()
 
static uint32 hash_initial_lookup (HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
 
static long next_pow2_long (long num)
 
static int next_pow2_int (long num)
 
static void register_seq_scan (HTAB *hashp)
 
static void deregister_seq_scan (HTAB *hashp)
 
static bool has_seq_scans (HTAB *hashp)
 
static int string_compare (const char *key1, const char *key2, Size keysize)
 
HTAB * hash_create (const char *tabname, long nelem, const HASHCTL *info, int flags)
 
Size hash_estimate_size (long num_entries, Size entrysize)
 
long hash_select_dirsize (long num_entries)
 
Size hash_get_shared_size (HASHCTL *info, int flags)
 
void hash_destroy (HTAB *hashp)
 
void hash_stats (const char *where, HTAB *hashp)
 
uint32 get_hash_value (HTAB *hashp, const void *keyPtr)
 
static uint32 calc_bucket (HASHHDR *hctl, uint32 hash_val)
 
void * hash_search (HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
 
void * hash_search_with_hash_value (HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
 
bool hash_update_hash_key (HTAB *hashp, void *existingEntry, const void *newKeyPtr)
 
long hash_get_num_entries (HTAB *hashp)
 
void hash_seq_init (HASH_SEQ_STATUS *status, HTAB *hashp)
 
void * hash_seq_search (HASH_SEQ_STATUS *status)
 
void hash_seq_term (HASH_SEQ_STATUS *status)
 
void hash_freeze (HTAB *hashp)
 
int my_log2 (long num)
 
void AtEOXact_HashTables (bool isCommit)
 
void AtEOSubXact_HashTables (bool isCommit, int nestDepth)
 

Variables

static MemoryContext CurrentDynaHashCxt = NULL
 
static HTAB * seq_scan_tables [MAX_SEQ_SCANS]
 
static int seq_scan_level [MAX_SEQ_SCANS]
 
static int num_seq_scans = 0
 

Macro Definition Documentation

◆ DEF_DIRSIZE

#define DEF_DIRSIZE   256

Definition at line 125 of file dynahash.c.

◆ DEF_SEGSIZE

#define DEF_SEGSIZE   256

Definition at line 123 of file dynahash.c.

◆ DEF_SEGSIZE_SHIFT

#define DEF_SEGSIZE_SHIFT   8 /* must be log2(DEF_SEGSIZE) */

Definition at line 124 of file dynahash.c.

◆ ELEMENT_FROM_KEY

#define ELEMENT_FROM_KEY (   key)     ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))

Definition at line 249 of file dynahash.c.

◆ ELEMENTKEY

#define ELEMENTKEY (   helem)    (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))

Definition at line 244 of file dynahash.c.

◆ FREELIST_IDX

#define FREELIST_IDX (   hctl,
  hashcode 
)     (IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)

Definition at line 212 of file dynahash.c.

◆ IS_PARTITIONED

#define IS_PARTITIONED (   hctl)    ((hctl)->num_partitions != 0)

Definition at line 210 of file dynahash.c.

◆ MAX_SEQ_SCANS

#define MAX_SEQ_SCANS   100

Definition at line 1813 of file dynahash.c.

◆ MOD

#define MOD (   x,
  y 
)    ((x) & ((y)-1))

Definition at line 255 of file dynahash.c.
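The bitmask form of MOD is equivalent to the arithmetic remainder only when the divisor is a power of 2, which dynahash guarantees for its segment size and related quantities. A quick self-contained check, with illustrative values that do not come from the source:

#include <assert.h>

int
main(void)
{
    unsigned int x = 1234;

    /* 256 is a power of 2, so the mask trick matches the remainder */
    assert((x & (256 - 1)) == x % 256);   /* both are 210 */

    /* for a non-power-of-2 divisor the identity breaks down:
     * (x & (100 - 1)) is 66, while x % 100 is 34 */
    return 0;
}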

◆ NUM_FREELISTS

#define NUM_FREELISTS   32

Definition at line 128 of file dynahash.c.

Typedef Documentation

◆ HASHBUCKET

Definition at line 131 of file dynahash.c.

◆ HASHSEGMENT

Definition at line 134 of file dynahash.c.

Function Documentation

◆ AtEOSubXact_HashTables()

void AtEOSubXact_HashTables ( bool  isCommit,
int  nestDepth 
)

Definition at line 1895 of file dynahash.c.

1896 {
1897  int i;
1898 
1899  /*
1900  * Search backward to make cleanup easy. Note we must check all entries,
1901  * not only those at the end of the array, because deletion technique
1902  * doesn't keep them in order.
1903  */
1904  for (i = num_seq_scans - 1; i >= 0; i--)
1905  {
1906  if (seq_scan_level[i] >= nestDepth)
1907  {
1908  if (isCommit)
1909  elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1910  seq_scan_tables[i]);
1911  seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1912  seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1913  num_seq_scans--;
1914  }
1915  }
1916 }
static HTAB * seq_scan_tables[MAX_SEQ_SCANS]
Definition: dynahash.c:1815
static int seq_scan_level[MAX_SEQ_SCANS]
Definition: dynahash.c:1816
static int num_seq_scans
Definition: dynahash.c:1817
#define WARNING
Definition: elog.h:36
#define elog(elevel,...)
Definition: elog.h:224
int i
Definition: isn.c:73

References elog, i, num_seq_scans, seq_scan_level, seq_scan_tables, and WARNING.

Referenced by AbortSubTransaction(), and CommitSubTransaction().

◆ AtEOXact_HashTables()

void AtEOXact_HashTables ( bool  isCommit)

Definition at line 1869 of file dynahash.c.

1870 {
1871  /*
1872  * During abort cleanup, open scans are expected; just silently clean 'em
1873  * out. An open scan at commit means someone forgot a hash_seq_term()
1874  * call, so complain.
1875  *
1876  * Note: it's tempting to try to print the tabname here, but refrain for
1877  * fear of touching deallocated memory. This isn't a user-facing message
1878  * anyway, so it needn't be pretty.
1879  */
1880  if (isCommit)
1881  {
1882  int i;
1883 
1884  for (i = 0; i < num_seq_scans; i++)
1885  {
1886  elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1887  seq_scan_tables[i]);
1888  }
1889  }
1890  num_seq_scans = 0;
1891 }

References elog, i, num_seq_scans, seq_scan_tables, and WARNING.

Referenced by AbortTransaction(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), pgarch_archiveXlog(), PrepareTransaction(), WalSummarizerMain(), and WalWriterMain().

◆ calc_bucket()

static uint32 calc_bucket ( HASHHDR *  hctl,
uint32  hash_val 
)
inlinestatic

Definition at line 918 of file dynahash.c.

919 {
920  uint32 bucket;
921 
922  bucket = hash_val & hctl->high_mask;
923  if (bucket > hctl->max_bucket)
924  bucket = bucket & hctl->low_mask;
925 
926  return bucket;
927 }
unsigned int uint32
Definition: c.h:506
uint32 high_mask
Definition: dynahash.c:187
uint32 max_bucket
Definition: dynahash.c:186
uint32 low_mask
Definition: dynahash.c:188

References HASHHDR::high_mask, HASHHDR::low_mask, and HASHHDR::max_bucket.

Referenced by expand_table(), and hash_initial_lookup().
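To make the masking step concrete, here is a small sketch with made-up field values (max_bucket = 5, low_mask = 3, high_mask = 7), not taken from the source: any hash value that maps to a bucket that has not been created yet (6 or 7) is folded back with the previous, smaller mask.

/* Illustrative values only: a table that currently has buckets 0..5. */
static unsigned int
calc_bucket_demo(unsigned int hash_val)
{
    unsigned int max_bucket = 5;    /* highest bucket that exists so far */
    unsigned int high_mask = 7;     /* mask for the next power-of-2 size */
    unsigned int low_mask = 3;      /* mask for the previous power-of-2 size */
    unsigned int bucket;

    bucket = hash_val & high_mask;  /* candidate bucket in 0..7 */
    if (bucket > max_bucket)        /* buckets 6 and 7 have not been split off yet */
        bucket = bucket & low_mask; /* ...so fold them back onto buckets 2 and 3 */

    return bucket;
}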

◆ choose_nelem_alloc()

static int choose_nelem_alloc ( Size  entrysize)
static

Definition at line 656 of file dynahash.c.

657 {
658  int nelem_alloc;
659  Size elementSize;
660  Size allocSize;
661 
662  /* Each element has a HASHELEMENT header plus user data. */
663  /* NB: this had better match element_alloc() */
664  elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
665 
666  /*
667  * The idea here is to choose nelem_alloc at least 32, but round up so
668  * that the allocation request will be a power of 2 or just less. This
669  * makes little difference for hash tables in shared memory, but for hash
670  * tables managed by palloc, the allocation request will be rounded up to
671  * a power of 2 anyway. If we fail to take this into account, we'll waste
672  * as much as half the allocated space.
673  */
674  allocSize = 32 * 4; /* assume elementSize at least 8 */
675  do
676  {
677  allocSize <<= 1;
678  nelem_alloc = allocSize / elementSize;
679  } while (nelem_alloc < 32);
680 
681  return nelem_alloc;
682 }
#define MAXALIGN(LEN)
Definition: c.h:811
size_t Size
Definition: c.h:605

References MAXALIGN.

Referenced by hash_estimate_size(), and init_htab().
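A worked example of the sizing loop, assuming an illustrative elementSize of 48 bytes (not a value from the source): allocSize doubles from 128 through 256, 512 and 1024 to 2048, where 2048 / 48 = 42 first reaches the minimum of 32, so each chunk holds 42 elements (about 2016 bytes, just under the power-of-2 request).

#include <stddef.h>

/* Stand-alone reproduction of the loop with elementSize = 48 (illustrative). */
static int
choose_nelem_alloc_demo(void)
{
    int     nelem_alloc;
    size_t  elementSize = 48;
    size_t  allocSize = 32 * 4;     /* same starting point as the real code */

    do
    {
        allocSize <<= 1;                        /* 256, 512, 1024, 2048 */
        nelem_alloc = allocSize / elementSize;  /* 5, 10, 21, 42 */
    } while (nelem_alloc < 32);

    return nelem_alloc;             /* 42 */
}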

◆ deregister_seq_scan()

static void deregister_seq_scan ( HTAB *  hashp)
static

Definition at line 1834 of file dynahash.c.

1835 {
1836  int i;
1837 
1838  /* Search backward since it's most likely at the stack top */
1839  for (i = num_seq_scans - 1; i >= 0; i--)
1840  {
1841  if (seq_scan_tables[i] == hashp)
1842  {
1843  seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1844  seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1845  num_seq_scans--;
1846  return;
1847  }
1848  }
1849  elog(ERROR, "no hash_seq_search scan for hash table \"%s\"",
1850  hashp->tabname);
1851 }
#define ERROR
Definition: elog.h:39
char * tabname
Definition: dynahash.c:228

References elog, ERROR, i, num_seq_scans, seq_scan_level, seq_scan_tables, and HTAB::tabname.

Referenced by hash_seq_term().

◆ dir_realloc()

static bool dir_realloc ( HTAB *  hashp)
static

Definition at line 1605 of file dynahash.c.

1606 {
1607  HASHSEGMENT *p;
1608  HASHSEGMENT *old_p;
1609  long new_dsize;
1610  long old_dirsize;
1611  long new_dirsize;
1612 
1613  if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
1614  return false;
1615 
1616  /* Reallocate directory */
1617  new_dsize = hashp->hctl->dsize << 1;
1618  old_dirsize = hashp->hctl->dsize * sizeof(HASHSEGMENT);
1619  new_dirsize = new_dsize * sizeof(HASHSEGMENT);
1620 
1621  old_p = hashp->dir;
1622  CurrentDynaHashCxt = hashp->hcxt;
1623  p = (HASHSEGMENT *) hashp->alloc((Size) new_dirsize);
1624 
1625  if (p != NULL)
1626  {
1627  memcpy(p, old_p, old_dirsize);
1628  MemSet(((char *) p) + old_dirsize, 0, new_dirsize - old_dirsize);
1629  hashp->dir = p;
1630  hashp->hctl->dsize = new_dsize;
1631 
1632  /* XXX assume the allocator is palloc, so we know how to free */
1633  Assert(hashp->alloc == DynaHashAlloc);
1634  pfree(old_p);
1635 
1636  return true;
1637  }
1638 
1639  return false;
1640 }
#define Assert(condition)
Definition: c.h:858
#define MemSet(start, val, len)
Definition: c.h:1020
static void * DynaHashAlloc(Size size)
Definition: dynahash.c:291
static MemoryContext CurrentDynaHashCxt
Definition: dynahash.c:288
HASHBUCKET * HASHSEGMENT
Definition: dynahash.c:134
#define NO_MAX_DSIZE
Definition: hsearch.h:108
void pfree(void *pointer)
Definition: mcxt.c:1520
long max_dsize
Definition: dynahash.c:194
long dsize
Definition: dynahash.c:184
HASHHDR * hctl
Definition: dynahash.c:221
MemoryContext hcxt
Definition: dynahash.c:227
HashAllocFunc alloc
Definition: dynahash.c:226
HASHSEGMENT * dir
Definition: dynahash.c:222

References HTAB::alloc, Assert, CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, DynaHashAlloc(), HTAB::hctl, HTAB::hcxt, HASHHDR::max_dsize, MemSet, NO_MAX_DSIZE, and pfree().

Referenced by expand_table().

◆ DynaHashAlloc()

static void * DynaHashAlloc ( Size  size)
static

Definition at line 291 of file dynahash.c.

292 {
293  Assert(MemoryContextIsValid(CurrentDynaHashCxt));
294  return MemoryContextAllocExtended(CurrentDynaHashCxt, size,
295  MCXT_ALLOC_NO_OOM);
296 }
#define MCXT_ALLOC_NO_OOM
Definition: fe_memutils.h:17
void * MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
Definition: mcxt.c:1237
#define MemoryContextIsValid(context)
Definition: memnodes.h:145
static pg_noinline void Size size
Definition: slab.c:607

References Assert, CurrentDynaHashCxt, MCXT_ALLOC_NO_OOM, MemoryContextAllocExtended(), MemoryContextIsValid, and size.

Referenced by dir_realloc(), hash_create(), and hash_destroy().

◆ element_alloc()

static bool element_alloc ( HTAB *  hashp,
int  nelem,
int  freelist_idx 
)
static

Definition at line 1663 of file dynahash.c.

1664 {
1665  HASHHDR *hctl = hashp->hctl;
1666  Size elementSize;
1667  HASHELEMENT *firstElement;
1668  HASHELEMENT *tmpElement;
1669  HASHELEMENT *prevElement;
1670  int i;
1671 
1672  if (hashp->isfixed)
1673  return false;
1674 
1675  /* Each element has a HASHELEMENT header plus user data. */
1676  elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
1677 
1678  CurrentDynaHashCxt = hashp->hcxt;
1679  firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
1680 
1681  if (!firstElement)
1682  return false;
1683 
1684  /* prepare to link all the new entries into the freelist */
1685  prevElement = NULL;
1686  tmpElement = firstElement;
1687  for (i = 0; i < nelem; i++)
1688  {
1689  tmpElement->link = prevElement;
1690  prevElement = tmpElement;
1691  tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
1692  }
1693 
1694  /* if partitioned, must lock to touch freeList */
1695  if (IS_PARTITIONED(hctl))
1696  SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1697 
1698  /* freelist could be nonempty if two backends did this concurrently */
1699  firstElement->link = hctl->freeList[freelist_idx].freeList;
1700  hctl->freeList[freelist_idx].freeList = prevElement;
1701 
1702  if (IS_PARTITIONED(hctl))
1703  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1704 
1705  return true;
1706 }
#define IS_PARTITIONED(hctl)
Definition: dynahash.c:210
#define SpinLockRelease(lock)
Definition: spin.h:64
#define SpinLockAcquire(lock)
Definition: spin.h:62
slock_t mutex
Definition: dynahash.c:155
HASHELEMENT * freeList
Definition: dynahash.c:157
struct HASHELEMENT * link
Definition: hsearch.h:53
FreeListData freeList[NUM_FREELISTS]
Definition: dynahash.c:180
Size entrysize
Definition: dynahash.c:192
bool isfixed
Definition: dynahash.c:230

References HTAB::alloc, CurrentDynaHashCxt, HASHHDR::entrysize, FreeListData::freeList, HASHHDR::freeList, HTAB::hctl, HTAB::hcxt, i, IS_PARTITIONED, HTAB::isfixed, HASHELEMENT::link, MAXALIGN, FreeListData::mutex, SpinLockAcquire, and SpinLockRelease.

Referenced by get_hash_entry(), and hash_create().

◆ expand_table()

static bool expand_table ( HTAB *  hashp)
static

Definition at line 1508 of file dynahash.c.

1509 {
1510  HASHHDR *hctl = hashp->hctl;
1511  HASHSEGMENT old_seg,
1512  new_seg;
1513  long old_bucket,
1514  new_bucket;
1515  long new_segnum,
1516  new_segndx;
1517  long old_segnum,
1518  old_segndx;
1519  HASHBUCKET *oldlink,
1520  *newlink;
1521  HASHBUCKET currElement,
1522  nextElement;
1523 
1524  Assert(!IS_PARTITIONED(hctl));
1525 
1526 #ifdef HASH_STATISTICS
1527  hash_expansions++;
1528 #endif
1529 
1530  new_bucket = hctl->max_bucket + 1;
1531  new_segnum = new_bucket >> hashp->sshift;
1532  new_segndx = MOD(new_bucket, hashp->ssize);
1533 
1534  if (new_segnum >= hctl->nsegs)
1535  {
1536  /* Allocate new segment if necessary -- could fail if dir full */
1537  if (new_segnum >= hctl->dsize)
1538  if (!dir_realloc(hashp))
1539  return false;
1540  if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
1541  return false;
1542  hctl->nsegs++;
1543  }
1544 
1545  /* OK, we created a new bucket */
1546  hctl->max_bucket++;
1547 
1548  /*
1549  * *Before* changing masks, find old bucket corresponding to same hash
1550  * values; values in that bucket may need to be relocated to new bucket.
1551  * Note that new_bucket is certainly larger than low_mask at this point,
1552  * so we can skip the first step of the regular hash mask calc.
1553  */
1554  old_bucket = (new_bucket & hctl->low_mask);
1555 
1556  /*
1557  * If we crossed a power of 2, readjust masks.
1558  */
1559  if ((uint32) new_bucket > hctl->high_mask)
1560  {
1561  hctl->low_mask = hctl->high_mask;
1562  hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
1563  }
1564 
1565  /*
1566  * Relocate records to the new bucket. NOTE: because of the way the hash
1567  * masking is done in calc_bucket, only one old bucket can need to be
1568  * split at this point. With a different way of reducing the hash value,
1569  * that might not be true!
1570  */
1571  old_segnum = old_bucket >> hashp->sshift;
1572  old_segndx = MOD(old_bucket, hashp->ssize);
1573 
1574  old_seg = hashp->dir[old_segnum];
1575  new_seg = hashp->dir[new_segnum];
1576 
1577  oldlink = &old_seg[old_segndx];
1578  newlink = &new_seg[new_segndx];
1579 
1580  for (currElement = *oldlink;
1581  currElement != NULL;
1582  currElement = nextElement)
1583  {
1584  nextElement = currElement->link;
1585  if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
1586  {
1587  *oldlink = currElement;
1588  oldlink = &currElement->link;
1589  }
1590  else
1591  {
1592  *newlink = currElement;
1593  newlink = &currElement->link;
1594  }
1595  }
1596  /* don't forget to terminate the rebuilt hash chains... */
1597  *oldlink = NULL;
1598  *newlink = NULL;
1599 
1600  return true;
1601 }
static HASHSEGMENT seg_alloc(HTAB *hashp)
Definition: dynahash.c:1644
#define MOD(x, y)
Definition: dynahash.c:255
static bool dir_realloc(HTAB *hashp)
Definition: dynahash.c:1605
static uint32 calc_bucket(HASHHDR *hctl, uint32 hash_val)
Definition: dynahash.c:918
uint32 hashvalue
Definition: hsearch.h:54
long nsegs
Definition: dynahash.c:185
long ssize
Definition: dynahash.c:237
int sshift
Definition: dynahash.c:238

References Assert, calc_bucket(), HTAB::dir, dir_realloc(), HASHHDR::dsize, HASHELEMENT::hashvalue, HTAB::hctl, HASHHDR::high_mask, IS_PARTITIONED, HASHELEMENT::link, HASHHDR::low_mask, HASHHDR::max_bucket, MOD, HASHHDR::nsegs, seg_alloc(), HTAB::sshift, and HTAB::ssize.

Referenced by hash_search_with_hash_value().

◆ get_hash_entry()

static HASHBUCKET get_hash_entry ( HTAB *  hashp,
int  freelist_idx 
)
static

Definition at line 1256 of file dynahash.c.

1257 {
1258  HASHHDR *hctl = hashp->hctl;
1259  HASHBUCKET newElement;
1260 
1261  for (;;)
1262  {
1263  /* if partitioned, must lock to touch nentries and freeList */
1264  if (IS_PARTITIONED(hctl))
1265  SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1266 
1267  /* try to get an entry from the freelist */
1268  newElement = hctl->freeList[freelist_idx].freeList;
1269 
1270  if (newElement != NULL)
1271  break;
1272 
1273  if (IS_PARTITIONED(hctl))
1274  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1275 
1276  /*
1277  * No free elements in this freelist. In a partitioned table, there
1278  * might be entries in other freelists, but to reduce contention we
1279  * prefer to first try to get another chunk of buckets from the main
1280  * shmem allocator. If that fails, though, we *MUST* root through all
1281  * the other freelists before giving up. There are multiple callers
1282  * that assume that they can allocate every element in the initially
1283  * requested table size, or that deleting an element guarantees they
1284  * can insert a new element, even if shared memory is entirely full.
1285  * Failing because the needed element is in a different freelist is
1286  * not acceptable.
1287  */
1288  if (!element_alloc(hashp, hctl->nelem_alloc, freelist_idx))
1289  {
1290  int borrow_from_idx;
1291 
1292  if (!IS_PARTITIONED(hctl))
1293  return NULL; /* out of memory */
1294 
1295  /* try to borrow element from another freelist */
1296  borrow_from_idx = freelist_idx;
1297  for (;;)
1298  {
1299  borrow_from_idx = (borrow_from_idx + 1) % NUM_FREELISTS;
1300  if (borrow_from_idx == freelist_idx)
1301  break; /* examined all freelists, fail */
1302 
1303  SpinLockAcquire(&(hctl->freeList[borrow_from_idx].mutex));
1304  newElement = hctl->freeList[borrow_from_idx].freeList;
1305 
1306  if (newElement != NULL)
1307  {
1308  hctl->freeList[borrow_from_idx].freeList = newElement->link;
1309  SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1310 
1311  /* careful: count the new element in its proper freelist */
1312  SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1313  hctl->freeList[freelist_idx].nentries++;
1314  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1315 
1316  return newElement;
1317  }
1318 
1319  SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1320  }
1321 
1322  /* no elements available to borrow either, so out of memory */
1323  return NULL;
1324  }
1325  }
1326 
1327  /* remove entry from freelist, bump nentries */
1328  hctl->freeList[freelist_idx].freeList = newElement->link;
1329  hctl->freeList[freelist_idx].nentries++;
1330 
1331  if (IS_PARTITIONED(hctl))
1332  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1333 
1334  return newElement;
1335 }
static bool element_alloc(HTAB *hashp, int nelem, int freelist_idx)
Definition: dynahash.c:1663
#define NUM_FREELISTS
Definition: dynahash.c:128
long nentries
Definition: dynahash.c:156
int nelem_alloc
Definition: dynahash.c:197

References element_alloc(), FreeListData::freeList, HASHHDR::freeList, HTAB::hctl, IS_PARTITIONED, HASHELEMENT::link, FreeListData::mutex, HASHHDR::nelem_alloc, FreeListData::nentries, NUM_FREELISTS, SpinLockAcquire, and SpinLockRelease.

Referenced by hash_search_with_hash_value().

◆ get_hash_value()

uint32 get_hash_value ( HTAB *  hashp,
const void *  keyPtr 
)

Definition at line 911 of file dynahash.c.

912 {
913  return hashp->hash(keyPtr, hashp->keysize);
914 }
HashValueFunc hash
Definition: dynahash.c:223
Size keysize
Definition: dynahash.c:236

References HTAB::hash, and HTAB::keysize.

Referenced by BufTableHashCode(), and LockTagHashCode().

◆ has_seq_scans()

static bool has_seq_scans ( HTAB *  hashp)
static

Definition at line 1855 of file dynahash.c.

1856 {
1857  int i;
1858 
1859  for (i = 0; i < num_seq_scans; i++)
1860  {
1861  if (seq_scan_tables[i] == hashp)
1862  return true;
1863  }
1864  return false;
1865 }

References i, num_seq_scans, and seq_scan_tables.

Referenced by hash_freeze(), and hash_search_with_hash_value().

◆ hash_corrupted()

static void hash_corrupted ( HTAB *  hashp)
static

Definition at line 1737 of file dynahash.c.

1738 {
1739  /*
1740  * If the corruption is in a shared hashtable, we'd better force a
1741  * systemwide restart. Otherwise, just shut down this one backend.
1742  */
1743  if (hashp->isshared)
1744  elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
1745  else
1746  elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
1747 }
#define FATAL
Definition: elog.h:41
#define PANIC
Definition: elog.h:42
bool isshared
Definition: dynahash.c:229

References elog, FATAL, HTAB::isshared, PANIC, and HTAB::tabname.

Referenced by hash_initial_lookup().

◆ hash_create()

HTAB* hash_create ( const char *  tabname,
long  nelem,
const HASHCTL *  info,
int  flags 
)

Definition at line 352 of file dynahash.c.

353 {
354  HTAB *hashp;
355  HASHHDR *hctl;
356 
357  /*
358  * Hash tables now allocate space for key and data, but you have to say
359  * how much space to allocate.
360  */
361  Assert(flags & HASH_ELEM);
362  Assert(info->keysize > 0);
363  Assert(info->entrysize >= info->keysize);
364 
365  /*
366  * For shared hash tables, we have a local hash header (HTAB struct) that
367  * we allocate in TopMemoryContext; all else is in shared memory.
368  *
369  * For non-shared hash tables, everything including the hash header is in
370  * a memory context created specially for the hash table --- this makes
371  * hash_destroy very simple. The memory context is made a child of either
372  * a context specified by the caller, or TopMemoryContext if nothing is
373  * specified.
374  */
375  if (flags & HASH_SHARED_MEM)
376  {
377  /* Set up to allocate the hash header */
378  CurrentDynaHashCxt = TopMemoryContext;
379  }
380  else
381  {
382  /* Create the hash table's private memory context */
383  if (flags & HASH_CONTEXT)
384  CurrentDynaHashCxt = info->hcxt;
385  else
386  CurrentDynaHashCxt = TopMemoryContext;
387  CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
388  "dynahash",
389  ALLOCSET_DEFAULT_SIZES);
390  }
391 
392  /* Initialize the hash header, plus a copy of the table name */
393  hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1);
394  MemSet(hashp, 0, sizeof(HTAB));
395 
396  hashp->tabname = (char *) (hashp + 1);
397  strcpy(hashp->tabname, tabname);
398 
399  /* If we have a private context, label it with hashtable's name */
400  if (!(flags & HASH_SHARED_MEM))
401  MemoryContextSetIdentifier(CurrentDynaHashCxt, hashp->tabname);
402 
403  /*
404  * Select the appropriate hash function (see comments at head of file).
405  */
406  if (flags & HASH_FUNCTION)
407  {
408  Assert(!(flags & (HASH_BLOBS | HASH_STRINGS)));
409  hashp->hash = info->hash;
410  }
411  else if (flags & HASH_BLOBS)
412  {
413  Assert(!(flags & HASH_STRINGS));
414  /* We can optimize hashing for common key sizes */
415  if (info->keysize == sizeof(uint32))
416  hashp->hash = uint32_hash;
417  else
418  hashp->hash = tag_hash;
419  }
420  else
421  {
422  /*
423  * string_hash used to be considered the default hash method, and in a
424  * non-assert build it effectively still is. But we now consider it
425  * an assertion error to not say HASH_STRINGS explicitly. To help
426  * catch mistaken usage of HASH_STRINGS, we also insist on a
427  * reasonably long string length: if the keysize is only 4 or 8 bytes,
428  * it's almost certainly an integer or pointer not a string.
429  */
430  Assert(flags & HASH_STRINGS);
431  Assert(info->keysize > 8);
432 
433  hashp->hash = string_hash;
434  }
435 
436  /*
437  * If you don't specify a match function, it defaults to string_compare if
438  * you used string_hash, and to memcmp otherwise.
439  *
440  * Note: explicitly specifying string_hash is deprecated, because this
441  * might not work for callers in loadable modules on some platforms due to
442  * referencing a trampoline instead of the string_hash function proper.
443  * Specify HASH_STRINGS instead.
444  */
445  if (flags & HASH_COMPARE)
446  hashp->match = info->match;
447  else if (hashp->hash == string_hash)
448  hashp->match = (HashCompareFunc) string_compare;
449  else
450  hashp->match = memcmp;
451 
452  /*
453  * Similarly, the key-copying function defaults to strlcpy or memcpy.
454  */
455  if (flags & HASH_KEYCOPY)
456  hashp->keycopy = info->keycopy;
457  else if (hashp->hash == string_hash)
458  {
459  /*
460  * The signature of keycopy is meant for memcpy(), which returns
461  * void*, but strlcpy() returns size_t. Since we never use the return
462  * value of keycopy, and size_t is pretty much always the same size as
463  * void *, this should be safe. The extra cast in the middle is to
464  * avoid warnings from -Wcast-function-type.
465  */
466  hashp->keycopy = (HashCopyFunc) (pg_funcptr_t) strlcpy;
467  }
468  else
469  hashp->keycopy = memcpy;
470 
471  /* And select the entry allocation function, too. */
472  if (flags & HASH_ALLOC)
473  hashp->alloc = info->alloc;
474  else
475  hashp->alloc = DynaHashAlloc;
476 
477  if (flags & HASH_SHARED_MEM)
478  {
479  /*
480  * ctl structure and directory are preallocated for shared memory
481  * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
482  * well.
483  */
484  hashp->hctl = info->hctl;
485  hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
486  hashp->hcxt = NULL;
487  hashp->isshared = true;
488 
489  /* hash table already exists, we're just attaching to it */
490  if (flags & HASH_ATTACH)
491  {
492  /* make local copies of some heavily-used values */
493  hctl = hashp->hctl;
494  hashp->keysize = hctl->keysize;
495  hashp->ssize = hctl->ssize;
496  hashp->sshift = hctl->sshift;
497 
498  return hashp;
499  }
500  }
501  else
502  {
503  /* setup hash table defaults */
504  hashp->hctl = NULL;
505  hashp->dir = NULL;
506  hashp->hcxt = CurrentDynaHashCxt;
507  hashp->isshared = false;
508  }
509 
510  if (!hashp->hctl)
511  {
512  hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
513  if (!hashp->hctl)
514  ereport(ERROR,
515  (errcode(ERRCODE_OUT_OF_MEMORY),
516  errmsg("out of memory")));
517  }
518 
519  hashp->frozen = false;
520 
521  hdefault(hashp);
522 
523  hctl = hashp->hctl;
524 
525  if (flags & HASH_PARTITION)
526  {
527  /* Doesn't make sense to partition a local hash table */
528  Assert(flags & HASH_SHARED_MEM);
529 
530  /*
531  * The number of partitions had better be a power of 2. Also, it must
532  * be less than INT_MAX (see init_htab()), so call the int version of
533  * next_pow2.
534  */
535  Assert(info->num_partitions == next_pow2_int(info->num_partitions));
536 
537  hctl->num_partitions = info->num_partitions;
538  }
539 
540  if (flags & HASH_SEGMENT)
541  {
542  hctl->ssize = info->ssize;
543  hctl->sshift = my_log2(info->ssize);
544  /* ssize had better be a power of 2 */
545  Assert(hctl->ssize == (1L << hctl->sshift));
546  }
547 
548  /*
549  * SHM hash tables have fixed directory size passed by the caller.
550  */
551  if (flags & HASH_DIRSIZE)
552  {
553  hctl->max_dsize = info->max_dsize;
554  hctl->dsize = info->dsize;
555  }
556 
557  /* remember the entry sizes, too */
558  hctl->keysize = info->keysize;
559  hctl->entrysize = info->entrysize;
560 
561  /* make local copies of heavily-used constant fields */
562  hashp->keysize = hctl->keysize;
563  hashp->ssize = hctl->ssize;
564  hashp->sshift = hctl->sshift;
565 
566  /* Build the hash directory structure */
567  if (!init_htab(hashp, nelem))
568  elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname);
569 
570  /*
571  * For a shared hash table, preallocate the requested number of elements.
572  * This reduces problems with run-time out-of-shared-memory conditions.
573  *
574  * For a non-shared hash table, preallocate the requested number of
575  * elements if it's less than our chosen nelem_alloc. This avoids wasting
576  * space if the caller correctly estimates a small table size.
577  */
578  if ((flags & HASH_SHARED_MEM) ||
579  nelem < hctl->nelem_alloc)
580  {
581  int i,
582  freelist_partitions,
583  nelem_alloc,
584  nelem_alloc_first;
585 
586  /*
587  * If hash table is partitioned, give each freelist an equal share of
588  * the initial allocation. Otherwise only freeList[0] is used.
589  */
590  if (IS_PARTITIONED(hashp->hctl))
591  freelist_partitions = NUM_FREELISTS;
592  else
593  freelist_partitions = 1;
594 
595  nelem_alloc = nelem / freelist_partitions;
596  if (nelem_alloc <= 0)
597  nelem_alloc = 1;
598 
599  /*
600  * Make sure we'll allocate all the requested elements; freeList[0]
601  * gets the excess if the request isn't divisible by NUM_FREELISTS.
602  */
603  if (nelem_alloc * freelist_partitions < nelem)
604  nelem_alloc_first =
605  nelem - nelem_alloc * (freelist_partitions - 1);
606  else
607  nelem_alloc_first = nelem_alloc;
608 
609  for (i = 0; i < freelist_partitions; i++)
610  {
611  int temp = (i == 0) ? nelem_alloc_first : nelem_alloc;
612 
613  if (!element_alloc(hashp, temp, i))
614  ereport(ERROR,
615  (errcode(ERRCODE_OUT_OF_MEMORY),
616  errmsg("out of memory")));
617  }
618  }
619 
620  if (flags & HASH_FIXED_SIZE)
621  hashp->isfixed = true;
622  return hashp;
623 }
void(* pg_funcptr_t)(void)
Definition: c.h:388
static bool init_htab(HTAB *hashp, long nelem)
Definition: dynahash.c:689
static int next_pow2_int(long num)
Definition: dynahash.c:1777
static int string_compare(const char *key1, const char *key2, Size keysize)
Definition: dynahash.c:307
static void hdefault(HTAB *hashp)
Definition: dynahash.c:629
int my_log2(long num)
Definition: dynahash.c:1751
int errcode(int sqlerrcode)
Definition: elog.c:859
int errmsg(const char *fmt,...)
Definition: elog.c:1072
#define ereport(elevel,...)
Definition: elog.h:149
uint32 tag_hash(const void *key, Size keysize)
Definition: hashfn.c:677
uint32 uint32_hash(const void *key, Size keysize)
Definition: hashfn.c:688
uint32 string_hash(const void *key, Size keysize)
Definition: hashfn.c:660
#define HASH_KEYCOPY
Definition: hsearch.h:100
#define HASH_STRINGS
Definition: hsearch.h:96
int(* HashCompareFunc)(const void *key1, const void *key2, Size keysize)
Definition: hsearch.h:29
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_ALLOC
Definition: hsearch.h:101
#define HASH_DIRSIZE
Definition: hsearch.h:94
#define HASH_SEGMENT
Definition: hsearch.h:93
#define HASH_ATTACH
Definition: hsearch.h:104
#define HASH_COMPARE
Definition: hsearch.h:99
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HASH_SHARED_MEM
Definition: hsearch.h:103
#define HASH_FIXED_SIZE
Definition: hsearch.h:105
#define HASH_PARTITION
Definition: hsearch.h:92
void *(* HashCopyFunc)(void *dest, const void *src, Size keysize)
Definition: hsearch.h:37
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void MemoryContextSetIdentifier(MemoryContext context, const char *id)
Definition: mcxt.c:612
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: strlcpy.c:45
long ssize
Definition: hsearch.h:70
HashAllocFunc alloc
Definition: hsearch.h:84
Size keysize
Definition: hsearch.h:75
HashValueFunc hash
Definition: hsearch.h:78
Size entrysize
Definition: hsearch.h:76
long dsize
Definition: hsearch.h:72
HashCompareFunc match
Definition: hsearch.h:80
HASHHDR * hctl
Definition: hsearch.h:88
MemoryContext hcxt
Definition: hsearch.h:86
long num_partitions
Definition: hsearch.h:68
HashCopyFunc keycopy
Definition: hsearch.h:82
long max_dsize
Definition: hsearch.h:73
long num_partitions
Definition: dynahash.c:193
Size keysize
Definition: dynahash.c:191
int sshift
Definition: dynahash.c:196
long ssize
Definition: dynahash.c:195
Definition: dynahash.c:220
HashCompareFunc match
Definition: dynahash.c:224
HashCopyFunc keycopy
Definition: dynahash.c:225
bool frozen
Definition: dynahash.c:233

References HTAB::alloc, HASHCTL::alloc, ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert, CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, HASHCTL::dsize, DynaHashAlloc(), element_alloc(), elog, HASHHDR::entrysize, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HTAB::frozen, HTAB::hash, HASHCTL::hash, HASH_ALLOC, HASH_ATTACH, HASH_BLOBS, HASH_COMPARE, HASH_CONTEXT, HASH_DIRSIZE, HASH_ELEM, HASH_FIXED_SIZE, HASH_FUNCTION, HASH_KEYCOPY, HASH_PARTITION, HASH_SEGMENT, HASH_SHARED_MEM, HASH_STRINGS, HTAB::hctl, HASHCTL::hctl, HTAB::hcxt, HASHCTL::hcxt, hdefault(), i, init_htab(), IS_PARTITIONED, HTAB::isfixed, HTAB::isshared, HTAB::keycopy, HASHCTL::keycopy, HASHHDR::keysize, HTAB::keysize, HASHCTL::keysize, HTAB::match, HASHCTL::match, HASHHDR::max_dsize, HASHCTL::max_dsize, MemoryContextSetIdentifier(), MemSet, my_log2(), next_pow2_int(), NUM_FREELISTS, HASHHDR::num_partitions, HASHCTL::num_partitions, HASHHDR::sshift, HTAB::sshift, HASHHDR::ssize, HTAB::ssize, HASHCTL::ssize, string_compare(), string_hash(), strlcpy(), HTAB::tabname, tag_hash(), TopMemoryContext, and uint32_hash().

Referenced by _hash_finish_split(), _PG_init(), AddEventToPendingNotifies(), AddPendingSync(), assign_record_type_typmod(), begin_heap_rewrite(), build_guc_variables(), build_join_rel_hash(), BuildEventTriggerCache(), CheckForSessionAndXactLocks(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), create_seq_hashtable(), createConnHash(), CreateLocalPredicateLockHash(), CreatePartitionDirectory(), do_autovacuum(), EnablePortalManager(), ExecInitModifyTable(), ExecuteTruncateGuts(), find_all_inheritors(), find_oper_cache_entry(), find_rendezvous_variable(), get_json_object_as_hash(), GetComboCommandId(), GetConnection(), gistInitBuildBuffers(), gistInitParentMap(), init_missing_cache(), init_procedure_caches(), init_rel_sync_cache(), init_timezone_hashtable(), init_ts_config_cache(), init_uncommitted_enum_types(), init_uncommitted_enum_values(), InitBufferPoolAccess(), InitializeAttoptCache(), InitializeRelfilenumberMap(), InitializeShippableCache(), InitializeTableSpaceCache(), InitLocalBuffers(), InitLocks(), InitQueryHashTable(), InitRecoveryTransactionEnvironment(), InitSync(), json_unique_check_init(), load_categories_hash(), log_invalid_page(), logical_begin_heap_rewrite(), logicalrep_partmap_init(), logicalrep_relmap_init(), lookup_collation_cache(), lookup_proof_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), pa_allocate_worker(), plpgsql_estate_setup(), plpgsql_HashTableInit(), PLy_add_exceptions(), populate_recordset_object_start(), process_syncing_tables_for_apply(), rebuild_database_list(), record_C_func(), RegisterExtensibleNodeEntry(), RelationCacheInitialize(), ReorderBufferAllocate(), ReorderBufferBuildTupleCidHash(), ReorderBufferToastInitHash(), ResetUnloggedRelationsInDbspaceDir(), ri_InitHashTables(), select_perl_context(), SerializePendingSyncs(), set_rtable_names(), ShmemInitHash(), smgropen(), transformGraph(), and XLogPrefetcherAllocate().
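For context, a minimal sketch of how backend code typically calls hash_create() for a local (non-shared) table. The entry struct, table name, and sizes below are illustrative, not part of dynahash.c; the sketch assumes a normal backend translation unit that includes utils/hsearch.h.

/* Hypothetical cache keyed by relation OID; the key must be the first field. */
typedef struct MyCacheEntry
{
    Oid         relid;          /* hash key */
    int         usage_count;    /* caller-managed payload */
} MyCacheEntry;

static HTAB *my_cache = NULL;

static void
my_cache_init(void)
{
    HASHCTL     ctl;

    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(MyCacheEntry);

    /* HASH_ELEM is mandatory; HASH_BLOBS selects binary (tag) key hashing */
    my_cache = hash_create("my cache",
                           128,         /* initial size hint, not a hard limit */
                           &ctl,
                           HASH_ELEM | HASH_BLOBS);
}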

◆ hash_destroy()

void hash_destroy ( HTAB *  hashp)

Definition at line 865 of file dynahash.c.

866 {
867  if (hashp != NULL)
868  {
869  /* allocation method must be one we know how to free, too */
870  Assert(hashp->alloc == DynaHashAlloc);
871  /* so this hashtable must have its own context */
872  Assert(hashp->hcxt != NULL);
873 
874  hash_stats("destroy", hashp);
875 
876  /*
877  * Free everything by destroying the hash table's memory context.
878  */
879  MemoryContextDelete(hashp->hcxt);
880  }
881 }
void hash_stats(const char *where, HTAB *hashp)
Definition: dynahash.c:884
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454

References HTAB::alloc, Assert, DynaHashAlloc(), hash_stats(), HTAB::hcxt, and MemoryContextDelete().

Referenced by _hash_finish_split(), CheckForSessionAndXactLocks(), CompactCheckpointerRequestQueue(), ExecuteTruncateGuts(), find_all_inheritors(), get_json_object_as_hash(), InitLocks(), pgoutput_shutdown(), populate_recordset_object_end(), PostPrepare_PredicateLocks(), process_syncing_tables_for_apply(), ReleasePredicateLocksLocal(), ReorderBufferReturnTXN(), ReorderBufferToastReset(), ReorderBufferTruncateTXN(), ResetSequenceCaches(), ResetUnloggedRelationsInDbspaceDir(), SerializePendingSyncs(), set_rtable_names(), ShutdownRecoveryTransactionEnvironment(), XLogCheckInvalidPages(), and XLogPrefetcherFree().

◆ hash_estimate_size()

Size hash_estimate_size ( long  num_entries,
Size  entrysize 
)

Definition at line 783 of file dynahash.c.

784 {
785  Size size;
786  long nBuckets,
787  nSegments,
788  nDirEntries,
789  nElementAllocs,
790  elementSize,
791  elementAllocCnt;
792 
793  /* estimate number of buckets wanted */
794  nBuckets = next_pow2_long(num_entries);
795  /* # of segments needed for nBuckets */
796  nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
797  /* directory entries */
798  nDirEntries = DEF_DIRSIZE;
799  while (nDirEntries < nSegments)
800  nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
801 
802  /* fixed control info */
803  size = MAXALIGN(sizeof(HASHHDR)); /* but not HTAB, per above */
804  /* directory */
805  size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
806  /* segments */
807  size = add_size(size, mul_size(nSegments,
808  MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
809  /* elements --- allocated in groups of choose_nelem_alloc() entries */
810  elementAllocCnt = choose_nelem_alloc(entrysize);
811  nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
812  elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
813  size = add_size(size,
814  mul_size(nElementAllocs,
815  mul_size(elementAllocCnt, elementSize)));
816 
817  return size;
818 }
#define DEF_DIRSIZE
Definition: dynahash.c:125
static int choose_nelem_alloc(Size entrysize)
Definition: dynahash.c:656
#define DEF_SEGSIZE
Definition: dynahash.c:123
static long next_pow2_long(long num)
Definition: dynahash.c:1769
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), choose_nelem_alloc(), DEF_DIRSIZE, DEF_SEGSIZE, MAXALIGN, mul_size(), next_pow2_long(), and size.

Referenced by BufTableShmemSize(), CalculateShmemSize(), InjectionPointShmemSize(), LockShmemSize(), pgss_memsize(), PredicateLockShmemSize(), and WaitEventExtensionShmemSize().
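As a worked illustration (assuming 8-byte pointers and a per-element footprint of 48 bytes, neither of which comes from the source): for num_entries = 1000, nBuckets = next_pow2_long(1000) = 1024, nSegments = next_pow2_long((1024 - 1) / 256 + 1) = 4, and nDirEntries stays at the default of 256. The estimate then sums the fixed HASHHDR, 256 * 8 bytes of directory, 4 segments of 256 * 8 bytes each, and 24 element allocations (ceil(1000 / 42), with 42 from choose_nelem_alloc) of 42 * 48 bytes each; add_size() and mul_size() raise an error on overflow instead of wrapping.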

◆ hash_freeze()

void hash_freeze ( HTAB *  hashp)

Definition at line 1491 of file dynahash.c.

1492 {
1493  if (hashp->isshared)
1494  elog(ERROR, "cannot freeze shared hashtable \"%s\"", hashp->tabname);
1495  if (!hashp->frozen && has_seq_scans(hashp))
1496  elog(ERROR, "cannot freeze hashtable \"%s\" because it has active scans",
1497  hashp->tabname);
1498  hashp->frozen = true;
1499 }
static bool has_seq_scans(HTAB *hashp)
Definition: dynahash.c:1855

References elog, ERROR, HTAB::frozen, has_seq_scans(), HTAB::isshared, and HTAB::tabname.

◆ hash_get_num_entries()

long hash_get_num_entries ( HTAB *  hashp)

Definition at line 1341 of file dynahash.c.

1342 {
1343  int i;
1344  long sum = hashp->hctl->freeList[0].nentries;
1345 
1346  /*
1347  * We currently don't bother with acquiring the mutexes; it's only
1348  * sensible to call this function if you've got lock on all partitions of
1349  * the table.
1350  */
1351  if (IS_PARTITIONED(hashp->hctl))
1352  {
1353  for (i = 1; i < NUM_FREELISTS; i++)
1354  sum += hashp->hctl->freeList[i].nentries;
1355  }
1356 
1357  return sum;
1358 }

References HASHHDR::freeList, HTAB::hctl, i, IS_PARTITIONED, FreeListData::nentries, and NUM_FREELISTS.

Referenced by build_guc_variables(), compute_array_stats(), compute_tsvector_stats(), entry_alloc(), entry_dealloc(), entry_reset(), EstimatePendingSyncsSpace(), EstimateUncommittedEnumsSpace(), get_crosstab_tuplestore(), get_explain_guc_options(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventExtensionNames(), hash_stats(), pgss_shmem_shutdown(), ResetUnloggedRelationsInDbspaceDir(), SerializePendingSyncs(), transformGraph(), and XLogHaveInvalidPages().

◆ hash_get_shared_size()

Size hash_get_shared_size ( HASHCTL *  info,
int  flags 
)

Definition at line 854 of file dynahash.c.

855 {
856  Assert(flags & HASH_DIRSIZE);
857  Assert(info->dsize == info->max_dsize);
858  return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
859 }
struct HASHHDR HASHHDR
Definition: hsearch.h:58

References Assert, HASHCTL::dsize, HASH_DIRSIZE, and HASHCTL::max_dsize.

Referenced by ShmemInitHash().

◆ hash_initial_lookup()

static uint32 hash_initial_lookup ( HTAB *  hashp,
uint32  hashvalue,
HASHBUCKET **  bucketptr 
)
inlinestatic

Definition at line 1713 of file dynahash.c.

1714 {
1715  HASHHDR *hctl = hashp->hctl;
1716  HASHSEGMENT segp;
1717  long segment_num;
1718  long segment_ndx;
1719  uint32 bucket;
1720 
1721  bucket = calc_bucket(hctl, hashvalue);
1722 
1723  segment_num = bucket >> hashp->sshift;
1724  segment_ndx = MOD(bucket, hashp->ssize);
1725 
1726  segp = hashp->dir[segment_num];
1727 
1728  if (segp == NULL)
1729  hash_corrupted(hashp);
1730 
1731  *bucketptr = &segp[segment_ndx];
1732  return bucket;
1733 }
static void hash_corrupted(HTAB *hashp) pg_attribute_noreturn()
Definition: dynahash.c:1737

References calc_bucket(), HTAB::dir, hash_corrupted(), HTAB::hctl, MOD, HTAB::sshift, and HTAB::ssize.

Referenced by hash_search_with_hash_value(), and hash_update_hash_key().

◆ hash_search()

void* hash_search ( HTAB *  hashp,
const void *  keyPtr,
HASHACTION  action,
bool *  foundPtr 
)

Definition at line 955 of file dynahash.c.

959 {
960  return hash_search_with_hash_value(hashp,
961  keyPtr,
962  hashp->hash(keyPtr, hashp->keysize),
963  action,
964  foundPtr);
965 }
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:968

References generate_unaccent_rules::action, HTAB::hash, hash_search_with_hash_value(), and HTAB::keysize.

Referenced by _hash_finish_split(), _hash_splitbucket(), add_guc_variable(), add_join_rel(), AddEnumLabel(), AddEventToPendingNotifies(), AddPendingSync(), ApplyLogicalMappingFile(), assign_record_type_typmod(), AsyncExistsPendingNotify(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), build_guc_variables(), build_join_rel_hash(), BuildEventTriggerCache(), CheckAndPromotePredicateLockRequest(), CheckForSerializableConflictOut(), CheckForSessionAndXactLocks(), CompactCheckpointerRequestQueue(), compile_plperl_function(), compile_pltcl_function(), compute_array_stats(), compute_tsvector_stats(), createNewConnection(), define_custom_variable(), deleteConnection(), do_autovacuum(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), DropPreparedStatement(), DropRelationAllLocalBuffers(), DropRelationLocalBuffers(), entry_alloc(), entry_dealloc(), entry_reset(), EnumTypeUncommitted(), EnumUncommitted(), EnumValuesCreate(), EventCacheLookup(), ExecInitModifyTable(), ExecLookupResultRelByOid(), ExecuteTruncateGuts(), ExtendBufferedRelLocal(), FetchPreparedStatement(), find_all_inheritors(), find_join_rel(), find_oper_cache_entry(), find_option(), find_rendezvous_variable(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPrivateRefCountEntry(), get_attribute_options(), get_cast_hashentry(), get_rel_sync_entry(), get_tablespace(), GetComboCommandId(), GetConnection(), getConnectionByName(), GetExtensibleNodeEntry(), GetLocalVictimBuffer(), getmissingattr(), GetPrivateRefCountEntry(), getState(), GetWaitEventExtensionIdentifier(), gistGetNodeBuffer(), gistGetParent(), gistMemorizeParent(), gistRelocateBuildBuffersOnSplit(), hash_object_field_end(), init_sequence(), InitPredicateLocks(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointRun(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), is_shippable(), JsObjectGetField(), json_unique_check_key(), LocalBufferAlloc(), LockAcquireExtended(), LockHasWaiters(), LockHeldByMe(), LockRelease(), log_invalid_page(), logical_rewrite_log_mapping(), logicalrep_partition_open(), logicalrep_rel_open(), logicalrep_relmap_update(), lookup_C_func(), lookup_collation_cache(), lookup_proof_cache(), lookup_ts_config_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), make_oper_cache_entry(), MarkGUCPrefixReserved(), pa_allocate_worker(), pa_find_worker(), pa_free_worker(), PartitionDirectoryLookup(), pg_tzset(), pgss_store(), plperl_spi_exec_prepared(), plperl_spi_freeplan(), plperl_spi_prepare(), plperl_spi_query_prepared(), plpgsql_HashTableDelete(), plpgsql_HashTableInsert(), plpgsql_HashTableLookup(), pltcl_fetch_interp(), PLy_commit(), PLy_generate_spi_exceptions(), PLy_procedure_get(), PLy_rollback(), PLy_spi_subtransaction_abort(), populate_recordset_object_field_end(), predicatelock_twophase_recover(), PredicateLockExists(), PredicateLockTwoPhaseFinish(), PrefetchLocalBuffer(), process_syncing_tables_for_apply(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), record_C_func(), RegisterExtensibleNodeEntry(), RegisterPredicateLockingXid(), rel_sync_cache_relation_cb(), RelationPreTruncate(), ReleaseOneSerializableXact(), RelFileLocatorSkippingWAL(), RelfilenumberMapInvalidateCallback(), RelidByRelfilenumber(), RememberSyncRequest(), RemoveLocalLock(), ReorderBufferBuildTupleCidHash(), ReorderBufferCleanupTXN(), 
ReorderBufferToastAppendChunk(), ReorderBufferToastReplace(), ReorderBufferTXNByXid(), ReservePrivateRefCountEntry(), ResetUnloggedRelationsInDbspaceDir(), ResolveCminCmaxDuringDecoding(), RestoreUncommittedEnums(), rewrite_heap_dead_tuple(), rewrite_heap_tuple(), ri_FetchPreparedPlan(), ri_HashCompareOp(), ri_HashPreparedPlan(), ri_LoadConstraintInfo(), select_perl_context(), SerializePendingSyncs(), set_rtable_names(), ShmemInitStruct(), smgrdestroy(), smgrDoPendingSyncs(), smgropen(), smgrreleaserellocator(), StandbyAcquireAccessExclusiveLock(), StandbyReleaseAllLocks(), StandbyReleaseLocks(), StandbyReleaseOldLocks(), StandbyReleaseXidEntryLocks(), StorePreparedStatement(), table_recheck_autovac(), WaitEventExtensionNew(), XLogPrefetcherAddFilter(), XLogPrefetcherCompleteFilters(), and XLogPrefetcherIsFiltered().
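Continuing the illustrative MyCacheEntry example sketched under hash_create() above, the usual lookup-or-insert pattern looks like this; with HASH_ENTER the function returns the existing entry when found, otherwise a freshly linked entry whose non-key fields the caller must initialize before use.

static MyCacheEntry *
my_cache_bump(Oid relid)
{
    MyCacheEntry *entry;
    bool        found;

    entry = (MyCacheEntry *) hash_search(my_cache,
                                         &relid,      /* pointer to the key value */
                                         HASH_ENTER,  /* create the entry if absent */
                                         &found);
    if (!found)
        entry->usage_count = 0;   /* initialize non-key fields of a new entry */
    entry->usage_count++;

    return entry;
}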

◆ hash_search_with_hash_value()

void* hash_search_with_hash_value ( HTAB *  hashp,
const void *  keyPtr,
uint32  hashvalue,
HASHACTION  action,
bool *  foundPtr 
)

Definition at line 968 of file dynahash.c.

973 {
974  HASHHDR *hctl = hashp->hctl;
975  int freelist_idx = FREELIST_IDX(hctl, hashvalue);
976  Size keysize;
977  HASHBUCKET currBucket;
978  HASHBUCKET *prevBucketPtr;
979  HashCompareFunc match;
980 
981 #ifdef HASH_STATISTICS
982  hash_accesses++;
983  hctl->accesses++;
984 #endif
985 
986  /*
987  * If inserting, check if it is time to split a bucket.
988  *
989  * NOTE: failure to expand table is not a fatal error, it just means we
990  * have to run at higher fill factor than we wanted. However, if we're
991  * using the palloc allocator then it will throw error anyway on
992  * out-of-memory, so we must do this before modifying the table.
993  */
994  if (action == HASH_ENTER || action == HASH_ENTER_NULL)
995  {
996  /*
997  * Can't split if running in partitioned mode, nor if frozen, nor if
998  * table is the subject of any active hash_seq_search scans.
999  */
1000  if (hctl->freeList[0].nentries > (long) hctl->max_bucket &&
1001  !IS_PARTITIONED(hctl) && !hashp->frozen &&
1002  !has_seq_scans(hashp))
1003  (void) expand_table(hashp);
1004  }
1005 
1006  /*
1007  * Do the initial lookup
1008  */
1009  (void) hash_initial_lookup(hashp, hashvalue, &prevBucketPtr);
1010  currBucket = *prevBucketPtr;
1011 
1012  /*
1013  * Follow collision chain looking for matching key
1014  */
1015  match = hashp->match; /* save one fetch in inner loop */
1016  keysize = hashp->keysize; /* ditto */
1017 
1018  while (currBucket != NULL)
1019  {
1020  if (currBucket->hashvalue == hashvalue &&
1021  match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
1022  break;
1023  prevBucketPtr = &(currBucket->link);
1024  currBucket = *prevBucketPtr;
1025 #ifdef HASH_STATISTICS
1026  hash_collisions++;
1027  hctl->collisions++;
1028 #endif
1029  }
1030 
1031  if (foundPtr)
1032  *foundPtr = (bool) (currBucket != NULL);
1033 
1034  /*
1035  * OK, now what?
1036  */
1037  switch (action)
1038  {
1039  case HASH_FIND:
1040  if (currBucket != NULL)
1041  return (void *) ELEMENTKEY(currBucket);
1042  return NULL;
1043 
1044  case HASH_REMOVE:
1045  if (currBucket != NULL)
1046  {
1047  /* if partitioned, must lock to touch nentries and freeList */
1048  if (IS_PARTITIONED(hctl))
1049  SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));
1050 
1051  /* delete the record from the appropriate nentries counter. */
1052  Assert(hctl->freeList[freelist_idx].nentries > 0);
1053  hctl->freeList[freelist_idx].nentries--;
1054 
1055  /* remove record from hash bucket's chain. */
1056  *prevBucketPtr = currBucket->link;
1057 
1058  /* add the record to the appropriate freelist. */
1059  currBucket->link = hctl->freeList[freelist_idx].freeList;
1060  hctl->freeList[freelist_idx].freeList = currBucket;
1061 
1062  if (IS_PARTITIONED(hctl))
1063  SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1064 
1065  /*
1066  * better hope the caller is synchronizing access to this
1067  * element, because someone else is going to reuse it the next
1068  * time something is added to the table
1069  */
1070  return (void *) ELEMENTKEY(currBucket);
1071  }
1072  return NULL;
1073 
1074  case HASH_ENTER:
1075  case HASH_ENTER_NULL:
1076  /* Return existing element if found, else create one */
1077  if (currBucket != NULL)
1078  return (void *) ELEMENTKEY(currBucket);
1079 
1080  /* disallow inserts if frozen */
1081  if (hashp->frozen)
1082  elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
1083  hashp->tabname);
1084 
1085  currBucket = get_hash_entry(hashp, freelist_idx);
1086  if (currBucket == NULL)
1087  {
1088  /* out of memory */
1089  if (action == HASH_ENTER_NULL)
1090  return NULL;
1091  /* report a generic message */
1092  if (hashp->isshared)
1093  ereport(ERROR,
1094  (errcode(ERRCODE_OUT_OF_MEMORY),
1095  errmsg("out of shared memory")));
1096  else
1097  ereport(ERROR,
1098  (errcode(ERRCODE_OUT_OF_MEMORY),
1099  errmsg("out of memory")));
1100  }
1101 
1102  /* link into hashbucket chain */
1103  *prevBucketPtr = currBucket;
1104  currBucket->link = NULL;
1105 
1106  /* copy key into record */
1107  currBucket->hashvalue = hashvalue;
1108  hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
1109 
1110  /*
1111  * Caller is expected to fill the data field on return. DO NOT
1112  * insert any code that could possibly throw error here, as doing
1113  * so would leave the table entry incomplete and hence corrupt the
1114  * caller's data structure.
1115  */
1116 
1117  return (void *) ELEMENTKEY(currBucket);
1118  }
1119 
1120  elog(ERROR, "unrecognized hash action code: %d", (int) action);
1121 
1122  return NULL; /* keep compiler quiet */
1123 }
unsigned char bool
Definition: c.h:456
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx)
Definition: dynahash.c:1256
static bool expand_table(HTAB *hashp)
Definition: dynahash.c:1508
#define ELEMENTKEY(helem)
Definition: dynahash.c:244
#define FREELIST_IDX(hctl, hashcode)
Definition: dynahash.c:212
static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
Definition: dynahash.c:1713
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
@ HASH_ENTER_NULL
Definition: hsearch.h:116

References generate_unaccent_rules::action, Assert, ELEMENTKEY, elog, ereport, errcode(), errmsg(), ERROR, expand_table(), FreeListData::freeList, HASHHDR::freeList, FREELIST_IDX, HTAB::frozen, get_hash_entry(), has_seq_scans(), HASH_ENTER, HASH_ENTER_NULL, HASH_FIND, hash_initial_lookup(), HASH_REMOVE, HASHELEMENT::hashvalue, HTAB::hctl, IS_PARTITIONED, HTAB::isshared, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, HASHHDR::max_bucket, FreeListData::mutex, FreeListData::nentries, SpinLockAcquire, SpinLockRelease, and HTAB::tabname.

Referenced by BufTableDelete(), BufTableInsert(), BufTableLookup(), CheckTargetForConflictsIn(), CleanUpLock(), ClearOldPredicateLocks(), CreatePredicateLock(), DecrementParentLocks(), DeleteChildTargetLocks(), DeleteLockTarget(), DropAllPredicateLocksFromTable(), FastPathGetRelationLockEntry(), GetLockConflicts(), hash_search(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockRelease(), LockWaiterCount(), PageIsPredicateLocked(), PredicateLockAcquire(), ReleaseOneSerializableXact(), RemoveScratchTarget(), RemoveTargetIfNoLongerUsed(), RestoreScratchTarget(), SetupLockInTable(), and TransferPredicateLocksToNewTarget().

◆ hash_select_dirsize()

long hash_select_dirsize ( long  num_entries)

Definition at line 830 of file dynahash.c.

831 {
832  long nBuckets,
833  nSegments,
834  nDirEntries;
835 
836  /* estimate number of buckets wanted */
837  nBuckets = next_pow2_long(num_entries);
838  /* # of segments needed for nBuckets */
839  nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
840  /* directory entries */
841  nDirEntries = DEF_DIRSIZE;
842  while (nDirEntries < nSegments)
843  nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
844 
845  return nDirEntries;
846 }

References DEF_DIRSIZE, DEF_SEGSIZE, and next_pow2_long().

Referenced by ShmemInitHash().

◆ hash_seq_init()

void hash_seq_init ( HASH_SEQ_STATUS *  status,
HTAB *  hashp 
)

Definition at line 1385 of file dynahash.c.

1386 {
1387  status->hashp = hashp;
1388  status->curBucket = 0;
1389  status->curEntry = NULL;
1390  if (!hashp->frozen)
1391  register_seq_scan(hashp);
1392 }
static void register_seq_scan(HTAB *hashp)
Definition: dynahash.c:1822
HASHELEMENT * curEntry
Definition: hsearch.h:124
uint32 curBucket
Definition: hsearch.h:123
HTAB * hashp
Definition: hsearch.h:122

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::frozen, HASH_SEQ_STATUS::hashp, and register_seq_scan().

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), BeginReportingGUCOptions(), CheckForBufferLeaks(), CheckForSessionAndXactLocks(), CheckTableForSerializableConflictIn(), cleanup_rel_sync_cache(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DestroyPartitionDirectory(), disconnect_cached_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), ExecuteTruncateGuts(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPortalSnapshots(), gc_qtexts(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventExtensionNames(), HoldPinnedPortals(), InitializeGUCOptions(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), logical_end_heap_rewrite(), logical_heap_rewrite_flush_mappings(), logicalrep_partmap_invalidate_cb(), logicalrep_partmap_reset_relmap(), logicalrep_relmap_invalidate_cb(), MarkGUCPrefixReserved(), packGraph(), pg_cursor(), pg_get_shmem_allocations(), pg_prepared_statement(), pg_stat_statements_internal(), pgfdw_inval_callback(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), plperl_fini(), PortalErrorCleanup(), PortalHashTableDeleteAll(), postgres_fdw_get_connections(), PostPrepare_Locks(), PreCommit_Portals(), ProcessConfigFileInternal(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), rel_sync_cache_publication_cb(), rel_sync_cache_relation_cb(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RelfilenumberMapInvalidateCallback(), RememberSyncRequest(), ReorderBufferToastReset(), selectColorTrigrams(), SerializePendingSyncs(), SerializeUncommittedEnums(), smgrDoPendingSyncs(), smgrreleaseall(), StandbyReleaseAllLocks(), StandbyReleaseOldLocks(), ThereAreNoReadyPortals(), TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().

◆ hash_seq_search()

void* hash_seq_search ( HASH_SEQ_STATUS *  status)

Definition at line 1395 of file dynahash.c.

1396 {
1397  HTAB *hashp;
1398  HASHHDR *hctl;
1399  uint32 max_bucket;
1400  long ssize;
1401  long segment_num;
1402  long segment_ndx;
1403  HASHSEGMENT segp;
1404  uint32 curBucket;
1405  HASHELEMENT *curElem;
1406 
1407  if ((curElem = status->curEntry) != NULL)
1408  {
1409  /* Continuing scan of curBucket... */
1410  status->curEntry = curElem->link;
1411  if (status->curEntry == NULL) /* end of this bucket */
1412  ++status->curBucket;
1413  return (void *) ELEMENTKEY(curElem);
1414  }
1415 
1416  /*
1417  * Search for next nonempty bucket starting at curBucket.
1418  */
1419  curBucket = status->curBucket;
1420  hashp = status->hashp;
1421  hctl = hashp->hctl;
1422  ssize = hashp->ssize;
1423  max_bucket = hctl->max_bucket;
1424 
1425  if (curBucket > max_bucket)
1426  {
1427  hash_seq_term(status);
1428  return NULL; /* search is done */
1429  }
1430 
1431  /*
1432  * first find the right segment in the table directory.
1433  */
1434  segment_num = curBucket >> hashp->sshift;
1435  segment_ndx = MOD(curBucket, ssize);
1436 
1437  segp = hashp->dir[segment_num];
1438 
1439  /*
1440  * Pick up the first item in this bucket's chain. If chain is not empty
1441  * we can begin searching it. Otherwise we have to advance to find the
1442  * next nonempty bucket. We try to optimize that case since searching a
1443  * near-empty hashtable has to iterate this loop a lot.
1444  */
1445  while ((curElem = segp[segment_ndx]) == NULL)
1446  {
1447  /* empty bucket, advance to next */
1448  if (++curBucket > max_bucket)
1449  {
1450  status->curBucket = curBucket;
1451  hash_seq_term(status);
1452  return NULL; /* search is done */
1453  }
1454  if (++segment_ndx >= ssize)
1455  {
1456  segment_num++;
1457  segment_ndx = 0;
1458  segp = hashp->dir[segment_num];
1459  }
1460  }
1461 
1462  /* Begin scan of curBucket... */
1463  status->curEntry = curElem->link;
1464  if (status->curEntry == NULL) /* end of this bucket */
1465  ++curBucket;
1466  status->curBucket = curBucket;
1467  return (void *) ELEMENTKEY(curElem);
1468 }

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::dir, ELEMENTKEY, hash_seq_term(), HASH_SEQ_STATUS::hashp, HTAB::hctl, HASHELEMENT::link, HASHHDR::max_bucket, MOD, HTAB::sshift, and HTAB::ssize.

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), BeginReportingGUCOptions(), CheckForBufferLeaks(), CheckForSessionAndXactLocks(), CheckTableForSerializableConflictIn(), cleanup_rel_sync_cache(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DestroyPartitionDirectory(), disconnect_cached_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), ExecuteTruncateGuts(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPortalSnapshots(), gc_qtexts(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventExtensionNames(), HoldPinnedPortals(), InitializeGUCOptions(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), logical_end_heap_rewrite(), logical_heap_rewrite_flush_mappings(), logicalrep_partmap_invalidate_cb(), logicalrep_partmap_reset_relmap(), logicalrep_relmap_invalidate_cb(), MarkGUCPrefixReserved(), packGraph(), pg_cursor(), pg_get_shmem_allocations(), pg_prepared_statement(), pg_stat_statements_internal(), pgfdw_inval_callback(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), plperl_fini(), PortalErrorCleanup(), PortalHashTableDeleteAll(), postgres_fdw_get_connections(), PostPrepare_Locks(), PreCommit_Portals(), ProcessConfigFileInternal(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), rel_sync_cache_publication_cb(), rel_sync_cache_relation_cb(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RelfilenumberMapInvalidateCallback(), RememberSyncRequest(), ReorderBufferToastReset(), selectColorTrigrams(), SerializePendingSyncs(), SerializeUncommittedEnums(), smgrDoPendingSyncs(), smgrreleaseall(), StandbyReleaseAllLocks(), StandbyReleaseOldLocks(), ThereAreNoReadyPortals(), TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().
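To make the directory arithmetic above concrete, here is a worked example using the default geometry from hdefault() (ssize = DEF_SEGSIZE = 256, sshift = DEF_SEGSIZE_SHIFT = 8); the bucket number is arbitrary:

 /*
  * curBucket = 1000
  *   segment_num = 1000 >> 8                    = 3
  *   segment_ndx = MOD(1000, 256) = 1000 & 255  = 232
  * so bucket 1000 lives at entry 232 of directory segment 3
  */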

◆ hash_seq_term()

void hash_seq_term ( HASH_SEQ_STATUS *  status)

Definition at line 1471 of file dynahash.c.

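hash_seq_term() is only required when a scan is abandoned before hash_seq_search() has returned NULL. A minimal sketch (MyEntry, my_hash_table and found_what_we_wanted() are hypothetical):

 HASH_SEQ_STATUS status;
 MyEntry    *entry;

 hash_seq_init(&status, my_hash_table);
 while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
 {
     if (found_what_we_wanted(entry))
     {
         hash_seq_term(&status);  /* scan did not run to completion */
         break;
     }
 }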
◆ hash_stats()

void hash_stats ( const char *  where,
HTAB *  hashp 
)

Definition at line 884 of file dynahash.c.

885 {
886 #ifdef HASH_STATISTICS
887  fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
888  where, hashp->hctl->accesses, hashp->hctl->collisions);
889 
890  fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
891  hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
892  hashp->hctl->max_bucket, hashp->hctl->nsegs);
893  fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
894  where, hash_accesses, hash_collisions);
895  fprintf(stderr, "hash_stats: total expansions %ld\n",
896  hash_expansions);
897 #endif
898 }

References fprintf, hash_get_num_entries(), HTAB::hctl, HASHHDR::keysize, HASHHDR::max_bucket, and HASHHDR::nsegs.

Referenced by hash_destroy().

◆ hash_update_hash_key()

bool hash_update_hash_key ( HTAB *  hashp,
void *  existingEntry,
const void *  newKeyPtr 
)

Definition at line 1145 of file dynahash.c.

1148 {
1149  HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
1150  uint32 newhashvalue;
1151  Size keysize;
1152  uint32 bucket;
1153  uint32 newbucket;
1154  HASHBUCKET currBucket;
1155  HASHBUCKET *prevBucketPtr;
1156  HASHBUCKET *oldPrevPtr;
1157  HashCompareFunc match;
1158 
1159 #ifdef HASH_STATISTICS
1160  hash_accesses++;
1161  hctl->accesses++;
1162 #endif
1163 
1164  /* disallow updates if frozen */
1165  if (hashp->frozen)
1166  elog(ERROR, "cannot update in frozen hashtable \"%s\"",
1167  hashp->tabname);
1168 
1169  /*
1170  * Lookup the existing element using its saved hash value. We need to do
1171  * this to be able to unlink it from its hash chain, but as a side benefit
1172  * we can verify the validity of the passed existingEntry pointer.
1173  */
1174  bucket = hash_initial_lookup(hashp, existingElement->hashvalue,
1175  &prevBucketPtr);
1176  currBucket = *prevBucketPtr;
1177 
1178  while (currBucket != NULL)
1179  {
1180  if (currBucket == existingElement)
1181  break;
1182  prevBucketPtr = &(currBucket->link);
1183  currBucket = *prevBucketPtr;
1184  }
1185 
1186  if (currBucket == NULL)
1187  elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"",
1188  hashp->tabname);
1189 
1190  oldPrevPtr = prevBucketPtr;
1191 
1192  /*
1193  * Now perform the equivalent of a HASH_ENTER operation to locate the hash
1194  * chain we want to put the entry into.
1195  */
1196  newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
1197  newbucket = hash_initial_lookup(hashp, newhashvalue, &prevBucketPtr);
1198  currBucket = *prevBucketPtr;
1199 
1200  /*
1201  * Follow collision chain looking for matching key
1202  */
1203  match = hashp->match; /* save one fetch in inner loop */
1204  keysize = hashp->keysize; /* ditto */
1205 
1206  while (currBucket != NULL)
1207  {
1208  if (currBucket->hashvalue == newhashvalue &&
1209  match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
1210  break;
1211  prevBucketPtr = &(currBucket->link);
1212  currBucket = *prevBucketPtr;
1213 #ifdef HASH_STATISTICS
1214  hash_collisions++;
1215  hctl->collisions++;
1216 #endif
1217  }
1218 
1219  if (currBucket != NULL)
1220  return false; /* collision with an existing entry */
1221 
1222  currBucket = existingElement;
1223 
1224  /*
1225  * If old and new hash values belong to the same bucket, we need not
1226  * change any chain links, and indeed should not since this simplistic
1227  * update will corrupt the list if currBucket is the last element. (We
1228  * cannot fall out earlier, however, since we need to scan the bucket to
1229  * check for duplicate keys.)
1230  */
1231  if (bucket != newbucket)
1232  {
1233  /* OK to remove record from old hash bucket's chain. */
1234  *oldPrevPtr = currBucket->link;
1235 
1236  /* link into new hashbucket chain */
1237  *prevBucketPtr = currBucket;
1238  currBucket->link = NULL;
1239  }
1240 
1241  /* copy new key into record */
1242  currBucket->hashvalue = newhashvalue;
1243  hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);
1244 
1245  /* rest of record is untouched */
1246 
1247  return true;
1248 }

References ELEMENT_FROM_KEY, ELEMENTKEY, elog, ERROR, HTAB::frozen, HTAB::hash, hash_initial_lookup(), HASHELEMENT::hashvalue, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, and HTAB::tabname.

Referenced by PostPrepare_Locks().
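A hedged sketch of the intended call pattern (my_hash_table, MyEntry, oldkey and newkey are hypothetical): the entry is looked up first, then rekeyed in place; a false return means the new key already exists and the entry was left untouched.

 MyEntry    *entry;

 entry = (MyEntry *) hash_search(my_hash_table, &oldkey, HASH_FIND, NULL);
 if (entry != NULL &&
     !hash_update_hash_key(my_hash_table, entry, &newkey))
     elog(ERROR, "new key already present while rekeying entry");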

◆ hdefault()

static void hdefault ( HTAB *  hashp)
static

Definition at line 629 of file dynahash.c.

630 {
631  HASHHDR *hctl = hashp->hctl;
632 
633  MemSet(hctl, 0, sizeof(HASHHDR));
634 
635  hctl->dsize = DEF_DIRSIZE;
636  hctl->nsegs = 0;
637 
638  hctl->num_partitions = 0; /* not partitioned */
639 
640  /* table has no fixed maximum size */
641  hctl->max_dsize = NO_MAX_DSIZE;
642 
643  hctl->ssize = DEF_SEGSIZE;
644  hctl->sshift = DEF_SEGSIZE_SHIFT;
645 
646 #ifdef HASH_STATISTICS
647  hctl->accesses = hctl->collisions = 0;
648 #endif
649 }

References DEF_DIRSIZE, DEF_SEGSIZE, DEF_SEGSIZE_SHIFT, HASHHDR::dsize, HTAB::hctl, HASHHDR::max_dsize, MemSet, NO_MAX_DSIZE, HASHHDR::nsegs, HASHHDR::num_partitions, HASHHDR::sshift, and HASHHDR::ssize.

Referenced by hash_create().

◆ init_htab()

static bool init_htab ( HTAB *  hashp,
long  nelem 
)
static

Definition at line 689 of file dynahash.c.

690 {
691  HASHHDR *hctl = hashp->hctl;
692  HASHSEGMENT *segp;
693  int nbuckets;
694  int nsegs;
695  int i;
696 
697  /*
698  * initialize mutexes if it's a partitioned table
699  */
700  if (IS_PARTITIONED(hctl))
701  for (i = 0; i < NUM_FREELISTS; i++)
702  SpinLockInit(&(hctl->freeList[i].mutex));
703 
704  /*
705  * Allocate space for the next greater power of two number of buckets,
706  * assuming a desired maximum load factor of 1.
707  */
708  nbuckets = next_pow2_int(nelem);
709 
710  /*
711  * In a partitioned table, nbuckets must be at least equal to
712  * num_partitions; were it less, keys with apparently different partition
713  * numbers would map to the same bucket, breaking partition independence.
714  * (Normally nbuckets will be much bigger; this is just a safety check.)
715  */
716  while (nbuckets < hctl->num_partitions)
717  nbuckets <<= 1;
718 
719  hctl->max_bucket = hctl->low_mask = nbuckets - 1;
720  hctl->high_mask = (nbuckets << 1) - 1;
721 
722  /*
723  * Figure number of directory segments needed, round up to a power of 2
724  */
725  nsegs = (nbuckets - 1) / hctl->ssize + 1;
726  nsegs = next_pow2_int(nsegs);
727 
728  /*
729  * Make sure directory is big enough. If pre-allocated directory is too
730  * small, choke (caller screwed up).
731  */
732  if (nsegs > hctl->dsize)
733  {
734  if (!(hashp->dir))
735  hctl->dsize = nsegs;
736  else
737  return false;
738  }
739 
740  /* Allocate a directory */
741  if (!(hashp->dir))
742  {
743  CurrentDynaHashCxt = hashp->hcxt;
744  hashp->dir = (HASHSEGMENT *)
745  hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
746  if (!hashp->dir)
747  return false;
748  }
749 
750  /* Allocate initial segments */
751  for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
752  {
753  *segp = seg_alloc(hashp);
754  if (*segp == NULL)
755  return false;
756  }
757 
758  /* Choose number of entries to allocate at a time */
759  hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);
760 
761 #ifdef HASH_DEBUG
762  fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n",
763  "TABLE POINTER ", hashp,
764  "DIRECTORY SIZE ", hctl->dsize,
765  "SEGMENT SIZE ", hctl->ssize,
766  "SEGMENT SHIFT ", hctl->sshift,
767  "MAX BUCKET ", hctl->max_bucket,
768  "HIGH MASK ", hctl->high_mask,
769  "LOW MASK ", hctl->low_mask,
770  "NSEGS ", hctl->nsegs);
771 #endif
772  return true;
773 }

References HTAB::alloc, choose_nelem_alloc(), CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, HASHHDR::entrysize, fprintf, HASHHDR::freeList, HTAB::hctl, HTAB::hcxt, HASHHDR::high_mask, i, IS_PARTITIONED, HASHHDR::low_mask, HASHHDR::max_bucket, FreeListData::mutex, HASHHDR::nelem_alloc, next_pow2_int(), HASHHDR::nsegs, NUM_FREELISTS, seg_alloc(), SpinLockInit, HASHHDR::sshift, and HASHHDR::ssize.

Referenced by hash_create().
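The bucket and segment arithmetic is easier to follow with numbers. Assuming the defaults established by hdefault() (ssize = 256, dsize = DEF_DIRSIZE = 256), a non-partitioned table, and an illustrative nelem of 1000:

 /*
  * nbuckets   = next_pow2_int(1000)               = 1024
  * max_bucket = low_mask = 1023,  high_mask = 2047
  * nsegs      = next_pow2_int((1024 - 1)/256 + 1) = 4
  * 4 <= dsize (256), so the pre-sized directory is large enough
  */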

◆ my_log2()

int my_log2 ( long  num)

Definition at line 1751 of file dynahash.c.

1752 {
1753  /*
1754  * guard against too-large input, which would be invalid for
1755  * pg_ceil_log2_*()
1756  */
1757  if (num > LONG_MAX / 2)
1758  num = LONG_MAX / 2;
1759 
1760 #if SIZEOF_LONG < 8
1761  return pg_ceil_log2_32(num);
1762 #else
1763  return pg_ceil_log2_64(num);
1764 #endif
1765 }

References pg_ceil_log2_32(), and pg_ceil_log2_64().

Referenced by ExecHashTableCreate(), ExecParallelHashTableSetCurrentBatch(), hash_choose_num_partitions(), hash_create(), MultiExecParallelHash(), next_pow2_int(), next_pow2_long(), and subxact_info_read().
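my_log2() computes a ceiling log2, so non-powers of two round up; a few illustrative values:

 /*
  * my_log2(1)    = 0
  * my_log2(3)    = 2
  * my_log2(1000) = 10   (next power of two is 1024)
  * my_log2(1024) = 10
  */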

◆ next_pow2_int()

static int next_pow2_int ( long  num)
static

Definition at line 1777 of file dynahash.c.

1778 {
1779  if (num > INT_MAX / 2)
1780  num = INT_MAX / 2;
1781  return 1 << my_log2(num);
1782 }

References my_log2().

Referenced by hash_create(), and init_htab().

◆ next_pow2_long()

static long next_pow2_long ( long  num)
static

Definition at line 1769 of file dynahash.c.

1770 {
1771  /* my_log2's internal range check is sufficient */
1772  return 1L << my_log2(num);
1773 }

References my_log2().

Referenced by hash_estimate_size(), and hash_select_dirsize().

◆ register_seq_scan()

static void register_seq_scan ( HTAB *  hashp)
static

Definition at line 1822 of file dynahash.c.

1823 {
1824  if (num_seq_scans >= MAX_SEQ_SCANS)
1825  elog(ERROR, "too many active hash_seq_search scans, cannot start one on \"%s\"",
1826  hashp->tabname);
1827  seq_scan_tables[num_seq_scans] = hashp;
1828  seq_scan_level[num_seq_scans] = GetCurrentTransactionNestLevel();
1829  num_seq_scans++;
1830 }

References elog, ERROR, GetCurrentTransactionNestLevel(), MAX_SEQ_SCANS, num_seq_scans, seq_scan_level, seq_scan_tables, and HTAB::tabname.

Referenced by hash_seq_init().

◆ seg_alloc()

static HASHSEGMENT seg_alloc ( HTAB *  hashp)
static

Definition at line 1644 of file dynahash.c.

1645 {
1646  HASHSEGMENT segp;
1647 
1648  CurrentDynaHashCxt = hashp->hcxt;
1649  segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);
1650 
1651  if (!segp)
1652  return NULL;
1653 
1654  MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);
1655 
1656  return segp;
1657 }

References HTAB::alloc, CurrentDynaHashCxt, HTAB::hcxt, MemSet, and HTAB::ssize.

Referenced by expand_table(), and init_htab().

◆ string_compare()

static int string_compare ( const char *  key1,
const char *  key2,
Size  keysize 
)
static

Definition at line 307 of file dynahash.c.

308 {
309  return strncmp(key1, key2, keysize - 1);
310 }

Referenced by hash_create().
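string_compare() is not called directly by callers; hash_create() installs it as the match function for string-keyed tables. A minimal sketch of creating such a table (MyEntry and the table name are hypothetical; HASH_STRINGS requests C-string key handling, and the key is assumed to be the first field of the entry struct):

 HASHCTL    ctl;
 HTAB      *tab;

 ctl.keysize = NAMEDATALEN;        /* fixed buffer size for the string key */
 ctl.entrysize = sizeof(MyEntry);
 tab = hash_create("my string-keyed table", 128, &ctl,
                   HASH_ELEM | HASH_STRINGS);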

Variable Documentation

◆ CurrentDynaHashCxt

MemoryContext CurrentDynaHashCxt = NULL
static

Definition at line 288 of file dynahash.c.

Referenced by dir_realloc(), DynaHashAlloc(), element_alloc(), hash_create(), init_htab(), and seg_alloc().

◆ num_seq_scans

int num_seq_scans = 0
static

◆ seq_scan_level

int seq_scan_level[MAX_SEQ_SCANS]
static

Definition at line 1816 of file dynahash.c.

Referenced by AtEOSubXact_HashTables(), deregister_seq_scan(), and register_seq_scan().

◆ seq_scan_tables