PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
dynahash.c File Reference
#include "postgres.h"
#include <limits.h>
#include "access/xact.h"
#include "common/hashfn.h"
#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
#include "utils/memutils.h"
Include dependency graph for dynahash.c:

Go to the source code of this file.

Data Structures

struct  FreeListData
 
struct  HASHHDR
 
struct  HTAB
 

Macros

#define DEF_SEGSIZE   256
 
#define DEF_SEGSIZE_SHIFT   8 /* must be log2(DEF_SEGSIZE) */
 
#define DEF_DIRSIZE   256
 
#define NUM_FREELISTS   32
 
#define IS_PARTITIONED(hctl)   ((hctl)->num_partitions != 0)
 
#define FREELIST_IDX(hctl, hashcode)    (IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)
 
#define ELEMENTKEY(helem)   (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))
 
#define ELEMENT_FROM_KEY(key)    ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))
 
#define MOD(x, y)   ((x) & ((y)-1))
 
#define MAX_SEQ_SCANS   100
 

Typedefs

typedef HASHELEMENT *HASHBUCKET
 
typedef HASHBUCKET *HASHSEGMENT
 

Functions

static void * DynaHashAlloc (Size size)
 
static HASHSEGMENT seg_alloc (HTAB *hashp)
 
static bool element_alloc (HTAB *hashp, int nelem, int freelist_idx)
 
static bool dir_realloc (HTAB *hashp)
 
static bool expand_table (HTAB *hashp)
 
static HASHBUCKET get_hash_entry (HTAB *hashp, int freelist_idx)
 
static void hdefault (HTAB *hashp)
 
static int choose_nelem_alloc (Size entrysize)
 
static bool init_htab (HTAB *hashp, long nelem)
 
static pg_noreturn void hash_corrupted (HTAB *hashp)
 
static uint32 hash_initial_lookup (HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
 
static long next_pow2_long (long num)
 
static int next_pow2_int (long num)
 
static void register_seq_scan (HTAB *hashp)
 
static void deregister_seq_scan (HTAB *hashp)
 
static bool has_seq_scans (HTAB *hashp)
 
static int string_compare (const char *key1, const char *key2, Size keysize)
 
HTAB * hash_create (const char *tabname, long nelem, const HASHCTL *info, int flags)
 
Size hash_estimate_size (long num_entries, Size entrysize)
 
long hash_select_dirsize (long num_entries)
 
Size hash_get_shared_size (HASHCTL *info, int flags)
 
void hash_destroy (HTAB *hashp)
 
void hash_stats (const char *where, HTAB *hashp)
 
uint32 get_hash_value (HTAB *hashp, const void *keyPtr)
 
static uint32 calc_bucket (HASHHDR *hctl, uint32 hash_val)
 
void * hash_search (HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
 
void * hash_search_with_hash_value (HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
 
bool hash_update_hash_key (HTAB *hashp, void *existingEntry, const void *newKeyPtr)
 
long hash_get_num_entries (HTAB *hashp)
 
void hash_seq_init (HASH_SEQ_STATUS *status, HTAB *hashp)
 
void hash_seq_init_with_hash_value (HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
 
void * hash_seq_search (HASH_SEQ_STATUS *status)
 
void hash_seq_term (HASH_SEQ_STATUS *status)
 
void hash_freeze (HTAB *hashp)
 
int my_log2 (long num)
 
void AtEOXact_HashTables (bool isCommit)
 
void AtEOSubXact_HashTables (bool isCommit, int nestDepth)
 

Variables

static MemoryContext CurrentDynaHashCxt = NULL
 
static HTAB * seq_scan_tables [MAX_SEQ_SCANS]
 
static int seq_scan_level [MAX_SEQ_SCANS]
 
static int num_seq_scans = 0
 

Macro Definition Documentation

◆ DEF_DIRSIZE

#define DEF_DIRSIZE   256

Definition at line 125 of file dynahash.c.

◆ DEF_SEGSIZE

#define DEF_SEGSIZE   256

Definition at line 123 of file dynahash.c.

◆ DEF_SEGSIZE_SHIFT

#define DEF_SEGSIZE_SHIFT   8 /* must be log2(DEF_SEGSIZE) */

Definition at line 124 of file dynahash.c.

◆ ELEMENT_FROM_KEY

#define ELEMENT_FROM_KEY (   key)     ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))

Definition at line 249 of file dynahash.c.

◆ ELEMENTKEY

#define ELEMENTKEY (   helem)    (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))

Definition at line 244 of file dynahash.c.

◆ FREELIST_IDX

#define FREELIST_IDX (   hctl,
  hashcode 
)     (IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)

Definition at line 212 of file dynahash.c.

◆ IS_PARTITIONED

#define IS_PARTITIONED (   hctl)    ((hctl)->num_partitions != 0)

Definition at line 210 of file dynahash.c.

◆ MAX_SEQ_SCANS

#define MAX_SEQ_SCANS   100

Definition at line 1857 of file dynahash.c.

◆ MOD

#define MOD (   x,
  y 
)    ((x) & ((y)-1))

Definition at line 255 of file dynahash.c.

◆ NUM_FREELISTS

#define NUM_FREELISTS   32

Definition at line 128 of file dynahash.c.

Typedef Documentation

◆ HASHBUCKET

Definition at line 131 of file dynahash.c.

◆ HASHSEGMENT

Definition at line 134 of file dynahash.c.

Function Documentation

◆ AtEOSubXact_HashTables()

void AtEOSubXact_HashTables ( bool  isCommit,
int  nestDepth 
)

Definition at line 1939 of file dynahash.c.

/*
 * Subtransaction-end cleanup: release any hash_seq_search scans that were
 * opened at or below the given nesting depth.  A scan still open at
 * subtransaction commit is a leak (a missing hash_seq_term call), so we
 * warn about it; at abort an open scan is expected and dropped silently.
 *
 * NOTE(review): this excerpt elides source lines 1954-1956, which carry
 * the elog() pointer argument and the swap-with-last-entry removal from
 * seq_scan_tables[]/seq_scan_level[] -- confirm against the full
 * dynahash.c before relying on the visible text alone.
 */
1940{
1941 int i;
1942
1943 /*
1944 * Search backward to make cleanup easy. Note we must check all entries,
1945 * not only those at the end of the array, because deletion technique
1946 * doesn't keep them in order.
1947 */
1948 for (i = num_seq_scans - 1; i >= 0; i--)
1949 {
1950 if (seq_scan_level[i] >= nestDepth)
1951 {
1952 if (isCommit)
1953 elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1957 num_seq_scans--;
1958 }
1959 }
1960}
static HTAB * seq_scan_tables[MAX_SEQ_SCANS]
Definition: dynahash.c:1859
static int seq_scan_level[MAX_SEQ_SCANS]
Definition: dynahash.c:1860
static int num_seq_scans
Definition: dynahash.c:1861
#define WARNING
Definition: elog.h:36
#define elog(elevel,...)
Definition: elog.h:225
int i
Definition: isn.c:77

References elog, i, num_seq_scans, seq_scan_level, seq_scan_tables, and WARNING.

Referenced by AbortSubTransaction(), and CommitSubTransaction().

◆ AtEOXact_HashTables()

void AtEOXact_HashTables ( bool  isCommit)

Definition at line 1913 of file dynahash.c.

/*
 * Transaction-end cleanup: forget all active hash_seq_search scans.
 * At commit each remaining scan is reported as a leak; at abort they are
 * discarded silently.  Either way the tracking array is reset.
 *
 * NOTE(review): this excerpt elides source line 1931 (the pointer
 * argument of the elog call, presumably seq_scan_tables[i]) -- verify
 * against the full dynahash.c.
 */
1914{
1915 /*
1916 * During abort cleanup, open scans are expected; just silently clean 'em
1917 * out. An open scan at commit means someone forgot a hash_seq_term()
1918 * call, so complain.
1919 *
1920 * Note: it's tempting to try to print the tabname here, but refrain for
1921 * fear of touching deallocated memory. This isn't a user-facing message
1922 * anyway, so it needn't be pretty.
1923 */
1924 if (isCommit)
1925 {
1926 int i;
1927
1928 for (i = 0; i < num_seq_scans; i++)
1929 {
1930 elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1932 }
1933 }
1934 num_seq_scans = 0;
1935}

References elog, i, num_seq_scans, seq_scan_tables, and WARNING.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), pgarch_archiveXlog(), PrepareTransaction(), WalSummarizerMain(), and WalWriterMain().

◆ calc_bucket()

/*
 * calc_bucket -- map a hash value to a bucket number, per linear hashing.
 *
 * First reduce the hash with the wider (high) mask; if that lands beyond
 * the highest bucket allocated so far, fold it down again with the
 * narrower (low) mask.
 *
 * Definition at line 919 of file dynahash.c.
 */
static inline uint32
calc_bucket(HASHHDR *hctl, uint32 hash_val)
{
	uint32		result = hash_val & hctl->high_mask;

	/* That bucket not split off yet?  Use the previous power-of-2 mask. */
	if (result > hctl->max_bucket)
		result &= hctl->low_mask;

	return result;
}
uint32_t uint32
Definition: c.h:502
uint32 high_mask
Definition: dynahash.c:187
uint32 max_bucket
Definition: dynahash.c:186
uint32 low_mask
Definition: dynahash.c:188

References HASHHDR::high_mask, HASHHDR::low_mask, and HASHHDR::max_bucket.

Referenced by expand_table(), and hash_initial_lookup().

◆ choose_nelem_alloc()

/*
 * choose_nelem_alloc -- pick how many elements to allocate per batch.
 *
 * Returns at least 32, rounded up so that the total allocation request
 * comes out at (or just under) a power of 2.  That matters little for
 * shared-memory tables, but palloc rounds requests up to a power of 2,
 * so ignoring this could waste up to half the allocated space.
 *
 * Definition at line 657 of file dynahash.c.
 */
static int
choose_nelem_alloc(Size entrysize)
{
	/* Per-element footprint: header plus user data (must match element_alloc) */
	Size		elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
	Size		request = 32 * 4;	/* assume elementSize at least 8 */
	int			count;

	/* Keep doubling the request until it covers at least 32 elements. */
	do
	{
		request <<= 1;
		count = request / elementSize;
	} while (count < 32);

	return count;
}
#define MAXALIGN(LEN)
Definition: c.h:782
size_t Size
Definition: c.h:576

References MAXALIGN.

Referenced by hash_estimate_size(), and init_htab().

◆ deregister_seq_scan()

static void deregister_seq_scan ( HTAB hashp)
static

Definition at line 1878 of file dynahash.c.

/*
 * Remove the most recently registered active scan of the given table from
 * the tracking arrays; it is an ERROR if no scan of that table is active.
 *
 * NOTE(review): this excerpt elides source lines 1887-1888, which move
 * the last array entries (seq_scan_tables/seq_scan_level) into slot i to
 * fill the hole -- confirm against the full dynahash.c.
 */
1879{
1880 int i;
1881
1882 /* Search backward since it's most likely at the stack top */
1883 for (i = num_seq_scans - 1; i >= 0; i--)
1884 {
1885 if (seq_scan_tables[i] == hashp)
1886 {
1889 num_seq_scans--;
1890 return;
1891 }
1892 }
1893 elog(ERROR, "no hash_seq_search scan for hash table \"%s\"",
1894 hashp->tabname);
1895}
#define ERROR
Definition: elog.h:39
char * tabname
Definition: dynahash.c:228

References elog, ERROR, i, num_seq_scans, seq_scan_level, seq_scan_tables, and HTAB::tabname.

Referenced by hash_seq_term().

◆ dir_realloc()

/*
 * dir_realloc -- double the size of the hash directory.
 *
 * Only permitted when no maximum directory size was imposed
 * (max_dsize == NO_MAX_DSIZE); shared tables have a fixed directory.
 * Returns false, leaving the table untouched, if growth is disallowed
 * or the allocation fails.
 *
 * Definition at line 1649 of file dynahash.c.
 */
static bool
dir_realloc(HTAB *hashp)
{
	HASHSEGMENT *newdir;
	HASHSEGMENT *olddir;
	long		new_dsize;
	long		old_bytes;
	long		new_bytes;

	/* A table with a directory-size cap cannot be grown this way. */
	if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
		return false;

	/* Double the directory. */
	new_dsize = hashp->hctl->dsize << 1;
	old_bytes = hashp->hctl->dsize * sizeof(HASHSEGMENT);
	new_bytes = new_dsize * sizeof(HASHSEGMENT);

	olddir = hashp->dir;
	CurrentDynaHashCxt = hashp->hcxt;
	newdir = (HASHSEGMENT *) hashp->alloc((Size) new_bytes);

	if (newdir == NULL)
		return false;

	/* Carry over the old segment pointers and zero the new tail. */
	memcpy(newdir, olddir, old_bytes);
	MemSet(((char *) newdir) + old_bytes, 0, new_bytes - old_bytes);
	hashp->dir = newdir;
	hashp->hctl->dsize = new_dsize;

	/* XXX assume the allocator is palloc, so we know how to free */
	Assert(hashp->alloc == DynaHashAlloc);
	pfree(olddir);

	return true;
}
#define MemSet(start, val, len)
Definition: c.h:991
static void * DynaHashAlloc(Size size)
Definition: dynahash.c:291
static MemoryContext CurrentDynaHashCxt
Definition: dynahash.c:288
HASHBUCKET * HASHSEGMENT
Definition: dynahash.c:134
Assert(PointerIsAligned(start, uint64))
#define NO_MAX_DSIZE
Definition: hsearch.h:108
void pfree(void *pointer)
Definition: mcxt.c:1528
long max_dsize
Definition: dynahash.c:194
long dsize
Definition: dynahash.c:184
HASHHDR * hctl
Definition: dynahash.c:221
MemoryContext hcxt
Definition: dynahash.c:227
HashAllocFunc alloc
Definition: dynahash.c:226
HASHSEGMENT * dir
Definition: dynahash.c:222

References HTAB::alloc, Assert(), CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, DynaHashAlloc(), HTAB::hctl, HTAB::hcxt, HASHHDR::max_dsize, MemSet, NO_MAX_DSIZE, and pfree().

Referenced by expand_table().

◆ DynaHashAlloc()

static void * DynaHashAlloc ( Size  size)
static

Definition at line 291 of file dynahash.c.

/*
 * Default memory allocator for dynahash tables: allocates from
 * CurrentDynaHashCxt.
 *
 * NOTE(review): the body (source lines 293-295) is elided in this
 * excerpt.  The References list below indicates it asserts
 * MemoryContextIsValid(CurrentDynaHashCxt) and calls
 * MemoryContextAllocExtended() with MCXT_ALLOC_NO_OOM -- presumably
 * returning NULL on out-of-memory rather than erroring; verify against
 * the full dynahash.c.
 */
292{
296}
#define MCXT_ALLOC_NO_OOM
Definition: fe_memutils.h:29
void * MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
Definition: mcxt.c:1242
#define MemoryContextIsValid(context)
Definition: memnodes.h:145

References Assert(), CurrentDynaHashCxt, MCXT_ALLOC_NO_OOM, MemoryContextAllocExtended(), and MemoryContextIsValid.

Referenced by dir_realloc(), hash_create(), and hash_destroy().

◆ element_alloc()

/*
 * element_alloc -- allocate a batch of nelem entries and push them onto
 * the given freelist.
 *
 * Returns false if the table is marked fixed-size or the allocation
 * fails.  For partitioned tables the freelist spinlock is held while the
 * new chain is spliced in, since other backends may touch the freelist
 * concurrently.
 *
 * Definition at line 1707 of file dynahash.c.
 */
static bool
element_alloc(HTAB *hashp, int nelem, int freelist_idx)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHELEMENT *block;
	HASHELEMENT *chain;
	HASHELEMENT *cur;
	Size		elementSize;
	int			n;

	/* Fixed-size tables never grow. */
	if (hashp->isfixed)
		return false;

	/* Each element has a HASHELEMENT header plus user data. */
	elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);

	CurrentDynaHashCxt = hashp->hcxt;
	block = (HASHELEMENT *) hashp->alloc(nelem * elementSize);

	if (block == NULL)
		return false;

	/* Thread the fresh entries into a chain; the last ends up on top. */
	chain = NULL;
	cur = block;
	for (n = 0; n < nelem; n++)
	{
		cur->link = chain;
		chain = cur;
		cur = (HASHELEMENT *) (((char *) cur) + elementSize);
	}

	/* if partitioned, must lock to touch freeList */
	if (IS_PARTITIONED(hctl))
		SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);

	/* freelist could be nonempty if two backends did this concurrently */
	block->link = hctl->freeList[freelist_idx].freeList;
	hctl->freeList[freelist_idx].freeList = chain;

	if (IS_PARTITIONED(hctl))
		SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

	return true;
}
#define IS_PARTITIONED(hctl)
Definition: dynahash.c:210
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
slock_t mutex
Definition: dynahash.c:155
HASHELEMENT * freeList
Definition: dynahash.c:157
struct HASHELEMENT * link
Definition: hsearch.h:53
FreeListData freeList[NUM_FREELISTS]
Definition: dynahash.c:180
Size entrysize
Definition: dynahash.c:192
bool isfixed
Definition: dynahash.c:230

References HTAB::alloc, CurrentDynaHashCxt, HASHHDR::entrysize, FreeListData::freeList, HASHHDR::freeList, HTAB::hctl, HTAB::hcxt, i, IS_PARTITIONED, HTAB::isfixed, HASHELEMENT::link, MAXALIGN, FreeListData::mutex, SpinLockAcquire, and SpinLockRelease.

Referenced by get_hash_entry(), and hash_create().

◆ expand_table()

/*
 * expand_table -- grow the table by one bucket, redistributing entries
 * from the one old bucket that shares hash values with it (linear
 * hashing).
 *
 * Returns false if we could not grow (directory full or segment
 * allocation failed); the table remains valid in that case.  Must not be
 * called on partitioned tables.
 *
 * Definition at line 1552 of file dynahash.c.
 */
static bool
expand_table(HTAB *hashp)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHSEGMENT split_seg;
	HASHSEGMENT added_seg;
	long		split_bucket;
	long		added_bucket;
	long		added_segnum;
	long		added_segndx;
	long		split_segnum;
	long		split_segndx;
	HASHBUCKET *keep_tail;
	HASHBUCKET *move_tail;
	HASHBUCKET	elem;
	HASHBUCKET	next;

	Assert(!IS_PARTITIONED(hctl));

#ifdef HASH_STATISTICS
	hash_expansions++;
#endif

	/* Locate where the new bucket will live. */
	added_bucket = hctl->max_bucket + 1;
	added_segnum = added_bucket >> hashp->sshift;
	added_segndx = MOD(added_bucket, hashp->ssize);

	if (added_segnum >= hctl->nsegs)
	{
		/* Allocate new segment if necessary -- could fail if dir full */
		if (added_segnum >= hctl->dsize)
			if (!dir_realloc(hashp))
				return false;
		if (!(hashp->dir[added_segnum] = seg_alloc(hashp)))
			return false;
		hctl->nsegs++;
	}

	/* OK, we created a new bucket */
	hctl->max_bucket++;

	/*
	 * *Before* changing masks, find the old bucket whose entries share
	 * hash values with the new one; some of them may need to move.  Note
	 * that added_bucket is certainly larger than low_mask at this point,
	 * so we can skip the first step of the regular hash mask calc.
	 */
	split_bucket = (added_bucket & hctl->low_mask);

	/*
	 * If we crossed a power of 2, readjust masks.
	 */
	if ((uint32) added_bucket > hctl->high_mask)
	{
		hctl->low_mask = hctl->high_mask;
		hctl->high_mask = (uint32) added_bucket | hctl->low_mask;
	}

	/*
	 * Relocate records to the new bucket.  NOTE: because of the way hash
	 * masking is done in calc_bucket, only this one old bucket can need
	 * splitting here.  With a different way of reducing the hash value,
	 * that might not be true!
	 */
	split_segnum = split_bucket >> hashp->sshift;
	split_segndx = MOD(split_bucket, hashp->ssize);

	split_seg = hashp->dir[split_segnum];
	added_seg = hashp->dir[added_segnum];

	keep_tail = &split_seg[split_segndx];
	move_tail = &added_seg[added_segndx];

	/* Walk the old chain, appending each entry to whichever chain owns it. */
	for (elem = *keep_tail; elem != NULL; elem = next)
	{
		next = elem->link;
		if ((long) calc_bucket(hctl, elem->hashvalue) == split_bucket)
		{
			*keep_tail = elem;
			keep_tail = &elem->link;
		}
		else
		{
			*move_tail = elem;
			move_tail = &elem->link;
		}
	}
	/* don't forget to terminate the rebuilt hash chains... */
	*keep_tail = NULL;
	*move_tail = NULL;

	return true;
}
static HASHSEGMENT seg_alloc(HTAB *hashp)
Definition: dynahash.c:1688
#define MOD(x, y)
Definition: dynahash.c:255
static bool dir_realloc(HTAB *hashp)
Definition: dynahash.c:1649
static uint32 calc_bucket(HASHHDR *hctl, uint32 hash_val)
Definition: dynahash.c:919
uint32 hashvalue
Definition: hsearch.h:54
long nsegs
Definition: dynahash.c:185
long ssize
Definition: dynahash.c:237
int sshift
Definition: dynahash.c:238

References Assert(), calc_bucket(), HTAB::dir, dir_realloc(), HASHHDR::dsize, HASHELEMENT::hashvalue, HTAB::hctl, HASHHDR::high_mask, IS_PARTITIONED, HASHELEMENT::link, HASHHDR::low_mask, HASHHDR::max_bucket, MOD, HASHHDR::nsegs, seg_alloc(), HTAB::sshift, and HTAB::ssize.

Referenced by hash_search_with_hash_value().

◆ get_hash_entry()

/*
 * get_hash_entry -- pop a free element for the given freelist, allocating
 * more elements or borrowing from sibling freelists as needed.
 *
 * Returns NULL only when no element can be obtained anywhere (out of
 * memory).  On success the element has been counted in freelist_idx's
 * nentries, no matter which freelist it physically came from.
 *
 * Lock discipline: for partitioned tables the freelist spinlock is held
 * from the successful head-read (loop break below) until the element has
 * been unlinked and counted.
 *
 * Definition at line 1257 of file dynahash.c.
 */
static HASHBUCKET
get_hash_entry(HTAB *hashp, int freelist_idx)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHBUCKET	entry;

	for (;;)
	{
		/* if partitioned, must lock to touch nentries and freeList */
		if (IS_PARTITIONED(hctl))
			SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);

		/* try to get an entry from the freelist */
		entry = hctl->freeList[freelist_idx].freeList;

		if (entry != NULL)
			break;				/* NB: spinlock (if any) still held */

		if (IS_PARTITIONED(hctl))
			SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

		/*
		 * No free elements in this freelist.  In a partitioned table, there
		 * might be entries in other freelists, but to reduce contention we
		 * prefer to first try to get another chunk of buckets from the main
		 * shmem allocator.  If that fails, though, we *MUST* root through
		 * all the other freelists before giving up.  There are multiple
		 * callers that assume that they can allocate every element in the
		 * initially requested table size, or that deleting an element
		 * guarantees they can insert a new element, even if shared memory
		 * is entirely full.  Failing because the needed element is in a
		 * different freelist is not acceptable.
		 */
		if (!element_alloc(hashp, hctl->nelem_alloc, freelist_idx))
		{
			int			victim;

			if (!IS_PARTITIONED(hctl))
				return NULL;	/* out of memory */

			/* try to borrow element from another freelist */
			victim = freelist_idx;
			for (;;)
			{
				victim = (victim + 1) % NUM_FREELISTS;
				if (victim == freelist_idx)
					break;		/* examined all freelists, fail */

				SpinLockAcquire(&(hctl->freeList[victim].mutex));
				entry = hctl->freeList[victim].freeList;

				if (entry != NULL)
				{
					hctl->freeList[victim].freeList = entry->link;
					SpinLockRelease(&(hctl->freeList[victim].mutex));

					/* careful: count the new element in its proper freelist */
					SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
					hctl->freeList[freelist_idx].nentries++;
					SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

					return entry;
				}

				SpinLockRelease(&(hctl->freeList[victim].mutex));
			}

			/* no elements available to borrow either, so out of memory */
			return NULL;
		}
	}

	/* remove entry from freelist, bump nentries */
	hctl->freeList[freelist_idx].freeList = entry->link;
	hctl->freeList[freelist_idx].nentries++;

	if (IS_PARTITIONED(hctl))
		SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

	return entry;
}
static bool element_alloc(HTAB *hashp, int nelem, int freelist_idx)
Definition: dynahash.c:1707
#define NUM_FREELISTS
Definition: dynahash.c:128
long nentries
Definition: dynahash.c:156
int nelem_alloc
Definition: dynahash.c:197

References element_alloc(), FreeListData::freeList, HASHHDR::freeList, HTAB::hctl, IS_PARTITIONED, HASHELEMENT::link, FreeListData::mutex, HASHHDR::nelem_alloc, FreeListData::nentries, NUM_FREELISTS, SpinLockAcquire, and SpinLockRelease.

Referenced by hash_search_with_hash_value().

◆ get_hash_value()

/*
 * get_hash_value -- compute a key's hash exactly as hash_search would.
 *
 * Exposed so callers can hash a key once and reuse the value with
 * hash_search_with_hash_value().
 *
 * Definition at line 912 of file dynahash.c.
 */
uint32
get_hash_value(HTAB *hashp, const void *keyPtr)
{
	/* Delegate to the table's configured hash function. */
	return (*hashp->hash) (keyPtr, hashp->keysize);
}
HashValueFunc hash
Definition: dynahash.c:223
Size keysize
Definition: dynahash.c:236

References HTAB::hash, and HTAB::keysize.

Referenced by BufTableHashCode(), LockTagHashCode(), and lookup_type_cache().

◆ has_seq_scans()

/*
 * has_seq_scans -- is any sequential scan currently active on this table?
 *
 * Definition at line 1899 of file dynahash.c.
 */
static bool
has_seq_scans(HTAB *hashp)
{
	int			idx;

	for (idx = 0; idx < num_seq_scans; idx++)
	{
		if (seq_scan_tables[idx] == hashp)
			return true;
	}

	return false;
}

References i, num_seq_scans, and seq_scan_tables.

Referenced by hash_freeze(), and hash_search_with_hash_value().

◆ hash_corrupted()

static void hash_corrupted ( HTAB hashp)
static

Definition at line 1781 of file dynahash.c.

1782{
1783 /*
1784 * If the corruption is in a shared hashtable, we'd better force a
1785 * systemwide restart. Otherwise, just shut down this one backend.
1786 */
1787 if (hashp->isshared)
1788 elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
1789 else
1790 elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
1791}
#define FATAL
Definition: elog.h:41
#define PANIC
Definition: elog.h:42
bool isshared
Definition: dynahash.c:229

References elog, FATAL, HTAB::isshared, PANIC, and HTAB::tabname.

Referenced by hash_initial_lookup().

◆ hash_create()

HTAB * hash_create ( const char *  tabname,
long  nelem,
const HASHCTL info,
int  flags 
)

Definition at line 352 of file dynahash.c.

353{
354 HTAB *hashp;
355 HASHHDR *hctl;
356
357 /*
358 * Hash tables now allocate space for key and data, but you have to say
359 * how much space to allocate.
360 */
361 Assert(flags & HASH_ELEM);
362 Assert(info->keysize > 0);
363 Assert(info->entrysize >= info->keysize);
364
365 /*
366 * For shared hash tables, we have a local hash header (HTAB struct) that
367 * we allocate in TopMemoryContext; all else is in shared memory.
368 *
369 * For non-shared hash tables, everything including the hash header is in
370 * a memory context created specially for the hash table --- this makes
371 * hash_destroy very simple. The memory context is made a child of either
372 * a context specified by the caller, or TopMemoryContext if nothing is
373 * specified.
374 */
375 if (flags & HASH_SHARED_MEM)
376 {
377 /* Set up to allocate the hash header */
379 }
380 else
381 {
382 /* Create the hash table's private memory context */
383 if (flags & HASH_CONTEXT)
384 CurrentDynaHashCxt = info->hcxt;
385 else
388 "dynahash",
390 }
391
392 /* Initialize the hash header, plus a copy of the table name */
394 sizeof(HTAB) + strlen(tabname) + 1);
395 MemSet(hashp, 0, sizeof(HTAB));
396
397 hashp->tabname = (char *) (hashp + 1);
398 strcpy(hashp->tabname, tabname);
399
400 /* If we have a private context, label it with hashtable's name */
401 if (!(flags & HASH_SHARED_MEM))
403
404 /*
405 * Select the appropriate hash function (see comments at head of file).
406 */
407 if (flags & HASH_FUNCTION)
408 {
409 Assert(!(flags & (HASH_BLOBS | HASH_STRINGS)));
410 hashp->hash = info->hash;
411 }
412 else if (flags & HASH_BLOBS)
413 {
414 Assert(!(flags & HASH_STRINGS));
415 /* We can optimize hashing for common key sizes */
416 if (info->keysize == sizeof(uint32))
417 hashp->hash = uint32_hash;
418 else
419 hashp->hash = tag_hash;
420 }
421 else
422 {
423 /*
424 * string_hash used to be considered the default hash method, and in a
425 * non-assert build it effectively still is. But we now consider it
426 * an assertion error to not say HASH_STRINGS explicitly. To help
427 * catch mistaken usage of HASH_STRINGS, we also insist on a
428 * reasonably long string length: if the keysize is only 4 or 8 bytes,
429 * it's almost certainly an integer or pointer not a string.
430 */
431 Assert(flags & HASH_STRINGS);
432 Assert(info->keysize > 8);
433
434 hashp->hash = string_hash;
435 }
436
437 /*
438 * If you don't specify a match function, it defaults to string_compare if
439 * you used string_hash, and to memcmp otherwise.
440 *
441 * Note: explicitly specifying string_hash is deprecated, because this
442 * might not work for callers in loadable modules on some platforms due to
443 * referencing a trampoline instead of the string_hash function proper.
444 * Specify HASH_STRINGS instead.
445 */
446 if (flags & HASH_COMPARE)
447 hashp->match = info->match;
448 else if (hashp->hash == string_hash)
450 else
451 hashp->match = memcmp;
452
453 /*
454 * Similarly, the key-copying function defaults to strlcpy or memcpy.
455 */
456 if (flags & HASH_KEYCOPY)
457 hashp->keycopy = info->keycopy;
458 else if (hashp->hash == string_hash)
459 {
460 /*
461 * The signature of keycopy is meant for memcpy(), which returns
462 * void*, but strlcpy() returns size_t. Since we never use the return
463 * value of keycopy, and size_t is pretty much always the same size as
464 * void *, this should be safe. The extra cast in the middle is to
465 * avoid warnings from -Wcast-function-type.
466 */
468 }
469 else
470 hashp->keycopy = memcpy;
471
472 /* And select the entry allocation function, too. */
473 if (flags & HASH_ALLOC)
474 hashp->alloc = info->alloc;
475 else
476 hashp->alloc = DynaHashAlloc;
477
478 if (flags & HASH_SHARED_MEM)
479 {
480 /*
481 * ctl structure and directory are preallocated for shared memory
482 * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
483 * well.
484 */
485 hashp->hctl = info->hctl;
486 hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
487 hashp->hcxt = NULL;
488 hashp->isshared = true;
489
490 /* hash table already exists, we're just attaching to it */
491 if (flags & HASH_ATTACH)
492 {
493 /* make local copies of some heavily-used values */
494 hctl = hashp->hctl;
495 hashp->keysize = hctl->keysize;
496 hashp->ssize = hctl->ssize;
497 hashp->sshift = hctl->sshift;
498
499 return hashp;
500 }
501 }
502 else
503 {
504 /* setup hash table defaults */
505 hashp->hctl = NULL;
506 hashp->dir = NULL;
507 hashp->hcxt = CurrentDynaHashCxt;
508 hashp->isshared = false;
509 }
510
511 if (!hashp->hctl)
512 {
513 hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
514 if (!hashp->hctl)
516 (errcode(ERRCODE_OUT_OF_MEMORY),
517 errmsg("out of memory")));
518 }
519
520 hashp->frozen = false;
521
522 hdefault(hashp);
523
524 hctl = hashp->hctl;
525
526 if (flags & HASH_PARTITION)
527 {
528 /* Doesn't make sense to partition a local hash table */
529 Assert(flags & HASH_SHARED_MEM);
530
531 /*
532 * The number of partitions had better be a power of 2. Also, it must
533 * be less than INT_MAX (see init_htab()), so call the int version of
534 * next_pow2.
535 */
537
538 hctl->num_partitions = info->num_partitions;
539 }
540
541 if (flags & HASH_SEGMENT)
542 {
543 hctl->ssize = info->ssize;
544 hctl->sshift = my_log2(info->ssize);
545 /* ssize had better be a power of 2 */
546 Assert(hctl->ssize == (1L << hctl->sshift));
547 }
548
549 /*
550 * SHM hash tables have fixed directory size passed by the caller.
551 */
552 if (flags & HASH_DIRSIZE)
553 {
554 hctl->max_dsize = info->max_dsize;
555 hctl->dsize = info->dsize;
556 }
557
558 /* remember the entry sizes, too */
559 hctl->keysize = info->keysize;
560 hctl->entrysize = info->entrysize;
561
562 /* make local copies of heavily-used constant fields */
563 hashp->keysize = hctl->keysize;
564 hashp->ssize = hctl->ssize;
565 hashp->sshift = hctl->sshift;
566
567 /* Build the hash directory structure */
568 if (!init_htab(hashp, nelem))
569 elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname);
570
571 /*
572 * For a shared hash table, preallocate the requested number of elements.
573 * This reduces problems with run-time out-of-shared-memory conditions.
574 *
575 * For a non-shared hash table, preallocate the requested number of
576 * elements if it's less than our chosen nelem_alloc. This avoids wasting
577 * space if the caller correctly estimates a small table size.
578 */
579 if ((flags & HASH_SHARED_MEM) ||
580 nelem < hctl->nelem_alloc)
581 {
582 int i,
583 freelist_partitions,
584 nelem_alloc,
585 nelem_alloc_first;
586
587 /*
588 * If hash table is partitioned, give each freelist an equal share of
589 * the initial allocation. Otherwise only freeList[0] is used.
590 */
591 if (IS_PARTITIONED(hashp->hctl))
592 freelist_partitions = NUM_FREELISTS;
593 else
594 freelist_partitions = 1;
595
596 nelem_alloc = nelem / freelist_partitions;
597 if (nelem_alloc <= 0)
598 nelem_alloc = 1;
599
600 /*
601 * Make sure we'll allocate all the requested elements; freeList[0]
602 * gets the excess if the request isn't divisible by NUM_FREELISTS.
603 */
604 if (nelem_alloc * freelist_partitions < nelem)
605 nelem_alloc_first =
606 nelem - nelem_alloc * (freelist_partitions - 1);
607 else
608 nelem_alloc_first = nelem_alloc;
609
610 for (i = 0; i < freelist_partitions; i++)
611 {
612 int temp = (i == 0) ? nelem_alloc_first : nelem_alloc;
613
614 if (!element_alloc(hashp, temp, i))
616 (errcode(ERRCODE_OUT_OF_MEMORY),
617 errmsg("out of memory")));
618 }
619 }
620
621 if (flags & HASH_FIXED_SIZE)
622 hashp->isfixed = true;
623 return hashp;
624}
void(* pg_funcptr_t)(void)
Definition: c.h:424
static bool init_htab(HTAB *hashp, long nelem)
Definition: dynahash.c:690
static int next_pow2_int(long num)
Definition: dynahash.c:1821
static int string_compare(const char *key1, const char *key2, Size keysize)
Definition: dynahash.c:307
static void hdefault(HTAB *hashp)
Definition: dynahash.c:630
int my_log2(long num)
Definition: dynahash.c:1795
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ereport(elevel,...)
Definition: elog.h:149
uint32 tag_hash(const void *key, Size keysize)
Definition: hashfn.c:677
uint32 uint32_hash(const void *key, Size keysize)
Definition: hashfn.c:688
uint32 string_hash(const void *key, Size keysize)
Definition: hashfn.c:660
#define HASH_KEYCOPY
Definition: hsearch.h:100
#define HASH_STRINGS
Definition: hsearch.h:96
int(* HashCompareFunc)(const void *key1, const void *key2, Size keysize)
Definition: hsearch.h:29
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_ALLOC
Definition: hsearch.h:101
#define HASH_DIRSIZE
Definition: hsearch.h:94
#define HASH_SEGMENT
Definition: hsearch.h:93
#define HASH_ATTACH
Definition: hsearch.h:104
#define HASH_COMPARE
Definition: hsearch.h:99
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HASH_SHARED_MEM
Definition: hsearch.h:103
#define HASH_FIXED_SIZE
Definition: hsearch.h:105
#define HASH_PARTITION
Definition: hsearch.h:92
void *(* HashCopyFunc)(void *dest, const void *src, Size keysize)
Definition: hsearch.h:37
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1185
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void MemoryContextSetIdentifier(MemoryContext context, const char *id)
Definition: mcxt.c:612
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: strlcpy.c:45
long ssize
Definition: hsearch.h:70
HashAllocFunc alloc
Definition: hsearch.h:84
Size keysize
Definition: hsearch.h:75
HashValueFunc hash
Definition: hsearch.h:78
Size entrysize
Definition: hsearch.h:76
long dsize
Definition: hsearch.h:72
HashCompareFunc match
Definition: hsearch.h:80
HASHHDR * hctl
Definition: hsearch.h:88
MemoryContext hcxt
Definition: hsearch.h:86
long num_partitions
Definition: hsearch.h:68
HashCopyFunc keycopy
Definition: hsearch.h:82
long max_dsize
Definition: hsearch.h:73
long num_partitions
Definition: dynahash.c:193
Size keysize
Definition: dynahash.c:191
int sshift
Definition: dynahash.c:196
long ssize
Definition: dynahash.c:195
Definition: dynahash.c:220
HashCompareFunc match
Definition: dynahash.c:224
HashCopyFunc keycopy
Definition: dynahash.c:225
bool frozen
Definition: dynahash.c:233

References HTAB::alloc, HASHCTL::alloc, ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, HASHCTL::dsize, DynaHashAlloc(), element_alloc(), elog, HASHHDR::entrysize, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HTAB::frozen, HTAB::hash, HASHCTL::hash, HASH_ALLOC, HASH_ATTACH, HASH_BLOBS, HASH_COMPARE, HASH_CONTEXT, HASH_DIRSIZE, HASH_ELEM, HASH_FIXED_SIZE, HASH_FUNCTION, HASH_KEYCOPY, HASH_PARTITION, HASH_SEGMENT, HASH_SHARED_MEM, HASH_STRINGS, HTAB::hctl, HASHCTL::hctl, HTAB::hcxt, HASHCTL::hcxt, hdefault(), i, init_htab(), IS_PARTITIONED, HTAB::isfixed, HTAB::isshared, HTAB::keycopy, HASHCTL::keycopy, HASHHDR::keysize, HTAB::keysize, HASHCTL::keysize, HTAB::match, HASHCTL::match, HASHHDR::max_dsize, HASHCTL::max_dsize, MemoryContextAlloc(), MemoryContextSetIdentifier(), MemSet, my_log2(), next_pow2_int(), NUM_FREELISTS, HASHHDR::num_partitions, HASHCTL::num_partitions, HASHHDR::sshift, HTAB::sshift, HASHHDR::ssize, HTAB::ssize, HASHCTL::ssize, string_compare(), string_hash(), strlcpy(), HTAB::tabname, tag_hash(), TopMemoryContext, and uint32_hash().

Referenced by _hash_finish_split(), _PG_init(), AddEventToPendingNotifies(), AddPendingSync(), assign_record_type_typmod(), begin_heap_rewrite(), build_colinfo_names_hash(), build_guc_variables(), build_join_rel_hash(), BuildEventTriggerCache(), cfunc_hashtable_init(), CheckForSessionAndXactLocks(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), create_seq_hashtable(), createConnHash(), CreateLocalPredicateLockHash(), CreatePartitionDirectory(), do_autovacuum(), EnablePortalManager(), ExecInitModifyTable(), ExecuteTruncateGuts(), find_all_inheritors(), find_oper_cache_entry(), find_rendezvous_variable(), get_json_object_as_hash(), GetComboCommandId(), GetConnection(), gistInitBuildBuffers(), gistInitParentMap(), init_missing_cache(), init_procedure_caches(), init_rel_sync_cache(), init_timezone_hashtable(), init_ts_config_cache(), init_uncommitted_enum_types(), init_uncommitted_enum_values(), InitBufferManagerAccess(), InitializeAttoptCache(), InitializeRelfilenumberMap(), InitializeShippableCache(), InitializeTableSpaceCache(), InitLocalBuffers(), InitLockManagerAccess(), InitQueryHashTable(), InitRecoveryTransactionEnvironment(), InitSync(), json_unique_check_init(), load_categories_hash(), log_invalid_page(), logical_begin_heap_rewrite(), logicalrep_partmap_init(), logicalrep_relmap_init(), lookup_proof_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), pa_allocate_worker(), pg_get_backend_memory_contexts(), plpgsql_estate_setup(), PLy_add_exceptions(), populate_recordset_object_start(), process_syncing_tables_for_apply(), rebuild_database_list(), record_C_func(), RegisterExtensibleNodeEntry(), RelationCacheInitialize(), ReorderBufferAllocate(), ReorderBufferBuildTupleCidHash(), ReorderBufferToastInitHash(), ResetUnloggedRelationsInDbspaceDir(), ri_InitHashTables(), select_perl_context(), SerializePendingSyncs(), set_rtable_names(), ShmemInitHash(), smgropen(), 
transformGraph(), and XLogPrefetcherAllocate().

◆ hash_destroy()

void hash_destroy ( HTAB hashp)

Definition at line 866 of file dynahash.c.

867{
868 if (hashp != NULL)
869 {
870 /* allocation method must be one we know how to free, too */
871 Assert(hashp->alloc == DynaHashAlloc);
872 /* so this hashtable must have its own context */
873 Assert(hashp->hcxt != NULL);
874
875 hash_stats("destroy", hashp);
876
877 /*
878 * Free everything by destroying the hash table's memory context.
879 */
881 }
882}
void hash_stats(const char *where, HTAB *hashp)
Definition: dynahash.c:885
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454

References HTAB::alloc, Assert(), DynaHashAlloc(), hash_stats(), HTAB::hcxt, and MemoryContextDelete().

Referenced by _hash_finish_split(), CheckForSessionAndXactLocks(), CompactCheckpointerRequestQueue(), destroy_colinfo_names_hash(), ExecuteTruncateGuts(), find_all_inheritors(), get_json_object_as_hash(), pg_get_backend_memory_contexts(), pgoutput_shutdown(), populate_recordset_object_end(), PostPrepare_PredicateLocks(), process_syncing_tables_for_apply(), ReleasePredicateLocksLocal(), ReorderBufferFreeTXN(), ReorderBufferToastReset(), ReorderBufferTruncateTXN(), ResetSequenceCaches(), ResetUnloggedRelationsInDbspaceDir(), SerializePendingSyncs(), set_rtable_names(), ShutdownRecoveryTransactionEnvironment(), XLogCheckInvalidPages(), and XLogPrefetcherFree().

◆ hash_estimate_size()

Size hash_estimate_size ( long  num_entries,
Size  entrysize 
)

Definition at line 784 of file dynahash.c.

785{
786 Size size;
787 long nBuckets,
788 nSegments,
789 nDirEntries,
790 nElementAllocs,
791 elementSize,
792 elementAllocCnt;
793
794 /* estimate number of buckets wanted */
795 nBuckets = next_pow2_long(num_entries);
796 /* # of segments needed for nBuckets */
797 nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
798 /* directory entries */
799 nDirEntries = DEF_DIRSIZE;
800 while (nDirEntries < nSegments)
801 nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
802
803 /* fixed control info */
804 size = MAXALIGN(sizeof(HASHHDR)); /* but not HTAB, per above */
805 /* directory */
806 size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
807 /* segments */
808 size = add_size(size, mul_size(nSegments,
809 MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
810 /* elements --- allocated in groups of choose_nelem_alloc() entries */
811 elementAllocCnt = choose_nelem_alloc(entrysize);
812 nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
813 elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
814 size = add_size(size,
815 mul_size(nElementAllocs,
816 mul_size(elementAllocCnt, elementSize)));
817
818 return size;
819}
#define DEF_DIRSIZE
Definition: dynahash.c:125
static int choose_nelem_alloc(Size entrysize)
Definition: dynahash.c:657
#define DEF_SEGSIZE
Definition: dynahash.c:123
static long next_pow2_long(long num)
Definition: dynahash.c:1813
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), choose_nelem_alloc(), DEF_DIRSIZE, DEF_SEGSIZE, MAXALIGN, mul_size(), and next_pow2_long().

Referenced by BufTableShmemSize(), CalculateShmemSize(), LockManagerShmemSize(), pgss_memsize(), PredicateLockShmemSize(), and WaitEventCustomShmemSize().

◆ hash_freeze()

void hash_freeze ( HTAB hashp)

Definition at line 1535 of file dynahash.c.

1536{
1537 if (hashp->isshared)
1538 elog(ERROR, "cannot freeze shared hashtable \"%s\"", hashp->tabname);
1539 if (!hashp->frozen && has_seq_scans(hashp))
1540 elog(ERROR, "cannot freeze hashtable \"%s\" because it has active scans",
1541 hashp->tabname);
1542 hashp->frozen = true;
1543}
static bool has_seq_scans(HTAB *hashp)
Definition: dynahash.c:1899

References elog, ERROR, HTAB::frozen, has_seq_scans(), HTAB::isshared, and HTAB::tabname.

◆ hash_get_num_entries()

long hash_get_num_entries ( HTAB hashp)

Definition at line 1342 of file dynahash.c.

1343{
1344 int i;
1345 long sum = hashp->hctl->freeList[0].nentries;
1346
1347 /*
1348 * We currently don't bother with acquiring the mutexes; it's only
1349 * sensible to call this function if you've got lock on all partitions of
1350 * the table.
1351 */
1352 if (IS_PARTITIONED(hashp->hctl))
1353 {
1354 for (i = 1; i < NUM_FREELISTS; i++)
1355 sum += hashp->hctl->freeList[i].nentries;
1356 }
1357
1358 return sum;
1359}

References HASHHDR::freeList, HTAB::hctl, i, IS_PARTITIONED, FreeListData::nentries, and NUM_FREELISTS.

Referenced by build_guc_variables(), compute_array_stats(), compute_tsvector_stats(), entry_alloc(), entry_dealloc(), entry_reset(), EstimatePendingSyncsSpace(), EstimateUncommittedEnumsSpace(), get_crosstab_tuplestore(), get_explain_guc_options(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventCustomNames(), hash_stats(), pgss_shmem_shutdown(), ResetUnloggedRelationsInDbspaceDir(), SerializePendingSyncs(), transformGraph(), and XLogHaveInvalidPages().

◆ hash_get_shared_size()

Size hash_get_shared_size ( HASHCTL info,
int  flags 
)

Definition at line 855 of file dynahash.c.

856{
857 Assert(flags & HASH_DIRSIZE);
858 Assert(info->dsize == info->max_dsize);
859 return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
860}
struct HASHHDR HASHHDR
Definition: hsearch.h:58

References Assert(), HASHCTL::dsize, HASH_DIRSIZE, and HASHCTL::max_dsize.

Referenced by ShmemInitHash().

◆ hash_initial_lookup()

static uint32 hash_initial_lookup ( HTAB hashp,
uint32  hashvalue,
HASHBUCKET **  bucketptr 
)
inlinestatic

Definition at line 1757 of file dynahash.c.

1758{
1759 HASHHDR *hctl = hashp->hctl;
1760 HASHSEGMENT segp;
1761 long segment_num;
1762 long segment_ndx;
1763 uint32 bucket;
1764
1765 bucket = calc_bucket(hctl, hashvalue);
1766
1767 segment_num = bucket >> hashp->sshift;
1768 segment_ndx = MOD(bucket, hashp->ssize);
1769
1770 segp = hashp->dir[segment_num];
1771
1772 if (segp == NULL)
1773 hash_corrupted(hashp);
1774
1775 *bucketptr = &segp[segment_ndx];
1776 return bucket;
1777}
static pg_noreturn void hash_corrupted(HTAB *hashp)
Definition: dynahash.c:1781

References calc_bucket(), HTAB::dir, hash_corrupted(), HTAB::hctl, MOD, HTAB::sshift, and HTAB::ssize.

Referenced by hash_search_with_hash_value(), hash_seq_init_with_hash_value(), and hash_update_hash_key().

◆ hash_search()

void * hash_search ( HTAB hashp,
const void *  keyPtr,
HASHACTION  action,
bool *  foundPtr 
)

Definition at line 956 of file dynahash.c.

960{
961 return hash_search_with_hash_value(hashp,
962 keyPtr,
963 hashp->hash(keyPtr, hashp->keysize),
964 action,
965 foundPtr);
966}
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:969

References generate_unaccent_rules::action, HTAB::hash, hash_search_with_hash_value(), and HTAB::keysize.

Referenced by _hash_finish_split(), _hash_splitbucket(), add_guc_variable(), add_join_rel(), add_to_names_hash(), AddEnumLabel(), AddEventToPendingNotifies(), AddPendingSync(), ApplyLogicalMappingFile(), assign_record_type_typmod(), AsyncExistsPendingNotify(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), build_guc_variables(), build_join_rel_hash(), BuildEventTriggerCache(), cfunc_hashtable_delete(), cfunc_hashtable_insert(), cfunc_hashtable_lookup(), CheckAndPromotePredicateLockRequest(), CheckForSerializableConflictOut(), CheckForSessionAndXactLocks(), colname_is_unique(), CompactCheckpointerRequestQueue(), compile_plperl_function(), compile_pltcl_function(), compute_array_stats(), compute_tsvector_stats(), createNewConnection(), define_custom_variable(), delete_rel_type_cache_if_needed(), deleteConnection(), do_autovacuum(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), DropPreparedStatement(), entry_alloc(), entry_dealloc(), entry_reset(), EnumTypeUncommitted(), EnumUncommitted(), EnumValuesCreate(), EventCacheLookup(), ExecInitModifyTable(), ExecLookupResultRelByOid(), ExecuteTruncateGuts(), ExtendBufferedRelLocal(), FetchPreparedStatement(), finalize_in_progress_typentries(), find_all_inheritors(), find_join_rel(), find_oper_cache_entry(), find_option(), find_rendezvous_variable(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPrivateRefCountEntry(), get_attribute_options(), get_cast_hashentry(), get_rel_sync_entry(), get_tablespace(), GetComboCommandId(), GetConnection(), getConnectionByName(), GetExtensibleNodeEntry(), getmissingattr(), GetPrivateRefCountEntry(), getState(), GetWaitEventCustomIdentifier(), gistGetNodeBuffer(), gistGetParent(), gistMemorizeParent(), gistRelocateBuildBuffersOnSplit(), hash_object_field_end(), init_sequence(), insert_rel_type_cache_if_needed(), InvalidateAttoptCacheCallback(), InvalidateLocalBuffer(), InvalidateOprCacheCallBack(), InvalidateShippableCacheCallback(), 
InvalidateTableSpaceCacheCallback(), is_shippable(), JsObjectGetField(), json_unique_check_key(), LocalBufferAlloc(), LockAcquireExtended(), LockHasWaiters(), LockHeldByMe(), LockRelease(), log_invalid_page(), logical_rewrite_log_mapping(), logicalrep_partition_open(), logicalrep_rel_open(), logicalrep_relmap_update(), lookup_C_func(), lookup_proof_cache(), lookup_ts_config_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), make_oper_cache_entry(), MarkGUCPrefixReserved(), pa_allocate_worker(), pa_find_worker(), pa_free_worker(), PartitionDirectoryLookup(), pg_get_backend_memory_contexts(), pg_tzset(), pgss_store(), plperl_spi_exec_prepared(), plperl_spi_freeplan(), plperl_spi_prepare(), plperl_spi_query_prepared(), pltcl_fetch_interp(), PLy_commit(), PLy_generate_spi_exceptions(), PLy_procedure_get(), PLy_rollback(), PLy_spi_subtransaction_abort(), populate_recordset_object_field_end(), predicatelock_twophase_recover(), PredicateLockExists(), PredicateLockShmemInit(), PredicateLockTwoPhaseFinish(), PrefetchLocalBuffer(), process_syncing_tables_for_apply(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), PutMemoryContextsStatsTupleStore(), rebuild_database_list(), record_C_func(), RegisterExtensibleNodeEntry(), RegisterPredicateLockingXid(), rel_sync_cache_relation_cb(), RelationPreTruncate(), ReleaseOneSerializableXact(), RelFileLocatorSkippingWAL(), RelfilenumberMapInvalidateCallback(), RelidByRelfilenumber(), RememberSyncRequest(), RemoveLocalLock(), ReorderBufferBuildTupleCidHash(), ReorderBufferCleanupTXN(), ReorderBufferToastAppendChunk(), ReorderBufferToastReplace(), ReorderBufferTXNByXid(), ReservePrivateRefCountEntry(), ResetUnloggedRelationsInDbspaceDir(), ResolveCminCmaxDuringDecoding(), RestoreUncommittedEnums(), rewrite_heap_dead_tuple(), rewrite_heap_tuple(), ri_FetchPreparedPlan(), ri_HashCompareOp(), ri_HashPreparedPlan(), ri_LoadConstraintInfo(), 
select_perl_context(), SerializePendingSyncs(), set_rtable_names(), ShmemInitStruct(), smgrdestroy(), smgrDoPendingSyncs(), smgropen(), smgrreleaserellocator(), StandbyAcquireAccessExclusiveLock(), StandbyReleaseAllLocks(), StandbyReleaseLocks(), StandbyReleaseOldLocks(), StandbyReleaseXidEntryLocks(), StorePreparedStatement(), table_recheck_autovac(), TypeCacheRelCallback(), WaitEventCustomNew(), XLogPrefetcherAddFilter(), XLogPrefetcherCompleteFilters(), and XLogPrefetcherIsFiltered().

◆ hash_search_with_hash_value()

void * hash_search_with_hash_value ( HTAB hashp,
const void *  keyPtr,
uint32  hashvalue,
HASHACTION  action,
bool *  foundPtr 
)

Definition at line 969 of file dynahash.c.

974{
975 HASHHDR *hctl = hashp->hctl;
976 int freelist_idx = FREELIST_IDX(hctl, hashvalue);
977 Size keysize;
978 HASHBUCKET currBucket;
979 HASHBUCKET *prevBucketPtr;
980 HashCompareFunc match;
981
982#ifdef HASH_STATISTICS
983 hash_accesses++;
984 hctl->accesses++;
985#endif
986
987 /*
988 * If inserting, check if it is time to split a bucket.
989 *
990 * NOTE: failure to expand table is not a fatal error, it just means we
991 * have to run at higher fill factor than we wanted. However, if we're
992 * using the palloc allocator then it will throw error anyway on
993 * out-of-memory, so we must do this before modifying the table.
994 */
996 {
997 /*
998 * Can't split if running in partitioned mode, nor if frozen, nor if
999 * table is the subject of any active hash_seq_search scans.
1000 */
1001 if (hctl->freeList[0].nentries > (long) hctl->max_bucket &&
1002 !IS_PARTITIONED(hctl) && !hashp->frozen &&
1003 !has_seq_scans(hashp))
1004 (void) expand_table(hashp);
1005 }
1006
1007 /*
1008 * Do the initial lookup
1009 */
1010 (void) hash_initial_lookup(hashp, hashvalue, &prevBucketPtr);
1011 currBucket = *prevBucketPtr;
1012
1013 /*
1014 * Follow collision chain looking for matching key
1015 */
1016 match = hashp->match; /* save one fetch in inner loop */
1017 keysize = hashp->keysize; /* ditto */
1018
1019 while (currBucket != NULL)
1020 {
1021 if (currBucket->hashvalue == hashvalue &&
1022 match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
1023 break;
1024 prevBucketPtr = &(currBucket->link);
1025 currBucket = *prevBucketPtr;
1026#ifdef HASH_STATISTICS
1027 hash_collisions++;
1028 hctl->collisions++;
1029#endif
1030 }
1031
1032 if (foundPtr)
1033 *foundPtr = (bool) (currBucket != NULL);
1034
1035 /*
1036 * OK, now what?
1037 */
1038 switch (action)
1039 {
1040 case HASH_FIND:
1041 if (currBucket != NULL)
1042 return ELEMENTKEY(currBucket);
1043 return NULL;
1044
1045 case HASH_REMOVE:
1046 if (currBucket != NULL)
1047 {
1048 /* if partitioned, must lock to touch nentries and freeList */
1049 if (IS_PARTITIONED(hctl))
1050 SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));
1051
1052 /* delete the record from the appropriate nentries counter. */
1053 Assert(hctl->freeList[freelist_idx].nentries > 0);
1054 hctl->freeList[freelist_idx].nentries--;
1055
1056 /* remove record from hash bucket's chain. */
1057 *prevBucketPtr = currBucket->link;
1058
1059 /* add the record to the appropriate freelist. */
1060 currBucket->link = hctl->freeList[freelist_idx].freeList;
1061 hctl->freeList[freelist_idx].freeList = currBucket;
1062
1063 if (IS_PARTITIONED(hctl))
1064 SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1065
1066 /*
1067 * better hope the caller is synchronizing access to this
1068 * element, because someone else is going to reuse it the next
1069 * time something is added to the table
1070 */
1071 return ELEMENTKEY(currBucket);
1072 }
1073 return NULL;
1074
1075 case HASH_ENTER:
1076 case HASH_ENTER_NULL:
1077 /* Return existing element if found, else create one */
1078 if (currBucket != NULL)
1079 return ELEMENTKEY(currBucket);
1080
1081 /* disallow inserts if frozen */
1082 if (hashp->frozen)
1083 elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
1084 hashp->tabname);
1085
1086 currBucket = get_hash_entry(hashp, freelist_idx);
1087 if (currBucket == NULL)
1088 {
1089 /* out of memory */
1090 if (action == HASH_ENTER_NULL)
1091 return NULL;
1092 /* report a generic message */
1093 if (hashp->isshared)
1094 ereport(ERROR,
1095 (errcode(ERRCODE_OUT_OF_MEMORY),
1096 errmsg("out of shared memory")));
1097 else
1098 ereport(ERROR,
1099 (errcode(ERRCODE_OUT_OF_MEMORY),
1100 errmsg("out of memory")));
1101 }
1102
1103 /* link into hashbucket chain */
1104 *prevBucketPtr = currBucket;
1105 currBucket->link = NULL;
1106
1107 /* copy key into record */
1108 currBucket->hashvalue = hashvalue;
1109 hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
1110
1111 /*
1112 * Caller is expected to fill the data field on return. DO NOT
1113 * insert any code that could possibly throw error here, as doing
1114 * so would leave the table entry incomplete and hence corrupt the
1115 * caller's data structure.
1116 */
1117
1118 return ELEMENTKEY(currBucket);
1119 }
1120
1121 elog(ERROR, "unrecognized hash action code: %d", (int) action);
1122
1123 return NULL; /* keep compiler quiet */
1124}
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx)
Definition: dynahash.c:1257
static bool expand_table(HTAB *hashp)
Definition: dynahash.c:1552
#define ELEMENTKEY(helem)
Definition: dynahash.c:244
#define FREELIST_IDX(hctl, hashcode)
Definition: dynahash.c:212
static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
Definition: dynahash.c:1757
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
@ HASH_ENTER_NULL
Definition: hsearch.h:116

References generate_unaccent_rules::action, Assert(), ELEMENTKEY, elog, ereport, errcode(), errmsg(), ERROR, expand_table(), FreeListData::freeList, HASHHDR::freeList, FREELIST_IDX, HTAB::frozen, get_hash_entry(), has_seq_scans(), HASH_ENTER, HASH_ENTER_NULL, HASH_FIND, hash_initial_lookup(), HASH_REMOVE, HASHELEMENT::hashvalue, HTAB::hctl, IS_PARTITIONED, HTAB::isshared, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, HASHHDR::max_bucket, FreeListData::mutex, FreeListData::nentries, SpinLockAcquire, SpinLockRelease, and HTAB::tabname.

Referenced by BufTableDelete(), BufTableInsert(), BufTableLookup(), CheckTargetForConflictsIn(), CleanUpLock(), ClearOldPredicateLocks(), CreatePredicateLock(), DecrementParentLocks(), DeleteChildTargetLocks(), DeleteLockTarget(), DropAllPredicateLocksFromTable(), FastPathGetRelationLockEntry(), GetLockConflicts(), hash_search(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockRelease(), LockWaiterCount(), PageIsPredicateLocked(), PredicateLockAcquire(), ReleaseOneSerializableXact(), RemoveScratchTarget(), RemoveTargetIfNoLongerUsed(), RestoreScratchTarget(), SetupLockInTable(), and TransferPredicateLocksToNewTarget().

◆ hash_select_dirsize()

long hash_select_dirsize ( long  num_entries)

Definition at line 831 of file dynahash.c.

832{
833 long nBuckets,
834 nSegments,
835 nDirEntries;
836
837 /* estimate number of buckets wanted */
838 nBuckets = next_pow2_long(num_entries);
839 /* # of segments needed for nBuckets */
840 nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
841 /* directory entries */
842 nDirEntries = DEF_DIRSIZE;
843 while (nDirEntries < nSegments)
844 nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
845
846 return nDirEntries;
847}

References DEF_DIRSIZE, DEF_SEGSIZE, and next_pow2_long().

Referenced by ShmemInitHash().

◆ hash_seq_init()

void hash_seq_init ( HASH_SEQ_STATUS status,
HTAB hashp 
)

Definition at line 1386 of file dynahash.c.

1387{
1388 status->hashp = hashp;
1389 status->curBucket = 0;
1390 status->curEntry = NULL;
1391 status->hasHashvalue = false;
1392 if (!hashp->frozen)
1393 register_seq_scan(hashp);
1394}
static void register_seq_scan(HTAB *hashp)
Definition: dynahash.c:1866
HASHELEMENT * curEntry
Definition: hsearch.h:124
uint32 curBucket
Definition: hsearch.h:123
HTAB * hashp
Definition: hsearch.h:122
bool hasHashvalue
Definition: hsearch.h:125

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::frozen, HASH_SEQ_STATUS::hasHashvalue, HASH_SEQ_STATUS::hashp, and register_seq_scan().

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), BeginReportingGUCOptions(), CheckForBufferLeaks(), CheckForSessionAndXactLocks(), CheckTableForSerializableConflictIn(), cleanup_rel_sync_cache(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DestroyPartitionDirectory(), disconnect_cached_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), ExecuteTruncateGuts(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPortalSnapshots(), gc_qtexts(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventCustomNames(), hash_seq_init_with_hash_value(), HoldPinnedPortals(), InitializeGUCOptions(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), logical_end_heap_rewrite(), logical_heap_rewrite_flush_mappings(), logicalrep_partmap_invalidate_cb(), logicalrep_partmap_reset_relmap(), logicalrep_relmap_invalidate_cb(), MarkGUCPrefixReserved(), packGraph(), pg_cursor(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_prepared_statement(), pg_stat_statements_internal(), pgfdw_inval_callback(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), plperl_fini(), PortalErrorCleanup(), PortalHashTableDeleteAll(), postgres_fdw_get_connections_internal(), PostPrepare_Locks(), PreCommit_Portals(), ProcessConfigFileInternal(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), rel_sync_cache_publication_cb(), rel_sync_cache_relation_cb(), 
RelationCacheInitializePhase3(), RelationCacheInvalidate(), RelfilenumberMapInvalidateCallback(), RememberSyncRequest(), ReorderBufferToastReset(), selectColorTrigrams(), SerializePendingSyncs(), SerializeUncommittedEnums(), smgrDoPendingSyncs(), smgrreleaseall(), StandbyReleaseAllLocks(), StandbyReleaseOldLocks(), ThereAreNoReadyPortals(), TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().

◆ hash_seq_init_with_hash_value()

void hash_seq_init_with_hash_value ( HASH_SEQ_STATUS status,
HTAB hashp,
uint32  hashvalue 
)

Definition at line 1406 of file dynahash.c.

1408{
1409 HASHBUCKET *bucketPtr;
1410
1411 hash_seq_init(status, hashp);
1412
1413 status->hasHashvalue = true;
1414 status->hashvalue = hashvalue;
1415
1416 status->curBucket = hash_initial_lookup(hashp, hashvalue, &bucketPtr);
1417 status->curEntry = *bucketPtr;
1418}
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1386
uint32 hashvalue
Definition: hsearch.h:126

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, hash_initial_lookup(), hash_seq_init(), HASH_SEQ_STATUS::hasHashvalue, and HASH_SEQ_STATUS::hashvalue.

Referenced by InvalidateAttoptCacheCallback(), and TypeCacheTypCallback().

◆ hash_seq_search()

void * hash_seq_search ( HASH_SEQ_STATUS status)

Definition at line 1421 of file dynahash.c.

1422{
1423 HTAB *hashp;
1424 HASHHDR *hctl;
1425 uint32 max_bucket;
1426 long ssize;
1427 long segment_num;
1428 long segment_ndx;
1429 HASHSEGMENT segp;
1430 uint32 curBucket;
1431 HASHELEMENT *curElem;
1432
1433 if (status->hasHashvalue)
1434 {
1435 /*
1436 * Scan entries only in the current bucket because only this bucket
1437 * can contain entries with the given hash value.
1438 */
1439 while ((curElem = status->curEntry) != NULL)
1440 {
1441 status->curEntry = curElem->link;
1442 if (status->hashvalue != curElem->hashvalue)
1443 continue;
1444 return (void *) ELEMENTKEY(curElem);
1445 }
1446
1447 hash_seq_term(status);
1448 return NULL;
1449 }
1450
1451 if ((curElem = status->curEntry) != NULL)
1452 {
1453 /* Continuing scan of curBucket... */
1454 status->curEntry = curElem->link;
1455 if (status->curEntry == NULL) /* end of this bucket */
1456 ++status->curBucket;
1457 return ELEMENTKEY(curElem);
1458 }
1459
1460 /*
1461 * Search for next nonempty bucket starting at curBucket.
1462 */
1463 curBucket = status->curBucket;
1464 hashp = status->hashp;
1465 hctl = hashp->hctl;
1466 ssize = hashp->ssize;
1467 max_bucket = hctl->max_bucket;
1468
1469 if (curBucket > max_bucket)
1470 {
1471 hash_seq_term(status);
1472 return NULL; /* search is done */
1473 }
1474
1475 /*
1476 * first find the right segment in the table directory.
1477 */
1478 segment_num = curBucket >> hashp->sshift;
1479 segment_ndx = MOD(curBucket, ssize);
1480
1481 segp = hashp->dir[segment_num];
1482
1483 /*
1484 * Pick up the first item in this bucket's chain. If chain is not empty
1485 * we can begin searching it. Otherwise we have to advance to find the
1486 * next nonempty bucket. We try to optimize that case since searching a
1487 * near-empty hashtable has to iterate this loop a lot.
1488 */
1489 while ((curElem = segp[segment_ndx]) == NULL)
1490 {
1491 /* empty bucket, advance to next */
1492 if (++curBucket > max_bucket)
1493 {
1494 status->curBucket = curBucket;
1495 hash_seq_term(status);
1496 return NULL; /* search is done */
1497 }
1498 if (++segment_ndx >= ssize)
1499 {
1500 segment_num++;
1501 segment_ndx = 0;
1502 segp = hashp->dir[segment_num];
1503 }
1504 }
1505
1506 /* Begin scan of curBucket... */
1507 status->curEntry = curElem->link;
1508 if (status->curEntry == NULL) /* end of this bucket */
1509 ++curBucket;
1510 status->curBucket = curBucket;
1511 return ELEMENTKEY(curElem);
1512}
void hash_seq_term(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1515

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::dir, ELEMENTKEY, hash_seq_term(), HASH_SEQ_STATUS::hasHashvalue, HASH_SEQ_STATUS::hashp, HASHELEMENT::hashvalue, HASH_SEQ_STATUS::hashvalue, HTAB::hctl, HASHELEMENT::link, HASHHDR::max_bucket, MOD, HTAB::sshift, and HTAB::ssize.

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), BeginReportingGUCOptions(), CheckForBufferLeaks(), CheckForSessionAndXactLocks(), CheckTableForSerializableConflictIn(), cleanup_rel_sync_cache(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DestroyPartitionDirectory(), disconnect_cached_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), ExecuteTruncateGuts(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPortalSnapshots(), gc_qtexts(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventCustomNames(), HoldPinnedPortals(), InitializeGUCOptions(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), logical_end_heap_rewrite(), logical_heap_rewrite_flush_mappings(), logicalrep_partmap_invalidate_cb(), logicalrep_partmap_reset_relmap(), logicalrep_relmap_invalidate_cb(), MarkGUCPrefixReserved(), packGraph(), pg_cursor(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_prepared_statement(), pg_stat_statements_internal(), pgfdw_inval_callback(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), plperl_fini(), PortalErrorCleanup(), PortalHashTableDeleteAll(), postgres_fdw_get_connections_internal(), PostPrepare_Locks(), PreCommit_Portals(), ProcessConfigFileInternal(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), rel_sync_cache_publication_cb(), rel_sync_cache_relation_cb(), RelationCacheInitializePhase3(), 
RelationCacheInvalidate(), RelfilenumberMapInvalidateCallback(), RememberSyncRequest(), ReorderBufferToastReset(), selectColorTrigrams(), SerializePendingSyncs(), SerializeUncommittedEnums(), smgrDoPendingSyncs(), smgrreleaseall(), StandbyReleaseAllLocks(), StandbyReleaseOldLocks(), ThereAreNoReadyPortals(), TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().

◆ hash_seq_term()

◆ hash_stats()

void hash_stats ( const char *  where,
HTAB hashp 
)

Definition at line 885 of file dynahash.c.

886{
887#ifdef HASH_STATISTICS
888 fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
889 where, hashp->hctl->accesses, hashp->hctl->collisions);
890
891 fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
892 hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
893 hashp->hctl->max_bucket, hashp->hctl->nsegs);
894 fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
895 where, hash_accesses, hash_collisions);
896 fprintf(stderr, "hash_stats: total expansions %ld\n",
897 hash_expansions);
898#endif
899}
#define fprintf(file, fmt, msg)
Definition: cubescan.l:21
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1342

References fprintf, hash_get_num_entries(), HTAB::hctl, HASHHDR::keysize, HASHHDR::max_bucket, and HASHHDR::nsegs.

Referenced by hash_destroy().

◆ hash_update_hash_key()

bool hash_update_hash_key ( HTAB hashp,
void *  existingEntry,
const void *  newKeyPtr 
)

Definition at line 1146 of file dynahash.c.

1149{
1150 HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
1151 uint32 newhashvalue;
1152 Size keysize;
1153 uint32 bucket;
1154 uint32 newbucket;
1155 HASHBUCKET currBucket;
1156 HASHBUCKET *prevBucketPtr;
1157 HASHBUCKET *oldPrevPtr;
1158 HashCompareFunc match;
1159
1160#ifdef HASH_STATISTICS
1161 hash_accesses++;
1162 hctl->accesses++;
1163#endif
1164
1165 /* disallow updates if frozen */
1166 if (hashp->frozen)
1167 elog(ERROR, "cannot update in frozen hashtable \"%s\"",
1168 hashp->tabname);
1169
1170 /*
1171 * Lookup the existing element using its saved hash value. We need to do
1172 * this to be able to unlink it from its hash chain, but as a side benefit
1173 * we can verify the validity of the passed existingEntry pointer.
1174 */
1175 bucket = hash_initial_lookup(hashp, existingElement->hashvalue,
1176 &prevBucketPtr);
1177 currBucket = *prevBucketPtr;
1178
1179 while (currBucket != NULL)
1180 {
1181 if (currBucket == existingElement)
1182 break;
1183 prevBucketPtr = &(currBucket->link);
1184 currBucket = *prevBucketPtr;
1185 }
1186
1187 if (currBucket == NULL)
1188 elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"",
1189 hashp->tabname);
1190
1191 oldPrevPtr = prevBucketPtr;
1192
1193 /*
1194 * Now perform the equivalent of a HASH_ENTER operation to locate the hash
1195 * chain we want to put the entry into.
1196 */
1197 newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
1198 newbucket = hash_initial_lookup(hashp, newhashvalue, &prevBucketPtr);
1199 currBucket = *prevBucketPtr;
1200
1201 /*
1202 * Follow collision chain looking for matching key
1203 */
1204 match = hashp->match; /* save one fetch in inner loop */
1205 keysize = hashp->keysize; /* ditto */
1206
1207 while (currBucket != NULL)
1208 {
1209 if (currBucket->hashvalue == newhashvalue &&
1210 match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
1211 break;
1212 prevBucketPtr = &(currBucket->link);
1213 currBucket = *prevBucketPtr;
1214#ifdef HASH_STATISTICS
1215 hash_collisions++;
1216 hctl->collisions++;
1217#endif
1218 }
1219
1220 if (currBucket != NULL)
1221 return false; /* collision with an existing entry */
1222
1223 currBucket = existingElement;
1224
1225 /*
1226 * If old and new hash values belong to the same bucket, we need not
1227 * change any chain links, and indeed should not since this simplistic
1228 * update will corrupt the list if currBucket is the last element. (We
1229 * cannot fall out earlier, however, since we need to scan the bucket to
1230 * check for duplicate keys.)
1231 */
1232 if (bucket != newbucket)
1233 {
1234 /* OK to remove record from old hash bucket's chain. */
1235 *oldPrevPtr = currBucket->link;
1236
1237 /* link into new hashbucket chain */
1238 *prevBucketPtr = currBucket;
1239 currBucket->link = NULL;
1240 }
1241
1242 /* copy new key into record */
1243 currBucket->hashvalue = newhashvalue;
1244 hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);
1245
1246 /* rest of record is untouched */
1247
1248 return true;
1249}
#define ELEMENT_FROM_KEY(key)
Definition: dynahash.c:249

References ELEMENT_FROM_KEY, ELEMENTKEY, elog, ERROR, HTAB::frozen, HTAB::hash, hash_initial_lookup(), HASHELEMENT::hashvalue, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, and HTAB::tabname.

Referenced by PostPrepare_Locks().

◆ hdefault()

static void hdefault ( HTAB hashp)
static

Definition at line 630 of file dynahash.c.

631{
632 HASHHDR *hctl = hashp->hctl;
633
634 MemSet(hctl, 0, sizeof(HASHHDR));
635
636 hctl->dsize = DEF_DIRSIZE;
637 hctl->nsegs = 0;
638
639 hctl->num_partitions = 0; /* not partitioned */
640
641 /* table has no fixed maximum size */
642 hctl->max_dsize = NO_MAX_DSIZE;
643
644 hctl->ssize = DEF_SEGSIZE;
 645 hctl->sshift = DEF_SEGSIZE_SHIFT;
 646
647#ifdef HASH_STATISTICS
648 hctl->accesses = hctl->collisions = 0;
649#endif
650}
#define DEF_SEGSIZE_SHIFT
Definition: dynahash.c:124

References DEF_DIRSIZE, DEF_SEGSIZE, DEF_SEGSIZE_SHIFT, HASHHDR::dsize, HTAB::hctl, HASHHDR::max_dsize, MemSet, NO_MAX_DSIZE, HASHHDR::nsegs, HASHHDR::num_partitions, HASHHDR::sshift, and HASHHDR::ssize.

Referenced by hash_create().

◆ init_htab()

static bool init_htab ( HTAB hashp,
long  nelem 
)
static

Definition at line 690 of file dynahash.c.

691{
692 HASHHDR *hctl = hashp->hctl;
693 HASHSEGMENT *segp;
694 int nbuckets;
695 int nsegs;
696 int i;
697
698 /*
699 * initialize mutexes if it's a partitioned table
700 */
701 if (IS_PARTITIONED(hctl))
702 for (i = 0; i < NUM_FREELISTS; i++)
703 SpinLockInit(&(hctl->freeList[i].mutex));
704
705 /*
706 * Allocate space for the next greater power of two number of buckets,
707 * assuming a desired maximum load factor of 1.
708 */
709 nbuckets = next_pow2_int(nelem);
710
711 /*
712 * In a partitioned table, nbuckets must be at least equal to
713 * num_partitions; were it less, keys with apparently different partition
714 * numbers would map to the same bucket, breaking partition independence.
715 * (Normally nbuckets will be much bigger; this is just a safety check.)
716 */
717 while (nbuckets < hctl->num_partitions)
718 nbuckets <<= 1;
719
720 hctl->max_bucket = hctl->low_mask = nbuckets - 1;
721 hctl->high_mask = (nbuckets << 1) - 1;
722
723 /*
724 * Figure number of directory segments needed, round up to a power of 2
725 */
726 nsegs = (nbuckets - 1) / hctl->ssize + 1;
727 nsegs = next_pow2_int(nsegs);
728
729 /*
730 * Make sure directory is big enough. If pre-allocated directory is too
731 * small, choke (caller screwed up).
732 */
733 if (nsegs > hctl->dsize)
734 {
735 if (!(hashp->dir))
736 hctl->dsize = nsegs;
737 else
738 return false;
739 }
740
741 /* Allocate a directory */
742 if (!(hashp->dir))
743 {
744 CurrentDynaHashCxt = hashp->hcxt;
745 hashp->dir = (HASHSEGMENT *)
746 hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
747 if (!hashp->dir)
748 return false;
749 }
750
751 /* Allocate initial segments */
752 for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
753 {
754 *segp = seg_alloc(hashp);
755 if (*segp == NULL)
756 return false;
757 }
758
759 /* Choose number of entries to allocate at a time */
 760 hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);
 761
762#ifdef HASH_DEBUG
763 fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n",
764 "TABLE POINTER ", hashp,
765 "DIRECTORY SIZE ", hctl->dsize,
766 "SEGMENT SIZE ", hctl->ssize,
767 "SEGMENT SHIFT ", hctl->sshift,
768 "MAX BUCKET ", hctl->max_bucket,
769 "HIGH MASK ", hctl->high_mask,
770 "LOW MASK ", hctl->low_mask,
771 "NSEGS ", hctl->nsegs);
772#endif
773 return true;
774}
#define SpinLockInit(lock)
Definition: spin.h:57

References HTAB::alloc, choose_nelem_alloc(), CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, HASHHDR::entrysize, fprintf, HASHHDR::freeList, HTAB::hctl, HTAB::hcxt, HASHHDR::high_mask, i, IS_PARTITIONED, HASHHDR::low_mask, HASHHDR::max_bucket, FreeListData::mutex, HASHHDR::nelem_alloc, next_pow2_int(), HASHHDR::nsegs, NUM_FREELISTS, seg_alloc(), SpinLockInit, HASHHDR::sshift, and HASHHDR::ssize.

Referenced by hash_create().

◆ my_log2()

int my_log2 ( long  num)

Definition at line 1795 of file dynahash.c.

1796{
1797 /*
1798 * guard against too-large input, which would be invalid for
1799 * pg_ceil_log2_*()
1800 */
1801 if (num > LONG_MAX / 2)
1802 num = LONG_MAX / 2;
1803
1804#if SIZEOF_LONG < 8
1805 return pg_ceil_log2_32(num);
1806#else
1807 return pg_ceil_log2_64(num);
1808#endif
1809}
static uint64 pg_ceil_log2_64(uint64 num)
Definition: pg_bitutils.h:271
static uint32 pg_ceil_log2_32(uint32 num)
Definition: pg_bitutils.h:258

References pg_ceil_log2_32(), and pg_ceil_log2_64().

Referenced by ExecHashTableCreate(), ExecParallelHashTableSetCurrentBatch(), hash_choose_num_partitions(), hash_create(), MultiExecParallelHash(), next_pow2_int(), next_pow2_long(), and subxact_info_read().

◆ next_pow2_int()

static int next_pow2_int ( long  num)
static

Definition at line 1821 of file dynahash.c.

1822{
1823 if (num > INT_MAX / 2)
1824 num = INT_MAX / 2;
1825 return 1 << my_log2(num);
1826}

References my_log2().

Referenced by hash_create(), and init_htab().

◆ next_pow2_long()

static long next_pow2_long ( long  num)
static

Definition at line 1813 of file dynahash.c.

1814{
1815 /* my_log2's internal range check is sufficient */
1816 return 1L << my_log2(num);
1817}

References my_log2().

Referenced by hash_estimate_size(), and hash_select_dirsize().

◆ register_seq_scan()

static void register_seq_scan ( HTAB hashp)
static

Definition at line 1866 of file dynahash.c.

 1867{
 1868 if (num_seq_scans >= MAX_SEQ_SCANS)
 1869 elog(ERROR, "too many active hash_seq_search scans, cannot start one on \"%s\"",
 1870 hashp->tabname);
 1871 seq_scan_tables[num_seq_scans] = hashp;
 1872 seq_scan_level[num_seq_scans] = GetCurrentTransactionNestLevel();
 1873 num_seq_scans++;
 1874}
#define MAX_SEQ_SCANS
Definition: dynahash.c:1857
int GetCurrentTransactionNestLevel(void)
Definition: xact.c:929

References elog, ERROR, GetCurrentTransactionNestLevel(), MAX_SEQ_SCANS, num_seq_scans, seq_scan_level, seq_scan_tables, and HTAB::tabname.

Referenced by hash_seq_init().

◆ seg_alloc()

static HASHSEGMENT seg_alloc ( HTAB hashp)
static

Definition at line 1688 of file dynahash.c.

1689{
1690 HASHSEGMENT segp;
1691
1692 CurrentDynaHashCxt = hashp->hcxt;
1693 segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);
1694
1695 if (!segp)
1696 return NULL;
1697
1698 MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);
1699
1700 return segp;
1701}

References HTAB::alloc, CurrentDynaHashCxt, HTAB::hcxt, MemSet, and HTAB::ssize.

Referenced by expand_table(), and init_htab().

◆ string_compare()

static int string_compare ( const char *  key1,
const char *  key2,
Size  keysize 
)
static

Definition at line 307 of file dynahash.c.

308{
309 return strncmp(key1, key2, keysize - 1);
310}

Referenced by hash_create().

Variable Documentation

◆ CurrentDynaHashCxt

MemoryContext CurrentDynaHashCxt = NULL
static

Definition at line 288 of file dynahash.c.

Referenced by dir_realloc(), DynaHashAlloc(), element_alloc(), hash_create(), init_htab(), and seg_alloc().

◆ num_seq_scans

int num_seq_scans = 0
static

◆ seq_scan_level

int seq_scan_level[MAX_SEQ_SCANS]
static

Definition at line 1860 of file dynahash.c.

Referenced by AtEOSubXact_HashTables(), deregister_seq_scan(), and register_seq_scan().

◆ seq_scan_tables