PostgreSQL Source Code (git master)
nodeMemoize.c File Reference
#include "postgres.h"
#include "common/hashfn.h"
#include "executor/executor.h"
#include "executor/nodeMemoize.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "lib/simplehash.h"

Data Structures

struct  MemoizeTuple
 
struct  MemoizeKey
 
struct  MemoizeEntry
 

Macros

#define MEMO_CACHE_LOOKUP   1 /* Attempt to perform a cache lookup */
 
#define MEMO_CACHE_FETCH_NEXT_TUPLE   2 /* Get another tuple from the cache */
 
#define MEMO_FILLING_CACHE   3 /* Read outer node to fill cache */
 
#define MEMO_CACHE_BYPASS_MODE   4 /* Bypass mode. Just read from our subplan without caching anything */
 
#define MEMO_END_OF_SCAN   5 /* Ready for rescan */
 
#define EMPTY_ENTRY_MEMORY_BYTES(e)
 
#define CACHE_TUPLE_BYTES(t)
 
#define SH_PREFIX   memoize
 
#define SH_ELEMENT_TYPE   MemoizeEntry
 
#define SH_KEY_TYPE   MemoizeKey *
 
#define SH_SCOPE   static inline
 
#define SH_DECLARE
 
#define SH_PREFIX   memoize
 
#define SH_ELEMENT_TYPE   MemoizeEntry
 
#define SH_KEY_TYPE   MemoizeKey *
 
#define SH_KEY   key
 
#define SH_HASH_KEY(tb, key)   MemoizeHash_hash(tb, key)
 
#define SH_EQUAL(tb, a, b)   MemoizeHash_equal(tb, a, b)
 
#define SH_SCOPE   static inline
 
#define SH_STORE_HASH
 
#define SH_GET_HASH(tb, a)   a->hash
 
#define SH_DEFINE
 

Typedefs

typedef struct MemoizeTuple MemoizeTuple
 
typedef struct MemoizeKey MemoizeKey
 
typedef struct MemoizeEntry MemoizeEntry
 

Functions

static uint32 MemoizeHash_hash (struct memoize_hash *tb, const MemoizeKey *key)
 
static bool MemoizeHash_equal (struct memoize_hash *tb, const MemoizeKey *params1, const MemoizeKey *params2)
 
static void build_hash_table (MemoizeState *mstate, uint32 size)
 
static void prepare_probe_slot (MemoizeState *mstate, MemoizeKey *key)
 
static void entry_purge_tuples (MemoizeState *mstate, MemoizeEntry *entry)
 
static void remove_cache_entry (MemoizeState *mstate, MemoizeEntry *entry)
 
static void cache_purge_all (MemoizeState *mstate)
 
static bool cache_reduce_memory (MemoizeState *mstate, MemoizeKey *specialkey)
 
static MemoizeEntry * cache_lookup (MemoizeState *mstate, bool *found)
 
static bool cache_store_tuple (MemoizeState *mstate, TupleTableSlot *slot)
 
static TupleTableSlot * ExecMemoize (PlanState *pstate)
 
MemoizeState * ExecInitMemoize (Memoize *node, EState *estate, int eflags)
 
void ExecEndMemoize (MemoizeState *node)
 
void ExecReScanMemoize (MemoizeState *node)
 
double ExecEstimateCacheEntryOverheadBytes (double ntuples)
 
void ExecMemoizeEstimate (MemoizeState *node, ParallelContext *pcxt)
 
void ExecMemoizeInitializeDSM (MemoizeState *node, ParallelContext *pcxt)
 
void ExecMemoizeInitializeWorker (MemoizeState *node, ParallelWorkerContext *pwcxt)
 
void ExecMemoizeRetrieveInstrumentation (MemoizeState *node)
 

Macro Definition Documentation

◆ CACHE_TUPLE_BYTES

#define CACHE_TUPLE_BYTES (   t)
Value:
(sizeof(MemoizeTuple) + \
(t)->mintuple->t_len)

Definition at line 89 of file nodeMemoize.c.

◆ EMPTY_ENTRY_MEMORY_BYTES

#define EMPTY_ENTRY_MEMORY_BYTES (   e)
Value:
(sizeof(MemoizeEntry) + \
sizeof(MemoizeKey) + \
(e)->key->params->t_len);

Definition at line 86 of file nodeMemoize.c.
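
These two macros drive the cache's memory accounting. A minimal sketch of how they are combined, mirroring cache_lookup(), cache_store_tuple() and remove_cache_entry() below ('entry' and 'tuple' are illustrative locals, not code from this file):

    mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);   /* a new key/entry was created */
    mstate->mem_used += CACHE_TUPLE_BYTES(tuple);           /* for each tuple cached under it */

    if (mstate->mem_used > mstate->mem_limit)
        cache_reduce_memory(mstate, entry->key);            /* evict LRU entries until back under budget */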

◆ MEMO_CACHE_BYPASS_MODE

#define MEMO_CACHE_BYPASS_MODE
Value:
4 /* Bypass mode. Just read from our
* subplan without caching anything */

Definition at line 81 of file nodeMemoize.c.

◆ MEMO_CACHE_FETCH_NEXT_TUPLE

#define MEMO_CACHE_FETCH_NEXT_TUPLE   2 /* Get another tuple from the cache */

Definition at line 79 of file nodeMemoize.c.

◆ MEMO_CACHE_LOOKUP

#define MEMO_CACHE_LOOKUP   1 /* Attempt to perform a cache lookup */

Definition at line 78 of file nodeMemoize.c.

◆ MEMO_END_OF_SCAN

#define MEMO_END_OF_SCAN   5 /* Ready for rescan */

Definition at line 82 of file nodeMemoize.c.

◆ MEMO_FILLING_CACHE

#define MEMO_FILLING_CACHE   3 /* Read outer node to fill cache */

Definition at line 80 of file nodeMemoize.c.
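
Together these five values form the small state machine that ExecMemoize() (documented below) steps through. A rough sketch of the transitions, written as a C comment for orientation only; it paraphrases the switch statement in ExecMemoize() rather than quoting it:

    /*
     * MEMO_CACHE_LOOKUP --cache hit-------------------> MEMO_CACHE_FETCH_NEXT_TUPLE --tuples exhausted----> MEMO_END_OF_SCAN
     * MEMO_CACHE_LOOKUP --cache miss, tuple cached----> MEMO_FILLING_CACHE --outer plan exhausted---------> MEMO_END_OF_SCAN
     * MEMO_CACHE_LOOKUP --entry/tuple could not fit---> MEMO_CACHE_BYPASS_MODE --outer plan exhausted-----> MEMO_END_OF_SCAN
     *
     * MEMO_FILLING_CACHE also falls back to MEMO_CACHE_BYPASS_MODE when the
     * memory limit is exceeded, and ExecReScanMemoize() resets the state to
     * MEMO_CACHE_LOOKUP for each new set of parameters.
     */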

◆ SH_DECLARE

#define SH_DECLARE

Definition at line 129 of file nodeMemoize.c.

◆ SH_DEFINE

#define SH_DEFINE

Definition at line 147 of file nodeMemoize.c.

◆ SH_ELEMENT_TYPE [1/2]

#define SH_ELEMENT_TYPE   MemoizeEntry

Definition at line 139 of file nodeMemoize.c.

◆ SH_ELEMENT_TYPE [2/2]

#define SH_ELEMENT_TYPE   MemoizeEntry

Definition at line 139 of file nodeMemoize.c.

◆ SH_EQUAL

#define SH_EQUAL (   tb,
  a,
  b 
)    MemoizeHash_equal(tb, a, b)

Definition at line 143 of file nodeMemoize.c.

◆ SH_GET_HASH

#define SH_GET_HASH (   tb,
  a 
)    a->hash

Definition at line 146 of file nodeMemoize.c.

◆ SH_HASH_KEY

#define SH_HASH_KEY (   tb,
  key 
)    MemoizeHash_hash(tb, key)

Definition at line 142 of file nodeMemoize.c.

◆ SH_KEY

#define SH_KEY   key

Definition at line 141 of file nodeMemoize.c.

◆ SH_KEY_TYPE [1/2]

#define SH_KEY_TYPE   MemoizeKey *

Definition at line 140 of file nodeMemoize.c.

◆ SH_KEY_TYPE [2/2]

#define SH_KEY_TYPE   MemoizeKey *

Definition at line 140 of file nodeMemoize.c.

◆ SH_PREFIX [1/2]

#define SH_PREFIX   memoize

Definition at line 138 of file nodeMemoize.c.

◆ SH_PREFIX [2/2]

#define SH_PREFIX   memoize

Definition at line 138 of file nodeMemoize.c.

◆ SH_SCOPE [1/2]

#define SH_SCOPE   static inline

Definition at line 144 of file nodeMemoize.c.

◆ SH_SCOPE [2/2]

#define SH_SCOPE   static inline

Definition at line 144 of file nodeMemoize.c.

◆ SH_STORE_HASH

#define SH_STORE_HASH

Definition at line 145 of file nodeMemoize.c.
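
The SH_* settings above parameterize lib/simplehash.h, which is included twice: once with SH_DECLARE to emit declarations and once with SH_DEFINE to emit the function bodies. A rough sketch of the generated hash-table API that the functions below call (prototypes abbreviated from simplehash.h; the real expansion contains more helpers):

    static inline memoize_hash *memoize_create(MemoryContext ctx, uint32 nelements,
                                               void *private_data);
    static inline MemoizeEntry *memoize_insert(memoize_hash *tb, MemoizeKey *key,
                                               bool *found);
    static inline MemoizeEntry *memoize_lookup(memoize_hash *tb, MemoizeKey *key);
    static inline void memoize_delete_item(memoize_hash *tb, MemoizeEntry *entry);
    static inline void memoize_start_iterate(memoize_hash *tb, memoize_iterator *iter);
    static inline MemoizeEntry *memoize_iterate(memoize_hash *tb, memoize_iterator *iter);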

Typedef Documentation

◆ MemoizeEntry

typedef struct MemoizeEntry MemoizeEntry

◆ MemoizeKey

typedef struct MemoizeKey MemoizeKey

◆ MemoizeTuple

typedef struct MemoizeTuple MemoizeTuple

Function Documentation

◆ build_hash_table()

static void build_hash_table ( MemoizeState * mstate,
uint32  size 
)
static

Definition at line 264 of file nodeMemoize.c.

266 {
267  /* Make a guess at a good size when we're not given a valid size. */
268  if (size == 0)
269  size = 1024;
270 
271  /* memoize_create will convert the size to a power of 2 */
272  mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);

References MemoizeState::hashtable, and MemoizeState::tableContext.

Referenced by cache_purge_all(), and ExecInitMemoize().

◆ cache_lookup()

static MemoizeEntry* cache_lookup ( MemoizeState * mstate,
bool *  found 
)
static

Definition at line 494 of file nodeMemoize.c.

496 {
497  MemoizeKey *key;
498  MemoizeEntry *entry;
499  MemoryContext oldcontext;
500 
501  /* prepare the probe slot with the current scan parameters */
502  prepare_probe_slot(mstate, NULL);
503 
504  /*
505  * Add the new entry to the cache. No need to pass a valid key since the
506  * hash function uses mstate's probeslot, which we populated above.
507  */
508  entry = memoize_insert(mstate->hashtable, NULL, found);
509 
510  if (*found)
511  {
512  /*
513  * Move existing entry to the tail of the LRU list to mark it as the
514  * most recently used item.
515  */
516  dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);
517 
518  return entry;
519  }
520 
521  oldcontext = MemoryContextSwitchTo(mstate->tableContext);
522 
523  /* Allocate a new key */
524  entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
525  key->params = ExecCopySlotMinimalTuple(mstate->probeslot);
526 
527  /* Update the total cache memory utilization */
528  mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);
529 
530  /* Initialize this entry */
531  entry->complete = false;
532  entry->tuplehead = NULL;
533 
534  /*
535  * Since this is the most recently used entry, push this entry onto the
536  * end of the LRU list.
537  */
538  dlist_push_tail(&mstate->lru_list, &entry->key->lru_node);
539 
540  mstate->last_tuple = NULL;
541 
542  MemoryContextSwitchTo(oldcontext);
543 
544  /*
545  * If we've gone over our memory budget, then we'll free up some space in
546  * the cache.
547  */
548  if (mstate->mem_used > mstate->mem_limit)
549  {
550  /*
551  * Try to free up some memory. It's highly unlikely that we'll fail
552  * to do so here since the entry we've just added is yet to contain
553  * any tuples and we're able to remove any other entry to reduce the
554  * memory consumption.
555  */
556  if (unlikely(!cache_reduce_memory(mstate, key)))
557  return NULL;
558 
559  /*
560  * The process of removing entries from the cache may have caused the
561  * code in simplehash.h to shuffle elements to earlier buckets in the
562  * hash table. If it has, we'll need to find the entry again by
563  * performing a lookup. Fortunately, we can detect if this has
564  * happened by seeing if the entry is still in use and that the key
565  * pointer matches our expected key.
566  */
567  if (entry->status != memoize_SH_IN_USE || entry->key != key)
568  {
569  /*
570  * We need to repopulate the probeslot as lookups performed during
571  * the cache evictions above will have stored some other key.
572  */
573  prepare_probe_slot(mstate, key);
574 
575  /* Re-find the newly added entry */
576  entry = memoize_lookup(mstate->hashtable, NULL);
577  Assert(entry != NULL);
578  }
579  }
580 
581  return entry;

References Assert(), cache_reduce_memory(), MemoizeEntry::complete, dlist_move_tail(), dlist_push_tail(), EMPTY_ENTRY_MEMORY_BYTES, ExecCopySlotMinimalTuple(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::last_tuple, MemoizeState::lru_list, MemoizeKey::lru_node, MemoizeState::mem_limit, MemoizeState::mem_used, MemoryContextSwitchTo(), palloc(), prepare_probe_slot(), MemoizeState::probeslot, MemoizeEntry::status, MemoizeState::tableContext, MemoizeEntry::tuplehead, and unlikely.

Referenced by ExecMemoize().
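
A compact sketch of the caller contract, following how ExecMemoize() (below) consumes this function; the branches are abbreviated to comments:

    bool		found;
    MemoizeEntry *entry = cache_lookup(mstate, &found);

    if (entry == NULL)
        ;	/* memory could not be freed: caller switches to MEMO_CACHE_BYPASS_MODE */
    else if (found && entry->complete)
        ;	/* cache hit: return the tuples starting at entry->tuplehead */
    else
        ;	/* new or incomplete entry: pull from the outer plan and cache_store_tuple() each row */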

◆ cache_purge_all()

static void cache_purge_all ( MemoizeState * mstate)
static

Definition at line 374 of file nodeMemoize.c.

376 {
377  uint64 evictions = mstate->hashtable->members;
378  PlanState *pstate = (PlanState *) mstate;
379 
380  /*
381  * Likely the most efficient way to remove all items is to just reset the
382  * memory context for the cache and then rebuild a fresh hash table. This
383  * saves having to remove each item one by one and pfree each cached tuple
384  */
385  MemoryContextReset(mstate->tableContext);
386 
387  /* Make the hash table the same size as the original size */
388  build_hash_table(mstate, ((Memoize *) pstate->plan)->est_entries);
389 
390  /* reset the LRU list */
391  dlist_init(&mstate->lru_list);
392  mstate->last_tuple = NULL;
393  mstate->entry = NULL;
394 
395  mstate->mem_used = 0;
396 
397  /* XXX should we add something new to track these purges? */
398  mstate->stats.cache_evictions += evictions; /* Update Stats */

References build_hash_table(), MemoizeInstrumentation::cache_evictions, dlist_init(), MemoizeState::entry, MemoizeState::hashtable, MemoizeState::last_tuple, MemoizeState::lru_list, MemoizeState::mem_used, MemoryContextReset(), PlanState::plan, MemoizeState::stats, and MemoizeState::tableContext.

Referenced by ExecReScanMemoize().

◆ cache_reduce_memory()

static bool cache_reduce_memory ( MemoizeState * mstate,
MemoizeKey * specialkey 
)
static

Definition at line 410 of file nodeMemoize.c.

412 {
413  bool specialkey_intact = true; /* for now */
414  dlist_mutable_iter iter;
415  uint64 evictions = 0;
416 
417  /* Update peak memory usage */
418  if (mstate->mem_used > mstate->stats.mem_peak)
419  mstate->stats.mem_peak = mstate->mem_used;
420 
421  /* We expect only to be called when we've gone over budget on memory */
422  Assert(mstate->mem_used > mstate->mem_limit);
423 
424  /* Start the eviction process starting at the head of the LRU list. */
425  dlist_foreach_modify(iter, &mstate->lru_list)
426  {
427  MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
428  MemoizeEntry *entry;
429 
430  /*
431  * Populate the hash probe slot in preparation for looking up this LRU
432  * entry.
433  */
434  prepare_probe_slot(mstate, key);
435 
436  /*
437  * Ideally the LRU list pointers would be stored in the entry itself
438  * rather than in the key. Unfortunately, we can't do that as the
439  * simplehash.h code may resize the table and allocate new memory for
440  * entries which would result in those pointers pointing to the old
441  * buckets. However, it's fine to use the key to store this as that's
442  * only referenced by a pointer in the entry, which of course follows
443  * the entry whenever the hash table is resized. Since we only have a
444  * pointer to the key here, we must perform a hash table lookup to
445  * find the entry that the key belongs to.
446  */
447  entry = memoize_lookup(mstate->hashtable, NULL);
448 
449  /* A good spot to check for corruption of the table and LRU list. */
450  Assert(entry != NULL);
451  Assert(entry->key == key);
452 
453  /*
454  * If we're being called to free memory while the cache is being
455  * populated with new tuples, then we'd better take some care as we
456  * could end up freeing the entry which 'specialkey' belongs to.
457  * Generally callers will pass 'specialkey' as the key for the cache
458  * entry which is currently being populated, so we must set
459  * 'specialkey_intact' to false to inform the caller the specialkey
460  * entry has been removed.
461  */
462  if (key == specialkey)
463  specialkey_intact = false;
464 
465  /*
466  * Finally remove the entry. This will remove from the LRU list too.
467  */
468  remove_cache_entry(mstate, entry);
469 
470  evictions++;
471 
472  /* Exit if we've freed enough memory */
473  if (mstate->mem_used <= mstate->mem_limit)
474  break;
475  }
476 
477  mstate->stats.cache_evictions += evictions; /* Update Stats */
478 
479  return specialkey_intact;

References Assert(), MemoizeInstrumentation::cache_evictions, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::lru_list, MemoizeState::mem_limit, MemoizeInstrumentation::mem_peak, MemoizeState::mem_used, prepare_probe_slot(), remove_cache_entry(), and MemoizeState::stats.

Referenced by cache_lookup(), and cache_store_tuple().

◆ cache_store_tuple()

static bool cache_store_tuple ( MemoizeState * mstate,
TupleTableSlot * slot 
)
static

Definition at line 591 of file nodeMemoize.c.

593 {
594  MemoizeTuple *tuple;
595  MemoizeEntry *entry = mstate->entry;
596  MemoryContext oldcontext;
597 
598  Assert(slot != NULL);
599  Assert(entry != NULL);
600 
601  oldcontext = MemoryContextSwitchTo(mstate->tableContext);
602 
603  tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
604  tuple->mintuple = ExecCopySlotMinimalTuple(slot);
605  tuple->next = NULL;
606 
607  /* Account for the memory we just consumed */
608  mstate->mem_used += CACHE_TUPLE_BYTES(tuple);
609 
610  if (entry->tuplehead == NULL)
611  {
612  /*
613  * This is the first tuple for this entry, so just point the list head
614  * to it.
615  */
616  entry->tuplehead = tuple;
617  }
618  else
619  {
620  /* push this tuple onto the tail of the list */
621  mstate->last_tuple->next = tuple;
622  }
623 
624  mstate->last_tuple = tuple;
625  MemoryContextSwitchTo(oldcontext);
626 
627  /*
628  * If we've gone over our memory budget then free up some space in the
629  * cache.
630  */
631  if (mstate->mem_used > mstate->mem_limit)
632  {
633  MemoizeKey *key = entry->key;
634 
635  if (!cache_reduce_memory(mstate, key))
636  return false;
637 
638  /*
639  * The process of removing entries from the cache may have caused the
640  * code in simplehash.h to shuffle elements to earlier buckets in the
641  * hash table. If it has, we'll need to find the entry again by
642  * performing a lookup. Fortunately, we can detect if this has
643  * happened by seeing if the entry is still in use and that the key
644  * pointer matches our expected key.
645  */
646  if (entry->status != memoize_SH_IN_USE || entry->key != key)
647  {
648  /*
649  * We need to repopulate the probeslot as lookups performed during
650  * the cache evictions above will have stored some other key.
651  */
652  prepare_probe_slot(mstate, key);
653 
654  /* Re-find the entry */
655  mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
656  Assert(entry != NULL);
657  }
658  }
659 
660  return true;

References Assert(), cache_reduce_memory(), CACHE_TUPLE_BYTES, MemoizeState::entry, ExecCopySlotMinimalTuple(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::last_tuple, MemoizeState::mem_limit, MemoizeState::mem_used, MemoryContextSwitchTo(), MemoizeTuple::mintuple, MemoizeTuple::next, palloc(), prepare_probe_slot(), MemoizeEntry::status, MemoizeState::tableContext, and MemoizeEntry::tuplehead.

Referenced by ExecMemoize().

◆ entry_purge_tuples()

static void entry_purge_tuples ( MemoizeState * mstate,
MemoizeEntry * entry 
)
inline static

Definition at line 316 of file nodeMemoize.c.

318 {
319  MemoizeTuple *tuple = entry->tuplehead;
320  uint64 freed_mem = 0;
321 
322  while (tuple != NULL)
323  {
324  MemoizeTuple *next = tuple->next;
325 
326  freed_mem += CACHE_TUPLE_BYTES(tuple);
327 
328  /* Free memory used for this tuple */
329  pfree(tuple->mintuple);
330  pfree(tuple);
331 
332  tuple = next;
333  }
334 
335  entry->complete = false;
336  entry->tuplehead = NULL;
337 
338  /* Update the memory accounting */
339  mstate->mem_used -= freed_mem;

References CACHE_TUPLE_BYTES, MemoizeEntry::complete, MemoizeState::mem_used, MemoizeTuple::mintuple, next, MemoizeTuple::next, pfree(), and MemoizeEntry::tuplehead.

Referenced by ExecMemoize(), and remove_cache_entry().

◆ ExecEndMemoize()

void ExecEndMemoize ( MemoizeState * node)

Definition at line 1030 of file nodeMemoize.c.

1032 {
1033 #ifdef USE_ASSERT_CHECKING
1034  /* Validate the memory accounting code is correct in assert builds. */
1035  {
1036  int count;
1037  uint64 mem = 0;
1038  memoize_iterator i;
1039  MemoizeEntry *entry;
1040 
1041  memoize_start_iterate(node->hashtable, &i);
1042 
1043  count = 0;
1044  while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
1045  {
1046  MemoizeTuple *tuple = entry->tuplehead;
1047 
1048  mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
1049  while (tuple != NULL)
1050  {
1051  mem += CACHE_TUPLE_BYTES(tuple);
1052  tuple = tuple->next;
1053  }
1054  count++;
1055  }
1056 
1057  Assert(count == node->hashtable->members);
1058  Assert(mem == node->mem_used);
1059  }
1060 #endif
1061 
1062  /*
1063  * When ending a parallel worker, copy the statistics gathered by the
1064  * worker back into shared memory so that it can be picked up by the main
1065  * process to report in EXPLAIN ANALYZE.
1066  */
1067  if (node->shared_info != NULL && IsParallelWorker())
1068  {
1070 
1071  /* Make mem_peak available for EXPLAIN */
1072  if (node->stats.mem_peak == 0)
1073  node->stats.mem_peak = node->mem_used;
1074 
1075  Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
1077  memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
1078  }
1079 
1080  /* Remove the cache context */
1081  MemoryContextDelete(node->tableContext);
1082 
1084  /* must drop pointer to cache result tuple */
1086 
1087  /*
1088  * free exprcontext
1089  */
1090  ExecFreeExprContext(&node->ss.ps);
1091 
1092  /*
1093  * shut down the subplan
1094  */
1095  ExecEndNode(outerPlanState(node));

References Assert(), CACHE_TUPLE_BYTES, EMPTY_ENTRY_MEMORY_BYTES, ExecClearTuple(), ExecEndNode(), ExecFreeExprContext(), MemoizeState::hashtable, i, IsParallelWorker, MemoizeInstrumentation::mem_peak, MemoizeState::mem_used, MemoryContextDelete(), MemoizeTuple::next, outerPlanState, ParallelWorkerNumber, ScanState::ps, PlanState::ps_ResultTupleSlot, MemoizeState::shared_info, SharedMemoizeInfo::sinstrument, MemoizeState::ss, ScanState::ss_ScanTupleSlot, MemoizeState::stats, MemoizeState::tableContext, and MemoizeEntry::tuplehead.

Referenced by ExecEndNode().

◆ ExecEstimateCacheEntryOverheadBytes()

double ExecEstimateCacheEntryOverheadBytes ( double  ntuples)

Definition at line 1130 of file nodeMemoize.c.

1132 {
1133  return sizeof(MemoizeEntry) + sizeof(MemoizeKey) + sizeof(MemoizeTuple) *
1134  ntuples;

Referenced by cost_memoize_rescan().
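
A minimal usage sketch for cost estimation; 'tuples_per_entry' and 'tuple_width' are illustrative inputs rather than fields of this file:

    /* expected bytes for one cache entry: raw tuple data plus bookkeeping overhead */
    double	entry_bytes = tuples_per_entry * tuple_width +
                          ExecEstimateCacheEntryOverheadBytes(tuples_per_entry);

    /* roughly how many such entries fit within the cache's memory budget */
    double	est_entries = get_hash_memory_limit() / entry_bytes;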

◆ ExecInitMemoize()

MemoizeState* ExecInitMemoize ( Memoize * node,
EState * estate,
int  eflags 
)

Definition at line 905 of file nodeMemoize.c.

907 {
908  MemoizeState *mstate = makeNode(MemoizeState);
909  Plan *outerNode;
910  int i;
911  int nkeys;
912  Oid *eqfuncoids;
913 
914  /* check for unsupported flags */
915  Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
916 
917  mstate->ss.ps.plan = (Plan *) node;
918  mstate->ss.ps.state = estate;
919  mstate->ss.ps.ExecProcNode = ExecMemoize;
920 
921  /*
922  * Miscellaneous initialization
923  *
924  * create expression context for node
925  */
926  ExecAssignExprContext(estate, &mstate->ss.ps);
927 
928  outerNode = outerPlan(node);
929  outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);
930 
931  /*
932  * Initialize return slot and type. No need to initialize projection info
933  * because this node doesn't do projections.
934  */
936  mstate->ss.ps.ps_ProjInfo = NULL;
937 
938  /*
939  * Initialize scan slot and type.
940  */
942 
943  /*
944  * Set the state machine to lookup the cache. We won't find anything
945  * until we cache something, but this saves a special case to create the
946  * first entry.
947  */
948  mstate->mstatus = MEMO_CACHE_LOOKUP;
949 
950  mstate->nkeys = nkeys = node->numKeys;
955  &TTSOpsVirtual);
956 
957  mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
958  mstate->collations = node->collations; /* Just point directly to the plan
959  * data */
960  mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
961 
962  eqfuncoids = palloc(nkeys * sizeof(Oid));
963 
964  for (i = 0; i < nkeys; i++)
965  {
966  Oid hashop = node->hashOperators[i];
967  Oid left_hashfn;
968  Oid right_hashfn;
969  Expr *param_expr = (Expr *) list_nth(node->param_exprs, i);
970 
971  if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
972  elog(ERROR, "could not find hash function for hash operator %u",
973  hashop);
974 
975  fmgr_info(left_hashfn, &mstate->hashfunctions[i]);
976 
977  mstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) mstate);
978  eqfuncoids[i] = get_opcode(hashop);
979  }
980 
983  &TTSOpsVirtual,
984  eqfuncoids,
985  node->collations,
986  node->param_exprs,
987  (PlanState *) mstate);
988 
989  pfree(eqfuncoids);
990  mstate->mem_used = 0;
991 
992  /* Limit the total memory consumed by the cache to this */
993  mstate->mem_limit = get_hash_memory_limit();
994 
995  /* A memory context dedicated for the cache */
997  "MemoizeHashTable",
999 
1000  dlist_init(&mstate->lru_list);
1001  mstate->last_tuple = NULL;
1002  mstate->entry = NULL;
1003 
1004  /*
1005  * Mark if we can assume the cache entry is completed after we get the
1006  * first record for it. Some callers might not call us again after
1007  * getting the first match. e.g. A join operator performing a unique join
1008  * is able to skip to the next outer tuple after getting the first
1009  * matching inner tuple. In this case, the cache entry is complete after
1010  * getting the first tuple. This allows us to mark it as so.
1011  */
1012  mstate->singlerow = node->singlerow;
1013  mstate->keyparamids = node->keyparamids;
1014 
1015  /*
1016  * Record if the cache keys should be compared bit by bit, or logically
1017  * using the type's hash equality operator
1018  */
1019  mstate->binary_mode = node->binary_mode;
1020 
1021  /* Zero the statistics counters */
1022  memset(&mstate->stats, 0, sizeof(MemoizeInstrumentation));
1023 
1024  /* Allocate and set up the actual cache */
1025  build_hash_table(mstate, node->est_entries);
1026 
1027  return mstate;
References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), MemoizeState::binary_mode, Memoize::binary_mode, build_hash_table(), MemoizeState::cache_eq_expr, MemoizeState::collations, Memoize::collations, CurrentMemoryContext, dlist_init(), elog, MemoizeState::entry, ERROR, Memoize::est_entries, EXEC_FLAG_BACKWARD, EXEC_FLAG_MARK, ExecAssignExprContext(), ExecBuildParamSetEqual(), ExecCreateScanSlotFromOuterPlan(), ExecInitExpr(), ExecInitNode(), ExecInitResultTupleSlotTL(), ExecMemoize(), PlanState::ExecProcNode, ExecTypeFromExprList(), fmgr_info(), get_hash_memory_limit(), get_op_hash_functions(), get_opcode(), MemoizeState::hashfunctions, MemoizeState::hashkeydesc, Memoize::hashOperators, i, MemoizeState::keyparamids, Memoize::keyparamids, MemoizeState::last_tuple, list_nth(), MemoizeState::lru_list, makeNode, MakeSingleTupleTableSlot(), MemoizeState::mem_limit, MemoizeState::mem_used, MEMO_CACHE_LOOKUP, MemoizeState::mstatus, MemoizeState::nkeys, Memoize::numKeys, outerPlan, outerPlanState, palloc(), MemoizeState::param_exprs, Memoize::param_exprs, pfree(), PlanState::plan, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ProjInfo, MemoizeState::singlerow, Memoize::singlerow, MemoizeState::ss, PlanState::state, MemoizeState::stats, MemoizeState::tableContext, MemoizeState::tableslot, TTSOpsMinimalTuple, and TTSOpsVirtual.

Referenced by ExecInitNode().

◆ ExecMemoize()

static TupleTableSlot* ExecMemoize ( PlanState * pstate)
static

Definition at line 663 of file nodeMemoize.c.

665 {
666  MemoizeState *node = castNode(MemoizeState, pstate);
667  PlanState *outerNode;
668  TupleTableSlot *slot;
669 
670  switch (node->mstatus)
671  {
672  case MEMO_CACHE_LOOKUP:
673  {
674  MemoizeEntry *entry;
675  TupleTableSlot *outerslot;
676  bool found;
677 
678  Assert(node->entry == NULL);
679 
680  /*
681  * We're only ever in this state for the first call of the
682  * scan. Here we have a look to see if we've already seen the
683  * current parameters before and if we have already cached a
684  * complete set of records that the outer plan will return for
685  * these parameters.
686  *
687  * When we find a valid cache entry, we'll return the first
688  * tuple from it. If not found, we'll create a cache entry and
689  * then try to fetch a tuple from the outer scan. If we find
690  * one there, we'll try to cache it.
691  */
692 
693  /* see if we've got anything cached for the current parameters */
694  entry = cache_lookup(node, &found);
695 
696  if (found && entry->complete)
697  {
698  node->stats.cache_hits += 1; /* stats update */
699 
700  /*
701  * Set last_tuple and entry so that the state
702  * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
703  * tuple for these parameters.
704  */
705  node->last_tuple = entry->tuplehead;
706  node->entry = entry;
707 
708  /* Fetch the first cached tuple, if there is one */
709  if (entry->tuplehead)
710  {
711  node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;
712 
713  slot = node->ss.ps.ps_ResultTupleSlot;
714  ExecStoreMinimalTuple(entry->tuplehead->mintuple,
715  slot, false);
716 
717  return slot;
718  }
719 
720  /* The cache entry is void of any tuples. */
721  node->mstatus = MEMO_END_OF_SCAN;
722  return NULL;
723  }
724 
725  /* Handle cache miss */
726  node->stats.cache_misses += 1; /* stats update */
727 
728  if (found)
729  {
730  /*
731  * A cache entry was found, but the scan for that entry
732  * did not run to completion. We'll just remove all
733  * tuples and start again. It might be tempting to
734  * continue where we left off, but there's no guarantee
735  * the outer node will produce the tuples in the same
736  * order as it did last time.
737  */
738  entry_purge_tuples(node, entry);
739  }
740 
741  /* Scan the outer node for a tuple to cache */
742  outerNode = outerPlanState(node);
743  outerslot = ExecProcNode(outerNode);
744  if (TupIsNull(outerslot))
745  {
746  /*
747  * cache_lookup may have returned NULL due to failure to
748  * free enough cache space, so ensure we don't do anything
749  * here that assumes it worked. There's no need to go into
750  * bypass mode here as we're setting mstatus to end of
751  * scan.
752  */
753  if (likely(entry))
754  entry->complete = true;
755 
756  node->mstatus = MEMO_END_OF_SCAN;
757  return NULL;
758  }
759 
760  node->entry = entry;
761 
762  /*
763  * If we failed to create the entry or failed to store the
764  * tuple in the entry, then go into bypass mode.
765  */
766  if (unlikely(entry == NULL ||
767  !cache_store_tuple(node, outerslot)))
768  {
769  node->stats.cache_overflows += 1; /* stats update */
770 
771  node->mstatus = MEMO_CACHE_BYPASS_MODE;
772 
773  /*
774  * No need to clear out last_tuple as we'll stay in bypass
775  * mode until the end of the scan.
776  */
777  }
778  else
779  {
780  /*
781  * If we only expect a single row from this scan then we
782  * can mark that we're not expecting more. This allows
783  * cache lookups to work even when the scan has not been
784  * executed to completion.
785  */
786  entry->complete = node->singlerow;
787  node->mstatus = MEMO_FILLING_CACHE;
788  }
789 
790  slot = node->ss.ps.ps_ResultTupleSlot;
791  ExecCopySlot(slot, outerslot);
792  return slot;
793  }
794 
795  case MEMO_CACHE_FETCH_NEXT_TUPLE:
796  {
797  /* We shouldn't be in this state if these are not set */
798  Assert(node->entry != NULL);
799  Assert(node->last_tuple != NULL);
800 
801  /* Skip to the next tuple to output */
802  node->last_tuple = node->last_tuple->next;
803 
804  /* No more tuples in the cache */
805  if (node->last_tuple == NULL)
806  {
807  node->mstatus = MEMO_END_OF_SCAN;
808  return NULL;
809  }
810 
811  slot = node->ss.ps.ps_ResultTupleSlot;
812  ExecStoreMinimalTuple(node->last_tuple->mintuple, slot,
813  false);
814 
815  return slot;
816  }
817 
818  case MEMO_FILLING_CACHE:
819  {
820  TupleTableSlot *outerslot;
821  MemoizeEntry *entry = node->entry;
822 
823  /* entry should already have been set by MEMO_CACHE_LOOKUP */
824  Assert(entry != NULL);
825 
826  /*
827  * When in the MEMO_FILLING_CACHE state, we've just had a
828  * cache miss and are populating the cache with the current
829  * scan tuples.
830  */
831  outerNode = outerPlanState(node);
832  outerslot = ExecProcNode(outerNode);
833  if (TupIsNull(outerslot))
834  {
835  /* No more tuples. Mark it as complete */
836  entry->complete = true;
837  node->mstatus = MEMO_END_OF_SCAN;
838  return NULL;
839  }
840 
841  /*
842  * Validate if the planner properly set the singlerow flag. It
843  * should only set that if each cache entry can, at most,
844  * return 1 row.
845  */
846  if (unlikely(entry->complete))
847  elog(ERROR, "cache entry already complete");
848 
849  /* Record the tuple in the current cache entry */
850  if (unlikely(!cache_store_tuple(node, outerslot)))
851  {
852  /* Couldn't store it? Handle overflow */
853  node->stats.cache_overflows += 1; /* stats update */
854 
855  node->mstatus = MEMO_CACHE_BYPASS_MODE;
856 
857  /*
858  * No need to clear out entry or last_tuple as we'll stay
859  * in bypass mode until the end of the scan.
860  */
861  }
862 
863  slot = node->ss.ps.ps_ResultTupleSlot;
864  ExecCopySlot(slot, outerslot);
865  return slot;
866  }
867 
868  case MEMO_CACHE_BYPASS_MODE:
869  {
870  TupleTableSlot *outerslot;
871 
872  /*
873  * When in bypass mode we just continue to read tuples without
874  * caching. We need to wait until the next rescan before we
875  * can come out of this mode.
876  */
877  outerNode = outerPlanState(node);
878  outerslot = ExecProcNode(outerNode);
879  if (TupIsNull(outerslot))
880  {
881  node->mstatus = MEMO_END_OF_SCAN;
882  return NULL;
883  }
884 
885  slot = node->ss.ps.ps_ResultTupleSlot;
886  ExecCopySlot(slot, outerslot);
887  return slot;
888  }
889 
890  case MEMO_END_OF_SCAN:
891 
892  /*
893  * We've already returned NULL for this scan, but just in case
894  * something calls us again by mistake.
895  */
896  return NULL;
897 
898  default:
899  elog(ERROR, "unrecognized memoize state: %d",
900  (int) node->mstatus);
901  return NULL;
902  } /* switch */

References Assert(), MemoizeInstrumentation::cache_hits, cache_lookup(), MemoizeInstrumentation::cache_misses, MemoizeInstrumentation::cache_overflows, cache_store_tuple(), castNode, MemoizeEntry::complete, elog, MemoizeState::entry, entry_purge_tuples(), ERROR, ExecCopySlot(), ExecProcNode(), ExecStoreMinimalTuple(), MemoizeState::last_tuple, likely, MEMO_CACHE_BYPASS_MODE, MEMO_CACHE_FETCH_NEXT_TUPLE, MEMO_CACHE_LOOKUP, MEMO_END_OF_SCAN, MEMO_FILLING_CACHE, MemoizeTuple::mintuple, MemoizeState::mstatus, MemoizeTuple::next, outerPlanState, ScanState::ps, PlanState::ps_ResultTupleSlot, MemoizeState::singlerow, MemoizeState::ss, MemoizeState::stats, TupIsNull, MemoizeEntry::tuplehead, and unlikely.

Referenced by ExecInitMemoize().

◆ ExecMemoizeEstimate()

void ExecMemoizeEstimate ( MemoizeState * node,
ParallelContext * pcxt 
)

Definition at line 1148 of file nodeMemoize.c.

1150 {
1151  Size size;
1152 
1153  /* don't need this if not instrumenting or no workers */
1154  if (!node->ss.ps.instrument || pcxt->nworkers == 0)
1155  return;
1156 
1157  size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
1158  size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
1159  shm_toc_estimate_chunk(&pcxt->estimator, size);
1160  shm_toc_estimate_keys(&pcxt->estimator, 1);

References add_size(), ParallelContext::estimator, PlanState::instrument, mul_size(), ParallelContext::nworkers, offsetof, ScanState::ps, shm_toc_estimate_chunk, shm_toc_estimate_keys, and MemoizeState::ss.

Referenced by ExecParallelEstimate().

◆ ExecMemoizeInitializeDSM()

void ExecMemoizeInitializeDSM ( MemoizeState * node,
ParallelContext * pcxt 
)

Definition at line 1169 of file nodeMemoize.c.

1171 {
1172  Size size;
1173 
1174  /* don't need this if not instrumenting or no workers */
1175  if (!node->ss.ps.instrument || pcxt->nworkers == 0)
1176  return;
1177 
1178  size = offsetof(SharedMemoizeInfo, sinstrument)
1179  + pcxt->nworkers * sizeof(MemoizeInstrumentation);
1180  node->shared_info = shm_toc_allocate(pcxt->toc, size);
1181  /* ensure any unfilled slots will contain zeroes */
1182  memset(node->shared_info, 0, size);
1183  node->shared_info->num_workers = pcxt->nworkers;
1184  shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
1185  node->shared_info);

References PlanState::instrument, SharedMemoizeInfo::num_workers, ParallelContext::nworkers, offsetof, PlanState::plan, Plan::plan_node_id, ScanState::ps, MemoizeState::shared_info, shm_toc_allocate(), shm_toc_insert(), MemoizeState::ss, and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().

◆ ExecMemoizeInitializeWorker()

void ExecMemoizeInitializeWorker ( MemoizeState * node,
ParallelWorkerContext * pwcxt 
)

Definition at line 1194 of file nodeMemoize.c.

1196 {
1197  node->shared_info =
1198  shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);

References PlanState::plan, Plan::plan_node_id, ScanState::ps, MemoizeState::shared_info, shm_toc_lookup(), MemoizeState::ss, and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

◆ ExecMemoizeRetrieveInstrumentation()

void ExecMemoizeRetrieveInstrumentation ( MemoizeState * node)

Definition at line 1207 of file nodeMemoize.c.

1209 {
1210  Size size;
1211  SharedMemoizeInfo *si;
1212 
1213  if (node->shared_info == NULL)
1214  return;
1215 
1216  size = offsetof(SharedMemoizeInfo, sinstrument)
1217  + node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
1218  si = palloc(size);
1219  memcpy(si, node->shared_info, size);
1220  node->shared_info = si;

References SharedMemoizeInfo::num_workers, offsetof, palloc(), and MemoizeState::shared_info.

Referenced by ExecParallelRetrieveInstrumentation().

◆ ExecReScanMemoize()

void ExecReScanMemoize ( MemoizeState * node)

Definition at line 1098 of file nodeMemoize.c.

1100 {
1101  PlanState *outerPlan = outerPlanState(node);
1102 
1103  /* Mark that we must lookup the cache for a new set of parameters */
1104  node->mstatus = MEMO_CACHE_LOOKUP;
1105 
1106  /* nullify pointers used for the last scan */
1107  node->entry = NULL;
1108  node->last_tuple = NULL;
1109 
1110  /*
1111  * if chgParam of subnode is not null then plan will be re-scanned by
1112  * first ExecProcNode.
1113  */
1114  if (outerPlan->chgParam == NULL)
1115  ExecReScan(outerPlan);
1116 
1117  /*
1118  * Purge the entire cache if a parameter changed that is not part of the
1119  * cache key.
1120  */
1121  if (bms_nonempty_difference(outerPlan->chgParam, node->keyparamids))
1122  cache_purge_all(node);

References bms_nonempty_difference(), cache_purge_all(), MemoizeState::entry, ExecReScan(), MemoizeState::keyparamids, MemoizeState::last_tuple, MEMO_CACHE_LOOKUP, MemoizeState::mstatus, outerPlan, and outerPlanState.

Referenced by ExecReScan().

◆ MemoizeHash_equal()

static bool MemoizeHash_equal ( struct memoize_hash *  tb,
const MemoizeKey * params1,
const MemoizeKey * params2 
)
static

Definition at line 215 of file nodeMemoize.c.

218 {
219  MemoizeState *mstate = (MemoizeState *) tb->private_data;
220  ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
221  TupleTableSlot *tslot = mstate->tableslot;
222  TupleTableSlot *pslot = mstate->probeslot;
223 
224  /* probeslot should have already been prepared by prepare_probe_slot() */
225  ExecStoreMinimalTuple(key1->params, tslot, false);
226 
227  if (mstate->binary_mode)
228  {
229  int numkeys = mstate->nkeys;
230 
231  slot_getallattrs(tslot);
232  slot_getallattrs(pslot);
233 
234  for (int i = 0; i < numkeys; i++)
235  {
236  FormData_pg_attribute *attr;
237 
238  if (tslot->tts_isnull[i] != pslot->tts_isnull[i])
239  return false;
240 
241  /* both NULL? they're equal */
242  if (tslot->tts_isnull[i])
243  continue;
244 
245  /* perform binary comparison on the two datums */
246  attr = &tslot->tts_tupleDescriptor->attrs[i];
247  if (!datum_image_eq(tslot->tts_values[i], pslot->tts_values[i],
248  attr->attbyval, attr->attlen))
249  return false;
250  }
251  return true;
252  }
253  else
254  {
255  econtext->ecxt_innertuple = tslot;
256  econtext->ecxt_outertuple = pslot;
257  return ExecQualAndReset(mstate->cache_eq_expr, econtext);
258  }

References MemoizeState::binary_mode, MemoizeState::cache_eq_expr, datum_image_eq(), ExecQualAndReset(), ExecStoreMinimalTuple(), FormData_pg_attribute, i, MemoizeState::nkeys, MemoizeKey::params, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ExprContext, slot_getallattrs(), MemoizeState::ss, and MemoizeState::tableslot.

◆ MemoizeHash_hash()

static uint32 MemoizeHash_hash ( struct memoize_hash *  tb,
const MemoizeKey * key 
)
static

Definition at line 157 of file nodeMemoize.c.

159 {
160  MemoizeState *mstate = (MemoizeState *) tb->private_data;
161  TupleTableSlot *pslot = mstate->probeslot;
162  uint32 hashkey = 0;
163  int numkeys = mstate->nkeys;
164 
165  if (mstate->binary_mode)
166  {
167  for (int i = 0; i < numkeys; i++)
168  {
169  /* rotate hashkey left 1 bit at each step */
170  hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
171 
172  if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */
173  {
174  FormData_pg_attribute *attr;
175  uint32 hkey;
176 
177  attr = &pslot->tts_tupleDescriptor->attrs[i];
178 
179  hkey = datum_image_hash(pslot->tts_values[i], attr->attbyval, attr->attlen);
180 
181  hashkey ^= hkey;
182  }
183  }
184  }
185  else
186  {
187  FmgrInfo *hashfunctions = mstate->hashfunctions;
188  Oid *collations = mstate->collations;
189 
190  for (int i = 0; i < numkeys; i++)
191  {
192  /* rotate hashkey left 1 bit at each step */
193  hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
194 
195  if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */
196  {
197  uint32 hkey;
198 
199  hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i],
200  collations[i], pslot->tts_values[i]));
201  hashkey ^= hkey;
202  }
203  }
204  }
205 
206  return murmurhash32(hashkey);

References MemoizeState::binary_mode, MemoizeState::collations, datum_image_hash(), DatumGetUInt32, FormData_pg_attribute, FunctionCall1Coll(), MemoizeState::hashfunctions, i, if(), murmurhash32(), MemoizeState::nkeys, and MemoizeState::probeslot.

◆ prepare_probe_slot()

static void prepare_probe_slot ( MemoizeState * mstate,
MemoizeKey * key 
)
inline static

Definition at line 281 of file nodeMemoize.c.

283 {
284  TupleTableSlot *pslot = mstate->probeslot;
285  TupleTableSlot *tslot = mstate->tableslot;
286  int numKeys = mstate->nkeys;
287 
288  ExecClearTuple(pslot);
289 
290  if (key == NULL)
291  {
292  /* Set the probeslot's values based on the current parameter values */
293  for (int i = 0; i < numKeys; i++)
294  pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
295  mstate->ss.ps.ps_ExprContext,
296  &pslot->tts_isnull[i]);
297  }
298  else
299  {
300  /* Process the key's MinimalTuple and store the values in probeslot */
301  ExecStoreMinimalTuple(key->params, tslot, false);
302  slot_getallattrs(tslot);
303  memcpy(pslot->tts_values, tslot->tts_values, sizeof(Datum) * numKeys);
304  memcpy(pslot->tts_isnull, tslot->tts_isnull, sizeof(bool) * numKeys);
305  }
306 
307  ExecStoreVirtualTuple(pslot);

References ExecClearTuple(), ExecEvalExpr(), ExecStoreMinimalTuple(), ExecStoreVirtualTuple(), i, sort-test::key, MemoizeState::nkeys, MemoizeState::param_exprs, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ExprContext, slot_getallattrs(), MemoizeState::ss, MemoizeState::tableslot, TupleTableSlot::tts_isnull, and TupleTableSlot::tts_values.

Referenced by cache_lookup(), cache_reduce_memory(), and cache_store_tuple().

◆ remove_cache_entry()

static void remove_cache_entry ( MemoizeState * mstate,
MemoizeEntry * entry 
)
static

Definition at line 346 of file nodeMemoize.c.

348 {
349  MemoizeKey *key = entry->key;
350 
351  dlist_delete(&entry->key->lru_node);
352 
353  /* Remove all of the tuples from this entry */
354  entry_purge_tuples(mstate, entry);
355 
356  /*
357  * Update memory accounting. entry_purge_tuples should have already
358  * subtracted the memory used for each cached tuple. Here we just update
359  * the amount used by the entry itself.
360  */
361  mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);
362 
363  /* Remove the entry from the cache */
364  memoize_delete_item(mstate->hashtable, entry);
365 
366  pfree(key->params);
367  pfree(key);

References dlist_delete(), EMPTY_ENTRY_MEMORY_BYTES, entry_purge_tuples(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeKey::lru_node, MemoizeState::mem_used, and pfree().

Referenced by cache_reduce_memory().