PostgreSQL Source Code  git master
nodeMemoize.c File Reference
#include "postgres.h"
#include "common/hashfn.h"
#include "executor/executor.h"
#include "executor/nodeMemoize.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "lib/simplehash.h"
Include dependency graph for nodeMemoize.c:

Go to the source code of this file.

Data Structures

struct  MemoizeTuple
 
struct  MemoizeKey
 
struct  MemoizeEntry
 

Macros

#define MEMO_CACHE_LOOKUP   1 /* Attempt to perform a cache lookup */
 
#define MEMO_CACHE_FETCH_NEXT_TUPLE   2 /* Get another tuple from the cache */
 
#define MEMO_FILLING_CACHE   3 /* Read outer node to fill cache */
 
#define MEMO_CACHE_BYPASS_MODE
 
#define MEMO_END_OF_SCAN   5 /* Ready for rescan */
 
#define EMPTY_ENTRY_MEMORY_BYTES(e)
 
#define CACHE_TUPLE_BYTES(t)
 
#define SH_PREFIX   memoize
 
#define SH_ELEMENT_TYPE   MemoizeEntry
 
#define SH_KEY_TYPE   MemoizeKey *
 
#define SH_SCOPE   static inline
 
#define SH_DECLARE
 
#define SH_PREFIX   memoize
 
#define SH_ELEMENT_TYPE   MemoizeEntry
 
#define SH_KEY_TYPE   MemoizeKey *
 
#define SH_KEY   key
 
#define SH_HASH_KEY(tb, key)   MemoizeHash_hash(tb, key)
 
#define SH_EQUAL(tb, a, b)   MemoizeHash_equal(tb, a, b)
 
#define SH_SCOPE   static inline
 
#define SH_STORE_HASH
 
#define SH_GET_HASH(tb, a)   a->hash
 
#define SH_DEFINE
 

Typedefs

typedef struct MemoizeTuple MemoizeTuple
 
typedef struct MemoizeKey MemoizeKey
 
typedef struct MemoizeEntry MemoizeEntry
 

Functions

static uint32 MemoizeHash_hash (struct memoize_hash *tb, const MemoizeKey *key)
 
static bool MemoizeHash_equal (struct memoize_hash *tb, const MemoizeKey *key1, const MemoizeKey *key2)
 
static void build_hash_table (MemoizeState *mstate, uint32 size)
 
static void prepare_probe_slot (MemoizeState *mstate, MemoizeKey *key)
 
static void entry_purge_tuples (MemoizeState *mstate, MemoizeEntry *entry)
 
static void remove_cache_entry (MemoizeState *mstate, MemoizeEntry *entry)
 
static void cache_purge_all (MemoizeState *mstate)
 
static bool cache_reduce_memory (MemoizeState *mstate, MemoizeKey *specialkey)
 
static MemoizeEntry * cache_lookup (MemoizeState *mstate, bool *found)
 
static bool cache_store_tuple (MemoizeState *mstate, TupleTableSlot *slot)
 
static TupleTableSlot * ExecMemoize (PlanState *pstate)
 
MemoizeState * ExecInitMemoize (Memoize *node, EState *estate, int eflags)
 
void ExecEndMemoize (MemoizeState *node)
 
void ExecReScanMemoize (MemoizeState *node)
 
double ExecEstimateCacheEntryOverheadBytes (double ntuples)
 
void ExecMemoizeEstimate (MemoizeState *node, ParallelContext *pcxt)
 
void ExecMemoizeInitializeDSM (MemoizeState *node, ParallelContext *pcxt)
 
void ExecMemoizeInitializeWorker (MemoizeState *node, ParallelWorkerContext *pwcxt)
 
void ExecMemoizeRetrieveInstrumentation (MemoizeState *node)
 

Macro Definition Documentation

◆ CACHE_TUPLE_BYTES

#define CACHE_TUPLE_BYTES (   t)
Value:
(sizeof(MemoizeTuple) + \
(t)->mintuple->t_len)

Definition at line 89 of file nodeMemoize.c.

◆ EMPTY_ENTRY_MEMORY_BYTES

#define EMPTY_ENTRY_MEMORY_BYTES (   e)
Value:
(sizeof(MemoizeEntry) + \
sizeof(MemoizeKey) + \
(e)->key->params->t_len);

Definition at line 86 of file nodeMemoize.c.

◆ MEMO_CACHE_BYPASS_MODE

#define MEMO_CACHE_BYPASS_MODE
Value:
4 /* Bypass mode. Just read from our
* subplan without caching anything */

Definition at line 81 of file nodeMemoize.c.

◆ MEMO_CACHE_FETCH_NEXT_TUPLE

#define MEMO_CACHE_FETCH_NEXT_TUPLE   2 /* Get another tuple from the cache */

Definition at line 79 of file nodeMemoize.c.

◆ MEMO_CACHE_LOOKUP

#define MEMO_CACHE_LOOKUP   1 /* Attempt to perform a cache lookup */

Definition at line 78 of file nodeMemoize.c.

◆ MEMO_END_OF_SCAN

#define MEMO_END_OF_SCAN   5 /* Ready for rescan */

Definition at line 82 of file nodeMemoize.c.

◆ MEMO_FILLING_CACHE

#define MEMO_FILLING_CACHE   3 /* Read outer node to fill cache */

Definition at line 80 of file nodeMemoize.c.

◆ SH_DECLARE

#define SH_DECLARE

Definition at line 129 of file nodeMemoize.c.

◆ SH_DEFINE

#define SH_DEFINE

Definition at line 147 of file nodeMemoize.c.

◆ SH_ELEMENT_TYPE [1/2]

#define SH_ELEMENT_TYPE   MemoizeEntry

Definition at line 139 of file nodeMemoize.c.

◆ SH_ELEMENT_TYPE [2/2]

#define SH_ELEMENT_TYPE   MemoizeEntry

Definition at line 139 of file nodeMemoize.c.

◆ SH_EQUAL

#define SH_EQUAL (   tb,
  a,
  b 
)    MemoizeHash_equal(tb, a, b)

Definition at line 143 of file nodeMemoize.c.

◆ SH_GET_HASH

#define SH_GET_HASH (   tb,
  a 
)    a->hash

Definition at line 146 of file nodeMemoize.c.

◆ SH_HASH_KEY

#define SH_HASH_KEY (   tb,
  key 
)    MemoizeHash_hash(tb, key)

Definition at line 142 of file nodeMemoize.c.

◆ SH_KEY

#define SH_KEY   key

Definition at line 141 of file nodeMemoize.c.

◆ SH_KEY_TYPE [1/2]

#define SH_KEY_TYPE   MemoizeKey *

Definition at line 140 of file nodeMemoize.c.

◆ SH_KEY_TYPE [2/2]

#define SH_KEY_TYPE   MemoizeKey *

Definition at line 140 of file nodeMemoize.c.

◆ SH_PREFIX [1/2]

#define SH_PREFIX   memoize

Definition at line 138 of file nodeMemoize.c.

◆ SH_PREFIX [2/2]

#define SH_PREFIX   memoize

Definition at line 138 of file nodeMemoize.c.

◆ SH_SCOPE [1/2]

#define SH_SCOPE   static inline

Definition at line 144 of file nodeMemoize.c.

◆ SH_SCOPE [2/2]

#define SH_SCOPE   static inline

Definition at line 144 of file nodeMemoize.c.

◆ SH_STORE_HASH

#define SH_STORE_HASH

Definition at line 145 of file nodeMemoize.c.
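
Taken together, the SH_* macros above correspond to two inclusions of "lib/simplehash.h": the first pass (SH_DECLARE) emits the memoize_hash type and function prototypes, and the second pass (SH_DEFINE, together with SH_HASH_KEY, SH_EQUAL, SH_STORE_HASH and SH_GET_HASH) emits the function bodies. The following is a condensed sketch of the second instantiation, reconstructed from the macro values documented on this page rather than copied verbatim from the source:

#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_KEY key
#define SH_HASH_KEY(tb, key) MemoizeHash_hash(tb, key)
#define SH_EQUAL(tb, a, b) MemoizeHash_equal(tb, a, b)
#define SH_SCOPE static inline
#define SH_STORE_HASH
#define SH_GET_HASH(tb, a) a->hash
#define SH_DEFINE
#include "lib/simplehash.h"   /* generates memoize_create(), memoize_insert(),
                               * memoize_lookup(), memoize_delete_item(),
                               * memoize_start_iterate() and memoize_iterate() */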

Typedef Documentation

◆ MemoizeEntry

typedef struct MemoizeEntry MemoizeEntry

◆ MemoizeKey

typedef struct MemoizeKey MemoizeKey

◆ MemoizeTuple

typedef struct MemoizeTuple MemoizeTuple

Function Documentation

◆ build_hash_table()

static void build_hash_table ( MemoizeState *  mstate,
uint32  size 
)
static

Definition at line 264 of file nodeMemoize.c.

266 {
267  /* Make a guess at a good size when we're not given a valid size. */
268  if (size == 0)
269  size = 1024;
270 
271  /* memoize_create will convert the size to a power of 2 */
272  mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);

References MemoizeState::hashtable, and MemoizeState::tableContext.

Referenced by cache_purge_all(), and ExecInitMemoize().

◆ cache_lookup()

static MemoizeEntry * cache_lookup ( MemoizeState *  mstate,
bool *  found 
)
static

Definition at line 505 of file nodeMemoize.c.

507 {
508  MemoizeKey *key;
509  MemoizeEntry *entry;
510  MemoryContext oldcontext;
511 
512  /* prepare the probe slot with the current scan parameters */
513  prepare_probe_slot(mstate, NULL);
514 
515  /*
516  * Add the new entry to the cache. No need to pass a valid key since the
517  * hash function uses mstate's probeslot, which we populated above.
518  */
519  entry = memoize_insert(mstate->hashtable, NULL, found);
520 
521  if (*found)
522  {
523  /*
524  * Move existing entry to the tail of the LRU list to mark it as the
525  * most recently used item.
526  */
527  dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);
528 
529  return entry;
530  }
531 
532  oldcontext = MemoryContextSwitchTo(mstate->tableContext);
533 
534  /* Allocate a new key */
535  entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
536  key->params = ExecCopySlotMinimalTuple(mstate->probeslot);
537 
538  /* Update the total cache memory utilization */
539  mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);
540 
541  /* Initialize this entry */
542  entry->complete = false;
543  entry->tuplehead = NULL;
544 
545  /*
546  * Since this is the most recently used entry, push this entry onto the
547  * end of the LRU list.
548  */
549  dlist_push_tail(&mstate->lru_list, &entry->key->lru_node);
550 
551  mstate->last_tuple = NULL;
552 
553  MemoryContextSwitchTo(oldcontext);
554 
555  /*
556  * If we've gone over our memory budget, then we'll free up some space in
557  * the cache.
558  */
559  if (mstate->mem_used > mstate->mem_limit)
560  {
561  /*
562  * Try to free up some memory. It's highly unlikely that we'll fail
563  * to do so here since the entry we've just added is yet to contain
564  * any tuples and we're able to remove any other entry to reduce the
565  * memory consumption.
566  */
567  if (unlikely(!cache_reduce_memory(mstate, key)))
568  return NULL;
569 
570  /*
571  * The process of removing entries from the cache may have caused the
572  * code in simplehash.h to shuffle elements to earlier buckets in the
573  * hash table. If it has, we'll need to find the entry again by
574  * performing a lookup. Fortunately, we can detect if this has
575  * happened by seeing if the entry is still in use and that the key
576  * pointer matches our expected key.
577  */
578  if (entry->status != memoize_SH_IN_USE || entry->key != key)
579  {
580  /*
581  * We need to repopulate the probeslot as lookups performed during
582  * the cache evictions above will have stored some other key.
583  */
584  prepare_probe_slot(mstate, key);
585 
586  /* Re-find the newly added entry */
587  entry = memoize_lookup(mstate->hashtable, NULL);
588  Assert(entry != NULL);
589  }
590  }
591 
592  return entry;

References Assert(), cache_reduce_memory(), MemoizeEntry::complete, dlist_move_tail(), dlist_push_tail(), EMPTY_ENTRY_MEMORY_BYTES, ExecCopySlotMinimalTuple(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::last_tuple, MemoizeState::lru_list, MemoizeKey::lru_node, MemoizeState::mem_limit, MemoizeState::mem_used, MemoryContextSwitchTo(), palloc(), prepare_probe_slot(), MemoizeState::probeslot, MemoizeEntry::status, MemoizeState::tableContext, MemoizeEntry::tuplehead, and unlikely.

Referenced by ExecMemoize().

◆ cache_purge_all()

static void cache_purge_all ( MemoizeState *  mstate)
static

Definition at line 381 of file nodeMemoize.c.

383 {
384  uint64 evictions = mstate->hashtable->members;
385  PlanState *pstate = (PlanState *) mstate;
386 
387  /*
388  * Likely the most efficient way to remove all items is to just reset the
389  * memory context for the cache and then rebuild a fresh hash table. This
390  * saves having to remove each item one by one and pfree each cached tuple
391  */
 392  MemoryContextReset(mstate->tableContext);
 393 
394  /* Make the hash table the same size as the original size */
395  build_hash_table(mstate, ((Memoize *) pstate->plan)->est_entries);
396 
397  /* reset the LRU list */
398  dlist_init(&mstate->lru_list);
399  mstate->last_tuple = NULL;
400  mstate->entry = NULL;
401 
402  mstate->mem_used = 0;
403 
404  /* XXX should we add something new to track these purges? */
405  mstate->stats.cache_evictions += evictions; /* Update Stats */

References build_hash_table(), MemoizeInstrumentation::cache_evictions, dlist_init(), MemoizeState::entry, MemoizeState::hashtable, MemoizeState::last_tuple, MemoizeState::lru_list, MemoizeState::mem_used, MemoryContextReset(), PlanState::plan, MemoizeState::stats, and MemoizeState::tableContext.

Referenced by ExecReScanMemoize().

◆ cache_reduce_memory()

static bool cache_reduce_memory ( MemoizeState *  mstate,
MemoizeKey *  specialkey 
)
static

Definition at line 417 of file nodeMemoize.c.

419 {
420  bool specialkey_intact = true; /* for now */
421  dlist_mutable_iter iter;
422  uint64 evictions = 0;
423 
424  /* Update peak memory usage */
425  if (mstate->mem_used > mstate->stats.mem_peak)
426  mstate->stats.mem_peak = mstate->mem_used;
427 
428  /* We expect only to be called when we've gone over budget on memory */
429  Assert(mstate->mem_used > mstate->mem_limit);
430 
431  /* Start the eviction process starting at the head of the LRU list. */
432  dlist_foreach_modify(iter, &mstate->lru_list)
433  {
434  MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
435  MemoizeEntry *entry;
436 
437  /*
438  * Populate the hash probe slot in preparation for looking up this LRU
439  * entry.
440  */
441  prepare_probe_slot(mstate, key);
442 
443  /*
444  * Ideally the LRU list pointers would be stored in the entry itself
445  * rather than in the key. Unfortunately, we can't do that as the
446  * simplehash.h code may resize the table and allocate new memory for
447  * entries which would result in those pointers pointing to the old
448  * buckets. However, it's fine to use the key to store this as that's
449  * only referenced by a pointer in the entry, which of course follows
450  * the entry whenever the hash table is resized. Since we only have a
451  * pointer to the key here, we must perform a hash table lookup to
452  * find the entry that the key belongs to.
453  */
454  entry = memoize_lookup(mstate->hashtable, NULL);
455 
456  /*
457  * Sanity check that we found the entry belonging to the LRU list
458  * item. A misbehaving hash or equality function could cause the
459  * entry not to be found or the wrong entry to be found.
460  */
461  if (unlikely(entry == NULL || entry->key != key))
462  elog(ERROR, "could not find memoization table entry");
463 
464  /*
465  * If we're being called to free memory while the cache is being
466  * populated with new tuples, then we'd better take some care as we
467  * could end up freeing the entry which 'specialkey' belongs to.
468  * Generally callers will pass 'specialkey' as the key for the cache
469  * entry which is currently being populated, so we must set
470  * 'specialkey_intact' to false to inform the caller the specialkey
471  * entry has been removed.
472  */
473  if (key == specialkey)
474  specialkey_intact = false;
475 
476  /*
477  * Finally remove the entry. This will remove from the LRU list too.
478  */
479  remove_cache_entry(mstate, entry);
480 
481  evictions++;
482 
483  /* Exit if we've freed enough memory */
484  if (mstate->mem_used <= mstate->mem_limit)
485  break;
486  }
487 
488  mstate->stats.cache_evictions += evictions; /* Update Stats */
489 
490  return specialkey_intact;

References Assert(), MemoizeInstrumentation::cache_evictions, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, elog(), ERROR, MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::lru_list, MemoizeState::mem_limit, MemoizeInstrumentation::mem_peak, MemoizeState::mem_used, prepare_probe_slot(), remove_cache_entry(), MemoizeState::stats, and unlikely.

Referenced by cache_lookup(), and cache_store_tuple().

◆ cache_store_tuple()

static bool cache_store_tuple ( MemoizeState *  mstate,
TupleTableSlot *  slot 
)
static

Definition at line 602 of file nodeMemoize.c.

604 {
605  MemoizeTuple *tuple;
606  MemoizeEntry *entry = mstate->entry;
607  MemoryContext oldcontext;
608 
609  Assert(slot != NULL);
610  Assert(entry != NULL);
611 
612  oldcontext = MemoryContextSwitchTo(mstate->tableContext);
613 
614  tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
615  tuple->mintuple = ExecCopySlotMinimalTuple(slot);
616  tuple->next = NULL;
617 
618  /* Account for the memory we just consumed */
619  mstate->mem_used += CACHE_TUPLE_BYTES(tuple);
620 
621  if (entry->tuplehead == NULL)
622  {
623  /*
624  * This is the first tuple for this entry, so just point the list head
625  * to it.
626  */
627  entry->tuplehead = tuple;
628  }
629  else
630  {
631  /* push this tuple onto the tail of the list */
632  mstate->last_tuple->next = tuple;
633  }
634 
635  mstate->last_tuple = tuple;
636  MemoryContextSwitchTo(oldcontext);
637 
638  /*
639  * If we've gone over our memory budget then free up some space in the
640  * cache.
641  */
642  if (mstate->mem_used > mstate->mem_limit)
643  {
644  MemoizeKey *key = entry->key;
645 
646  if (!cache_reduce_memory(mstate, key))
647  return false;
648 
649  /*
650  * The process of removing entries from the cache may have caused the
651  * code in simplehash.h to shuffle elements to earlier buckets in the
652  * hash table. If it has, we'll need to find the entry again by
653  * performing a lookup. Fortunately, we can detect if this has
654  * happened by seeing if the entry is still in use and that the key
655  * pointer matches our expected key.
656  */
657  if (entry->status != memoize_SH_IN_USE || entry->key != key)
658  {
659  /*
660  * We need to repopulate the probeslot as lookups performed during
661  * the cache evictions above will have stored some other key.
662  */
663  prepare_probe_slot(mstate, key);
664 
665  /* Re-find the entry */
666  mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
667  Assert(entry != NULL);
668  }
669  }
670 
671  return true;

References Assert(), cache_reduce_memory(), CACHE_TUPLE_BYTES, MemoizeState::entry, ExecCopySlotMinimalTuple(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::last_tuple, MemoizeState::mem_limit, MemoizeState::mem_used, MemoryContextSwitchTo(), MemoizeTuple::mintuple, MemoizeTuple::next, palloc(), prepare_probe_slot(), MemoizeEntry::status, MemoizeState::tableContext, and MemoizeEntry::tuplehead.

Referenced by ExecMemoize().

◆ entry_purge_tuples()

static void entry_purge_tuples ( MemoizeState *  mstate,
MemoizeEntry *  entry 
)
inline static

Definition at line 323 of file nodeMemoize.c.

325 {
326  MemoizeTuple *tuple = entry->tuplehead;
327  uint64 freed_mem = 0;
328 
329  while (tuple != NULL)
330  {
331  MemoizeTuple *next = tuple->next;
332 
333  freed_mem += CACHE_TUPLE_BYTES(tuple);
334 
335  /* Free memory used for this tuple */
336  pfree(tuple->mintuple);
337  pfree(tuple);
338 
339  tuple = next;
340  }
341 
342  entry->complete = false;
343  entry->tuplehead = NULL;
344 
345  /* Update the memory accounting */
346  mstate->mem_used -= freed_mem;

References CACHE_TUPLE_BYTES, MemoizeEntry::complete, MemoizeState::mem_used, MemoizeTuple::mintuple, next, MemoizeTuple::next, pfree(), and MemoizeEntry::tuplehead.

Referenced by ExecMemoize(), and remove_cache_entry().

◆ ExecEndMemoize()

void ExecEndMemoize ( MemoizeState *  node)

Definition at line 1041 of file nodeMemoize.c.

1043 {
1044 #ifdef USE_ASSERT_CHECKING
1045  /* Validate the memory accounting code is correct in assert builds. */
1046  {
1047  int count;
1048  uint64 mem = 0;
1049  memoize_iterator i;
1050  MemoizeEntry *entry;
1051 
1052  memoize_start_iterate(node->hashtable, &i);
1053 
1054  count = 0;
1055  while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
1056  {
1057  MemoizeTuple *tuple = entry->tuplehead;
1058 
1059  mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
1060  while (tuple != NULL)
1061  {
1062  mem += CACHE_TUPLE_BYTES(tuple);
1063  tuple = tuple->next;
1064  }
1065  count++;
1066  }
1067 
1068  Assert(count == node->hashtable->members);
1069  Assert(mem == node->mem_used);
1070  }
1071 #endif
1072 
1073  /*
1074  * When ending a parallel worker, copy the statistics gathered by the
1075  * worker back into shared memory so that it can be picked up by the main
1076  * process to report in EXPLAIN ANALYZE.
1077  */
1078  if (node->shared_info != NULL && IsParallelWorker())
1079  {
 1080  MemoizeInstrumentation *si;
 1081 
1082  /* Make mem_peak available for EXPLAIN */
1083  if (node->stats.mem_peak == 0)
1084  node->stats.mem_peak = node->mem_used;
1085 
1086  Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
 1087  si = &node->shared_info->sinstrument[ParallelWorkerNumber];
 1088  memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
1089  }
1090 
1091  /* Remove the cache context */
 1092  MemoryContextDelete(node->tableContext);
 1093 
 1094  ExecClearTuple(node->ss.ss_ScanTupleSlot);
 1095  /* must drop pointer to cache result tuple */
 1096  ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
 1097 
1098  /*
1099  * free exprcontext
1100  */
1101  ExecFreeExprContext(&node->ss.ps);
1102 
1103  /*
1104  * shut down the subplan
1105  */
1106  ExecEndNode(outerPlanState(node));

References Assert(), CACHE_TUPLE_BYTES, EMPTY_ENTRY_MEMORY_BYTES, ExecClearTuple(), ExecEndNode(), ExecFreeExprContext(), MemoizeState::hashtable, i, IsParallelWorker, MemoizeInstrumentation::mem_peak, MemoizeState::mem_used, MemoryContextDelete(), MemoizeTuple::next, outerPlanState, ParallelWorkerNumber, ScanState::ps, PlanState::ps_ResultTupleSlot, MemoizeState::shared_info, SharedMemoizeInfo::sinstrument, MemoizeState::ss, ScanState::ss_ScanTupleSlot, MemoizeState::stats, MemoizeState::tableContext, and MemoizeEntry::tuplehead.

Referenced by ExecEndNode().

◆ ExecEstimateCacheEntryOverheadBytes()

double ExecEstimateCacheEntryOverheadBytes ( double  ntuples)

Definition at line 1141 of file nodeMemoize.c.

1143 {
1144  return sizeof(MemoizeEntry) + sizeof(MemoizeKey) + sizeof(MemoizeTuple) *
1145  ntuples;

Referenced by cost_memoize_rescan().
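
The returned value is only the fixed bookkeeping overhead (entry, key and per-tuple list node); it excludes the variable-length MinimalTuple data that EMPTY_ENTRY_MEMORY_BYTES and CACHE_TUPLE_BYTES additionally charge at execution time. As a minimal sketch of how a caller such as cost_memoize_rescan() might fold this into a total cache-space estimate (the helper and its variable names below are illustrative, not taken from the source):

static double
estimated_cache_bytes(double est_entries, double tuples_per_entry,
                      double tuple_width)
{
    /* fixed per-entry overhead, as returned by the function above */
    double entry_overhead = ExecEstimateCacheEntryOverheadBytes(tuples_per_entry);

    /* add the cached tuple data itself, assuming 'tuple_width' bytes per tuple */
    return est_entries * (entry_overhead + tuples_per_entry * tuple_width);
}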

◆ ExecInitMemoize()

MemoizeState * ExecInitMemoize ( Memoize *  node,
EState *  estate,
int  eflags 
)

Definition at line 916 of file nodeMemoize.c.

 918 {
 919  MemoizeState *mstate = makeNode(MemoizeState);
920  Plan *outerNode;
921  int i;
922  int nkeys;
923  Oid *eqfuncoids;
924 
925  /* check for unsupported flags */
926  Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
927 
928  mstate->ss.ps.plan = (Plan *) node;
929  mstate->ss.ps.state = estate;
930  mstate->ss.ps.ExecProcNode = ExecMemoize;
931 
932  /*
933  * Miscellaneous initialization
934  *
935  * create expression context for node
936  */
937  ExecAssignExprContext(estate, &mstate->ss.ps);
938 
939  outerNode = outerPlan(node);
940  outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);
941 
942  /*
943  * Initialize return slot and type. No need to initialize projection info
944  * because this node doesn't do projections.
945  */
 946  ExecInitResultTupleSlotTL(&mstate->ss.ps, &TTSOpsMinimalTuple);
 947  mstate->ss.ps.ps_ProjInfo = NULL;
948 
949  /*
950  * Initialize scan slot and type.
951  */
 952  ExecCreateScanSlotFromOuterPlan(estate, &mstate->ss, &TTSOpsVirtual);
 953 
954  /*
955  * Set the state machine to lookup the cache. We won't find anything
956  * until we cache something, but this saves a special case to create the
957  * first entry.
958  */
959  mstate->mstatus = MEMO_CACHE_LOOKUP;
960 
961  mstate->nkeys = nkeys = node->numKeys;
 962  mstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
 963  mstate->tableslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
 964  &TTSOpsMinimalTuple);
 965  mstate->probeslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
 966  &TTSOpsVirtual);
967 
968  mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
969  mstate->collations = node->collations; /* Just point directly to the plan
970  * data */
971  mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
972 
973  eqfuncoids = palloc(nkeys * sizeof(Oid));
974 
975  for (i = 0; i < nkeys; i++)
976  {
977  Oid hashop = node->hashOperators[i];
978  Oid left_hashfn;
979  Oid right_hashfn;
980  Expr *param_expr = (Expr *) list_nth(node->param_exprs, i);
981 
982  if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
983  elog(ERROR, "could not find hash function for hash operator %u",
984  hashop);
985 
986  fmgr_info(left_hashfn, &mstate->hashfunctions[i]);
987 
988  mstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) mstate);
989  eqfuncoids[i] = get_opcode(hashop);
990  }
991 
 992  mstate->cache_eq_expr = ExecBuildParamSetEqual(mstate->hashkeydesc,
 993  &TTSOpsMinimalTuple,
 994  &TTSOpsVirtual,
995  eqfuncoids,
996  node->collations,
997  node->param_exprs,
998  (PlanState *) mstate);
999 
1000  pfree(eqfuncoids);
1001  mstate->mem_used = 0;
1002 
1003  /* Limit the total memory consumed by the cache to this */
1004  mstate->mem_limit = get_hash_memory_limit();
1005 
1006  /* A memory context dedicated for the cache */
 1007  mstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
 1008  "MemoizeHashTable",
 1009  ALLOCSET_DEFAULT_SIZES);
 1010 
1011  dlist_init(&mstate->lru_list);
1012  mstate->last_tuple = NULL;
1013  mstate->entry = NULL;
1014 
1015  /*
1016  * Mark if we can assume the cache entry is completed after we get the
1017  * first record for it. Some callers might not call us again after
1018  * getting the first match. e.g. A join operator performing a unique join
1019  * is able to skip to the next outer tuple after getting the first
1020  * matching inner tuple. In this case, the cache entry is complete after
1021  * getting the first tuple. This allows us to mark it as so.
1022  */
1023  mstate->singlerow = node->singlerow;
1024  mstate->keyparamids = node->keyparamids;
1025 
1026  /*
1027  * Record if the cache keys should be compared bit by bit, or logically
1028  * using the type's hash equality operator
1029  */
1030  mstate->binary_mode = node->binary_mode;
1031 
1032  /* Zero the statistics counters */
1033  memset(&mstate->stats, 0, sizeof(MemoizeInstrumentation));
1034 
1035  /* Allocate and set up the actual cache */
1036  build_hash_table(mstate, node->est_entries);
1037 
1038  return mstate;

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), MemoizeState::binary_mode, Memoize::binary_mode, build_hash_table(), MemoizeState::cache_eq_expr, MemoizeState::collations, CurrentMemoryContext, dlist_init(), elog(), MemoizeState::entry, ERROR, Memoize::est_entries, EXEC_FLAG_BACKWARD, EXEC_FLAG_MARK, ExecAssignExprContext(), ExecBuildParamSetEqual(), ExecCreateScanSlotFromOuterPlan(), ExecInitExpr(), ExecInitNode(), ExecInitResultTupleSlotTL(), ExecMemoize(), PlanState::ExecProcNode, ExecTypeFromExprList(), fmgr_info(), get_hash_memory_limit(), get_op_hash_functions(), get_opcode(), MemoizeState::hashfunctions, MemoizeState::hashkeydesc, i, MemoizeState::keyparamids, Memoize::keyparamids, MemoizeState::last_tuple, list_nth(), MemoizeState::lru_list, makeNode, MakeSingleTupleTableSlot(), MemoizeState::mem_limit, MemoizeState::mem_used, MEMO_CACHE_LOOKUP, MemoizeState::mstatus, MemoizeState::nkeys, Memoize::numKeys, outerPlan, outerPlanState, palloc(), MemoizeState::param_exprs, Memoize::param_exprs, pfree(), PlanState::plan, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ProjInfo, MemoizeState::singlerow, Memoize::singlerow, MemoizeState::ss, PlanState::state, MemoizeState::stats, MemoizeState::tableContext, MemoizeState::tableslot, TTSOpsMinimalTuple, and TTSOpsVirtual.

Referenced by ExecInitNode().

◆ ExecMemoize()

static TupleTableSlot * ExecMemoize ( PlanState *  pstate)
static

Definition at line 674 of file nodeMemoize.c.

676 {
677  MemoizeState *node = castNode(MemoizeState, pstate);
678  PlanState *outerNode;
679  TupleTableSlot *slot;
680 
681  switch (node->mstatus)
682  {
683  case MEMO_CACHE_LOOKUP:
684  {
685  MemoizeEntry *entry;
686  TupleTableSlot *outerslot;
687  bool found;
688 
689  Assert(node->entry == NULL);
690 
691  /*
692  * We're only ever in this state for the first call of the
693  * scan. Here we have a look to see if we've already seen the
694  * current parameters before and if we have already cached a
695  * complete set of records that the outer plan will return for
696  * these parameters.
697  *
698  * When we find a valid cache entry, we'll return the first
699  * tuple from it. If not found, we'll create a cache entry and
700  * then try to fetch a tuple from the outer scan. If we find
701  * one there, we'll try to cache it.
702  */
703 
704  /* see if we've got anything cached for the current parameters */
705  entry = cache_lookup(node, &found);
706 
707  if (found && entry->complete)
708  {
709  node->stats.cache_hits += 1; /* stats update */
710 
711  /*
712  * Set last_tuple and entry so that the state
713  * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
714  * tuple for these parameters.
715  */
716  node->last_tuple = entry->tuplehead;
717  node->entry = entry;
718 
719  /* Fetch the first cached tuple, if there is one */
720  if (entry->tuplehead)
721  {
 722  node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;
 723 
724  slot = node->ss.ps.ps_ResultTupleSlot;
 725  ExecStoreMinimalTuple(entry->tuplehead->mintuple,
 726  slot, false);
727 
728  return slot;
729  }
730 
731  /* The cache entry is void of any tuples. */
732  node->mstatus = MEMO_END_OF_SCAN;
733  return NULL;
734  }
735 
736  /* Handle cache miss */
737  node->stats.cache_misses += 1; /* stats update */
738 
739  if (found)
740  {
741  /*
742  * A cache entry was found, but the scan for that entry
743  * did not run to completion. We'll just remove all
744  * tuples and start again. It might be tempting to
745  * continue where we left off, but there's no guarantee
746  * the outer node will produce the tuples in the same
747  * order as it did last time.
748  */
749  entry_purge_tuples(node, entry);
750  }
751 
752  /* Scan the outer node for a tuple to cache */
753  outerNode = outerPlanState(node);
754  outerslot = ExecProcNode(outerNode);
755  if (TupIsNull(outerslot))
756  {
757  /*
758  * cache_lookup may have returned NULL due to failure to
759  * free enough cache space, so ensure we don't do anything
760  * here that assumes it worked. There's no need to go into
761  * bypass mode here as we're setting mstatus to end of
762  * scan.
763  */
764  if (likely(entry))
765  entry->complete = true;
766 
767  node->mstatus = MEMO_END_OF_SCAN;
768  return NULL;
769  }
770 
771  node->entry = entry;
772 
773  /*
774  * If we failed to create the entry or failed to store the
775  * tuple in the entry, then go into bypass mode.
776  */
777  if (unlikely(entry == NULL ||
778  !cache_store_tuple(node, outerslot)))
779  {
780  node->stats.cache_overflows += 1; /* stats update */
781 
 782  node->mstatus = MEMO_CACHE_BYPASS_MODE;
 783 
784  /*
785  * No need to clear out last_tuple as we'll stay in bypass
786  * mode until the end of the scan.
787  */
788  }
789  else
790  {
791  /*
792  * If we only expect a single row from this scan then we
793  * can mark that we're not expecting more. This allows
794  * cache lookups to work even when the scan has not been
795  * executed to completion.
796  */
797  entry->complete = node->singlerow;
798  node->mstatus = MEMO_FILLING_CACHE;
799  }
800 
801  slot = node->ss.ps.ps_ResultTupleSlot;
802  ExecCopySlot(slot, outerslot);
803  return slot;
804  }
805 
 806  case MEMO_CACHE_FETCH_NEXT_TUPLE:
 807  {
808  /* We shouldn't be in this state if these are not set */
809  Assert(node->entry != NULL);
810  Assert(node->last_tuple != NULL);
811 
812  /* Skip to the next tuple to output */
813  node->last_tuple = node->last_tuple->next;
814 
815  /* No more tuples in the cache */
816  if (node->last_tuple == NULL)
817  {
818  node->mstatus = MEMO_END_OF_SCAN;
819  return NULL;
820  }
821 
822  slot = node->ss.ps.ps_ResultTupleSlot;
 823  ExecStoreMinimalTuple(node->last_tuple->mintuple, slot,
 824  false);
825 
826  return slot;
827  }
828 
829  case MEMO_FILLING_CACHE:
830  {
831  TupleTableSlot *outerslot;
832  MemoizeEntry *entry = node->entry;
833 
834  /* entry should already have been set by MEMO_CACHE_LOOKUP */
835  Assert(entry != NULL);
836 
837  /*
838  * When in the MEMO_FILLING_CACHE state, we've just had a
839  * cache miss and are populating the cache with the current
840  * scan tuples.
841  */
842  outerNode = outerPlanState(node);
843  outerslot = ExecProcNode(outerNode);
844  if (TupIsNull(outerslot))
845  {
846  /* No more tuples. Mark it as complete */
847  entry->complete = true;
848  node->mstatus = MEMO_END_OF_SCAN;
849  return NULL;
850  }
851 
852  /*
853  * Validate if the planner properly set the singlerow flag. It
854  * should only set that if each cache entry can, at most,
855  * return 1 row.
856  */
857  if (unlikely(entry->complete))
858  elog(ERROR, "cache entry already complete");
859 
860  /* Record the tuple in the current cache entry */
861  if (unlikely(!cache_store_tuple(node, outerslot)))
862  {
863  /* Couldn't store it? Handle overflow */
864  node->stats.cache_overflows += 1; /* stats update */
865 
 866  node->mstatus = MEMO_CACHE_BYPASS_MODE;
 867 
868  /*
869  * No need to clear out entry or last_tuple as we'll stay
870  * in bypass mode until the end of the scan.
871  */
872  }
873 
874  slot = node->ss.ps.ps_ResultTupleSlot;
875  ExecCopySlot(slot, outerslot);
876  return slot;
877  }
878 
 879  case MEMO_CACHE_BYPASS_MODE:
 880  {
881  TupleTableSlot *outerslot;
882 
883  /*
884  * When in bypass mode we just continue to read tuples without
885  * caching. We need to wait until the next rescan before we
886  * can come out of this mode.
887  */
888  outerNode = outerPlanState(node);
889  outerslot = ExecProcNode(outerNode);
890  if (TupIsNull(outerslot))
891  {
892  node->mstatus = MEMO_END_OF_SCAN;
893  return NULL;
894  }
895 
896  slot = node->ss.ps.ps_ResultTupleSlot;
897  ExecCopySlot(slot, outerslot);
898  return slot;
899  }
900 
901  case MEMO_END_OF_SCAN:
902 
903  /*
904  * We've already returned NULL for this scan, but just in case
905  * something calls us again by mistake.
906  */
907  return NULL;
908 
909  default:
910  elog(ERROR, "unrecognized memoize state: %d",
911  (int) node->mstatus);
912  return NULL;
913  } /* switch */

References Assert(), MemoizeInstrumentation::cache_hits, cache_lookup(), MemoizeInstrumentation::cache_misses, MemoizeInstrumentation::cache_overflows, cache_store_tuple(), castNode, MemoizeEntry::complete, elog(), MemoizeState::entry, entry_purge_tuples(), ERROR, ExecCopySlot(), ExecProcNode(), ExecStoreMinimalTuple(), MemoizeState::last_tuple, likely, MEMO_CACHE_BYPASS_MODE, MEMO_CACHE_FETCH_NEXT_TUPLE, MEMO_CACHE_LOOKUP, MEMO_END_OF_SCAN, MEMO_FILLING_CACHE, MemoizeTuple::mintuple, MemoizeState::mstatus, MemoizeTuple::next, outerPlanState, ScanState::ps, PlanState::ps_ResultTupleSlot, MemoizeState::singlerow, MemoizeState::ss, MemoizeState::stats, TupIsNull, MemoizeEntry::tuplehead, and unlikely.

Referenced by ExecInitMemoize().

◆ ExecMemoizeEstimate()

void ExecMemoizeEstimate ( MemoizeState *  node,
ParallelContext *  pcxt 
)

Definition at line 1159 of file nodeMemoize.c.

1161 {
1162  Size size;
1163 
1164  /* don't need this if not instrumenting or no workers */
1165  if (!node->ss.ps.instrument || pcxt->nworkers == 0)
1166  return;
1167 
1168  size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
1169  size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
1170  shm_toc_estimate_chunk(&pcxt->estimator, size);
1171  shm_toc_estimate_keys(&pcxt->estimator, 1);

References add_size(), ParallelContext::estimator, PlanState::instrument, mul_size(), ParallelContext::nworkers, ScanState::ps, shm_toc_estimate_chunk, shm_toc_estimate_keys, and MemoizeState::ss.

Referenced by ExecParallelEstimate().

◆ ExecMemoizeInitializeDSM()

void ExecMemoizeInitializeDSM ( MemoizeState *  node,
ParallelContext *  pcxt 
)

Definition at line 1180 of file nodeMemoize.c.

1182 {
1183  Size size;
1184 
1185  /* don't need this if not instrumenting or no workers */
1186  if (!node->ss.ps.instrument || pcxt->nworkers == 0)
1187  return;
1188 
1189  size = offsetof(SharedMemoizeInfo, sinstrument)
1190  + pcxt->nworkers * sizeof(MemoizeInstrumentation);
1191  node->shared_info = shm_toc_allocate(pcxt->toc, size);
1192  /* ensure any unfilled slots will contain zeroes */
1193  memset(node->shared_info, 0, size);
1194  node->shared_info->num_workers = pcxt->nworkers;
1195  shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
1196  node->shared_info);

References PlanState::instrument, SharedMemoizeInfo::num_workers, ParallelContext::nworkers, PlanState::plan, Plan::plan_node_id, ScanState::ps, MemoizeState::shared_info, shm_toc_allocate(), shm_toc_insert(), MemoizeState::ss, and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().

◆ ExecMemoizeInitializeWorker()

void ExecMemoizeInitializeWorker ( MemoizeState *  node,
ParallelWorkerContext *  pwcxt 
)

Definition at line 1205 of file nodeMemoize.c.

1207 {
1208  node->shared_info =
1209  shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);

References PlanState::plan, Plan::plan_node_id, ScanState::ps, MemoizeState::shared_info, shm_toc_lookup(), MemoizeState::ss, and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

◆ ExecMemoizeRetrieveInstrumentation()

void ExecMemoizeRetrieveInstrumentation ( MemoizeState *  node)

Definition at line 1218 of file nodeMemoize.c.

1220 {
1221  Size size;
1222  SharedMemoizeInfo *si;
1223 
1224  if (node->shared_info == NULL)
1225  return;
1226 
1227  size = offsetof(SharedMemoizeInfo, sinstrument)
1228  + node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
1229  si = palloc(size);
1230  memcpy(si, node->shared_info, size);
1231  node->shared_info = si;

References SharedMemoizeInfo::num_workers, palloc(), and MemoizeState::shared_info.

Referenced by ExecParallelRetrieveInstrumentation().

◆ ExecReScanMemoize()

void ExecReScanMemoize ( MemoizeState *  node)

Definition at line 1109 of file nodeMemoize.c.

1111 {
 1112  PlanState *outerPlan = outerPlanState(node);
 1113 
1114  /* Mark that we must lookup the cache for a new set of parameters */
1115  node->mstatus = MEMO_CACHE_LOOKUP;
1116 
1117  /* nullify pointers used for the last scan */
1118  node->entry = NULL;
1119  node->last_tuple = NULL;
1120 
1121  /*
1122  * if chgParam of subnode is not null then plan will be re-scanned by
1123  * first ExecProcNode.
1124  */
1125  if (outerPlan->chgParam == NULL)
 1126  ExecReScan(outerPlan);
 1127 
1128  /*
1129  * Purge the entire cache if a parameter changed that is not part of the
1130  * cache key.
1131  */
1132  if (bms_nonempty_difference(outerPlan->chgParam, node->keyparamids))
1133  cache_purge_all(node);

References bms_nonempty_difference(), cache_purge_all(), MemoizeState::entry, ExecReScan(), MemoizeState::keyparamids, MemoizeState::last_tuple, MEMO_CACHE_LOOKUP, MemoizeState::mstatus, outerPlan, and outerPlanState.

Referenced by ExecReScan().

◆ MemoizeHash_equal()

static bool MemoizeHash_equal ( struct memoize_hash *  tb,
const MemoizeKey *  key1,
const MemoizeKey *  key2 
)
static

Definition at line 215 of file nodeMemoize.c.

218 {
219  MemoizeState *mstate = (MemoizeState *) tb->private_data;
220  ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
221  TupleTableSlot *tslot = mstate->tableslot;
222  TupleTableSlot *pslot = mstate->probeslot;
223 
224  /* probeslot should have already been prepared by prepare_probe_slot() */
225  ExecStoreMinimalTuple(key1->params, tslot, false);
226 
227  if (mstate->binary_mode)
228  {
229  int numkeys = mstate->nkeys;
230 
231  slot_getallattrs(tslot);
232  slot_getallattrs(pslot);
233 
234  for (int i = 0; i < numkeys; i++)
235  {
236  FormData_pg_attribute *attr;
237 
238  if (tslot->tts_isnull[i] != pslot->tts_isnull[i])
239  return false;
240 
241  /* both NULL? they're equal */
242  if (tslot->tts_isnull[i])
243  continue;
244 
245  /* perform binary comparison on the two datums */
246  attr = &tslot->tts_tupleDescriptor->attrs[i];
247  if (!datum_image_eq(tslot->tts_values[i], pslot->tts_values[i],
248  attr->attbyval, attr->attlen))
249  return false;
250  }
251  return true;
252  }
253  else
254  {
255  econtext->ecxt_innertuple = tslot;
256  econtext->ecxt_outertuple = pslot;
257  return ExecQualAndReset(mstate->cache_eq_expr, econtext);
258  }

References MemoizeState::binary_mode, MemoizeState::cache_eq_expr, datum_image_eq(), ExecQualAndReset(), ExecStoreMinimalTuple(), FormData_pg_attribute, i, MemoizeState::nkeys, MemoizeKey::params, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ExprContext, slot_getallattrs(), MemoizeState::ss, and MemoizeState::tableslot.

◆ MemoizeHash_hash()

static uint32 MemoizeHash_hash ( struct memoize_hash *  tb,
const MemoizeKey *  key 
)
static

Definition at line 157 of file nodeMemoize.c.

159 {
160  MemoizeState *mstate = (MemoizeState *) tb->private_data;
161  TupleTableSlot *pslot = mstate->probeslot;
162  uint32 hashkey = 0;
163  int numkeys = mstate->nkeys;
164 
165  if (mstate->binary_mode)
166  {
167  for (int i = 0; i < numkeys; i++)
168  {
169  /* combine successive hashkeys by rotating */
170  hashkey = pg_rotate_left32(hashkey, 1);
171 
172  if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */
173  {
174  FormData_pg_attribute *attr;
175  uint32 hkey;
176 
177  attr = &pslot->tts_tupleDescriptor->attrs[i];
178 
179  hkey = datum_image_hash(pslot->tts_values[i], attr->attbyval, attr->attlen);
180 
181  hashkey ^= hkey;
182  }
183  }
184  }
185  else
186  {
187  FmgrInfo *hashfunctions = mstate->hashfunctions;
188  Oid *collations = mstate->collations;
189 
190  for (int i = 0; i < numkeys; i++)
191  {
192  /* combine successive hashkeys by rotating */
193  hashkey = pg_rotate_left32(hashkey, 1);
194 
195  if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */
196  {
197  uint32 hkey;
198 
199  hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i],
200  collations[i], pslot->tts_values[i]));
201  hashkey ^= hkey;
202  }
203  }
204  }
205 
206  return murmurhash32(hashkey);

References MemoizeState::binary_mode, MemoizeState::collations, datum_image_hash(), DatumGetUInt32(), FormData_pg_attribute, FunctionCall1Coll(), MemoizeState::hashfunctions, i, if(), murmurhash32(), MemoizeState::nkeys, pg_rotate_left32(), and MemoizeState::probeslot.

◆ prepare_probe_slot()

static void prepare_probe_slot ( MemoizeState *  mstate,
MemoizeKey *  key 
)
inline static

Definition at line 281 of file nodeMemoize.c.

283 {
284  TupleTableSlot *pslot = mstate->probeslot;
285  TupleTableSlot *tslot = mstate->tableslot;
286  int numKeys = mstate->nkeys;
287 
288  ExecClearTuple(pslot);
289 
290  if (key == NULL)
291  {
292  ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
293  MemoryContext oldcontext;
294 
295  oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
296 
297  /* Set the probeslot's values based on the current parameter values */
298  for (int i = 0; i < numKeys; i++)
299  pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
300  econtext,
301  &pslot->tts_isnull[i]);
302 
303  MemoryContextSwitchTo(oldcontext);
304  }
305  else
306  {
307  /* Process the key's MinimalTuple and store the values in probeslot */
308  ExecStoreMinimalTuple(key->params, tslot, false);
309  slot_getallattrs(tslot);
310  memcpy(pslot->tts_values, tslot->tts_values, sizeof(Datum) * numKeys);
311  memcpy(pslot->tts_isnull, tslot->tts_isnull, sizeof(bool) * numKeys);
312  }
313 
314  ExecStoreVirtualTuple(pslot);

References ExprContext::ecxt_per_tuple_memory, ExecClearTuple(), ExecEvalExpr(), ExecStoreMinimalTuple(), ExecStoreVirtualTuple(), i, sort-test::key, MemoryContextSwitchTo(), MemoizeState::nkeys, MemoizeState::param_exprs, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ExprContext, slot_getallattrs(), MemoizeState::ss, MemoizeState::tableslot, TupleTableSlot::tts_isnull, and TupleTableSlot::tts_values.

Referenced by cache_lookup(), cache_reduce_memory(), and cache_store_tuple().

◆ remove_cache_entry()

static void remove_cache_entry ( MemoizeState *  mstate,
MemoizeEntry *  entry 
)
static

Definition at line 353 of file nodeMemoize.c.

355 {
356  MemoizeKey *key = entry->key;
357 
358  dlist_delete(&entry->key->lru_node);
359 
360  /* Remove all of the tuples from this entry */
361  entry_purge_tuples(mstate, entry);
362 
363  /*
364  * Update memory accounting. entry_purge_tuples should have already
365  * subtracted the memory used for each cached tuple. Here we just update
366  * the amount used by the entry itself.
367  */
368  mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);
369 
370  /* Remove the entry from the cache */
371  memoize_delete_item(mstate->hashtable, entry);
372 
373  pfree(key->params);
374  pfree(key);

References dlist_delete(), EMPTY_ENTRY_MEMORY_BYTES, entry_purge_tuples(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeKey::lru_node, MemoizeState::mem_used, and pfree().

Referenced by cache_reduce_memory().