PostgreSQL Source Code git master
nodeMemoize.c File Reference
#include "postgres.h"
#include "access/htup_details.h"
#include "common/hashfn.h"
#include "executor/executor.h"
#include "executor/nodeMemoize.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "lib/simplehash.h"

Go to the source code of this file.

Data Structures

struct  MemoizeTuple
 
struct  MemoizeKey
 
struct  MemoizeEntry
 

Macros

#define MEMO_CACHE_LOOKUP   1 /* Attempt to perform a cache lookup */
 
#define MEMO_CACHE_FETCH_NEXT_TUPLE   2 /* Get another tuple from the cache */
 
#define MEMO_FILLING_CACHE   3 /* Read outer node to fill cache */
 
#define MEMO_CACHE_BYPASS_MODE
 
#define MEMO_END_OF_SCAN   5 /* Ready for rescan */
 
#define EMPTY_ENTRY_MEMORY_BYTES(e)
 
#define CACHE_TUPLE_BYTES(t)
 
#define SH_PREFIX   memoize
 
#define SH_ELEMENT_TYPE   MemoizeEntry
 
#define SH_KEY_TYPE   MemoizeKey *
 
#define SH_SCOPE   static inline
 
#define SH_DECLARE
 
#define SH_PREFIX   memoize
 
#define SH_ELEMENT_TYPE   MemoizeEntry
 
#define SH_KEY_TYPE   MemoizeKey *
 
#define SH_KEY   key
 
#define SH_HASH_KEY(tb, key)   MemoizeHash_hash(tb, key)
 
#define SH_EQUAL(tb, a, b)   MemoizeHash_equal(tb, a, b)
 
#define SH_SCOPE   static inline
 
#define SH_STORE_HASH
 
#define SH_GET_HASH(tb, a)   a->hash
 
#define SH_DEFINE
 

Typedefs

typedef struct MemoizeTuple MemoizeTuple
 
typedef struct MemoizeKey MemoizeKey
 
typedef struct MemoizeEntry MemoizeEntry
 

Functions

static uint32 MemoizeHash_hash (struct memoize_hash *tb, const MemoizeKey *key)
 
static bool MemoizeHash_equal (struct memoize_hash *tb, const MemoizeKey *key1, const MemoizeKey *key2)
 
static void build_hash_table (MemoizeState *mstate, uint32 size)
 
static void prepare_probe_slot (MemoizeState *mstate, MemoizeKey *key)
 
static void entry_purge_tuples (MemoizeState *mstate, MemoizeEntry *entry)
 
static void remove_cache_entry (MemoizeState *mstate, MemoizeEntry *entry)
 
static void cache_purge_all (MemoizeState *mstate)
 
static bool cache_reduce_memory (MemoizeState *mstate, MemoizeKey *specialkey)
 
static MemoizeEntry * cache_lookup (MemoizeState *mstate, bool *found)
 
static bool cache_store_tuple (MemoizeState *mstate, TupleTableSlot *slot)
 
static TupleTableSlot * ExecMemoize (PlanState *pstate)
 
MemoizeState * ExecInitMemoize (Memoize *node, EState *estate, int eflags)
 
void ExecEndMemoize (MemoizeState *node)
 
void ExecReScanMemoize (MemoizeState *node)
 
double ExecEstimateCacheEntryOverheadBytes (double ntuples)
 
void ExecMemoizeEstimate (MemoizeState *node, ParallelContext *pcxt)
 
void ExecMemoizeInitializeDSM (MemoizeState *node, ParallelContext *pcxt)
 
void ExecMemoizeInitializeWorker (MemoizeState *node, ParallelWorkerContext *pwcxt)
 
void ExecMemoizeRetrieveInstrumentation (MemoizeState *node)
 

Macro Definition Documentation

◆ CACHE_TUPLE_BYTES

#define CACHE_TUPLE_BYTES(t)
Value:
(sizeof(MemoizeTuple) + \
(t)->mintuple->t_len)

Definition at line 90 of file nodeMemoize.c.

◆ EMPTY_ENTRY_MEMORY_BYTES

#define EMPTY_ENTRY_MEMORY_BYTES(e)
Value:
(sizeof(MemoizeEntry) + \
sizeof(MemoizeKey) + \
(e)->key->params->t_len)

Definition at line 87 of file nodeMemoize.c.
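These two macros drive the cache's memory accounting: each entry is charged a fixed header cost plus the length of its cached key, each cached tuple is charged its header plus tuple length, and the running total is kept in MemoizeState's mem_used. A minimal standalone sketch of the same accounting idea (the Demo* structs and the sizes below are invented for illustration, not the PostgreSQL definitions):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for MemoizeEntry / MemoizeKey / MemoizeTuple */
typedef struct DemoTuple { size_t t_len; struct DemoTuple *next; } DemoTuple;
typedef struct DemoKey   { size_t t_len; } DemoKey;
typedef struct DemoEntry { DemoKey *key; DemoTuple *tuplehead; } DemoEntry;

#define EMPTY_ENTRY_BYTES(e) (sizeof(DemoEntry) + sizeof(DemoKey) + (e)->key->t_len)
#define TUPLE_BYTES(t)       (sizeof(DemoTuple) + (t)->t_len)

int main(void)
{
    DemoKey   key = {.t_len = 24};
    DemoTuple tup = {.t_len = 64, .next = NULL};
    DemoEntry entry = {.key = &key, .tuplehead = &tup};
    size_t    mem_used = 0;

    mem_used += EMPTY_ENTRY_BYTES(&entry);  /* charged when the entry is created */
    mem_used += TUPLE_BYTES(&tup);          /* charged for each tuple added to the entry */
    printf("accounted bytes: %zu\n", mem_used);
    return 0;
}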

◆ MEMO_CACHE_BYPASS_MODE

#define MEMO_CACHE_BYPASS_MODE
Value:
4 /* Bypass mode. Just read from our
* subplan without caching anything */

Definition at line 82 of file nodeMemoize.c.

◆ MEMO_CACHE_FETCH_NEXT_TUPLE

#define MEMO_CACHE_FETCH_NEXT_TUPLE   2 /* Get another tuple from the cache */

Definition at line 80 of file nodeMemoize.c.

◆ MEMO_CACHE_LOOKUP

#define MEMO_CACHE_LOOKUP   1 /* Attempt to perform a cache lookup */

Definition at line 79 of file nodeMemoize.c.

◆ MEMO_END_OF_SCAN

#define MEMO_END_OF_SCAN   5 /* Ready for rescan */

Definition at line 83 of file nodeMemoize.c.

◆ MEMO_FILLING_CACHE

#define MEMO_FILLING_CACHE   3 /* Read outer node to fill cache */

Definition at line 81 of file nodeMemoize.c.

◆ SH_DECLARE

#define SH_DECLARE

Definition at line 130 of file nodeMemoize.c.

◆ SH_DEFINE

#define SH_DEFINE

Definition at line 148 of file nodeMemoize.c.

◆ SH_ELEMENT_TYPE [1/2]

#define SH_ELEMENT_TYPE   MemoizeEntry

Definition at line 140 of file nodeMemoize.c.

◆ SH_ELEMENT_TYPE [2/2]

#define SH_ELEMENT_TYPE   MemoizeEntry

Definition at line 140 of file nodeMemoize.c.

◆ SH_EQUAL

#define SH_EQUAL(tb, a, b)   MemoizeHash_equal(tb, a, b)

Definition at line 144 of file nodeMemoize.c.

◆ SH_GET_HASH

#define SH_GET_HASH(tb, a)   a->hash

Definition at line 147 of file nodeMemoize.c.

◆ SH_HASH_KEY

#define SH_HASH_KEY(tb, key)   MemoizeHash_hash(tb, key)

Definition at line 143 of file nodeMemoize.c.

◆ SH_KEY

#define SH_KEY   key

Definition at line 142 of file nodeMemoize.c.

◆ SH_KEY_TYPE [1/2]

#define SH_KEY_TYPE   MemoizeKey *

Definition at line 141 of file nodeMemoize.c.

◆ SH_KEY_TYPE [2/2]

#define SH_KEY_TYPE   MemoizeKey *

Definition at line 141 of file nodeMemoize.c.

◆ SH_PREFIX [1/2]

#define SH_PREFIX   memoize

Definition at line 139 of file nodeMemoize.c.

◆ SH_PREFIX [2/2]

#define SH_PREFIX   memoize

Definition at line 139 of file nodeMemoize.c.

◆ SH_SCOPE [1/2]

#define SH_SCOPE   static inline

Definition at line 145 of file nodeMemoize.c.

◆ SH_SCOPE [2/2]

#define SH_SCOPE   static inline

Definition at line 145 of file nodeMemoize.c.

◆ SH_STORE_HASH

#define SH_STORE_HASH

Definition at line 146 of file nodeMemoize.c.
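Taken together, the SH_* macros parameterize PostgreSQL's simplehash.h template: nodeMemoize.c includes the template twice, once with SH_DECLARE to emit the memoize_hash type and function prototypes, and once with SH_DEFINE, plus the hash and equality hooks, to emit the implementation. A condensed sketch of that two-pass pattern, assuming the MemoizeEntry/MemoizeKey types and the two hash support functions are in scope as they are in this file:

/* First inclusion: declarations only */
#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_SCOPE static inline
#define SH_DECLARE
#include "lib/simplehash.h"

/* prototypes for MemoizeHash_hash() and MemoizeHash_equal() sit between the two inclusions */

/* Second inclusion: definitions, wired to the hash and equality functions */
#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_KEY key
#define SH_HASH_KEY(tb, key) MemoizeHash_hash(tb, key)
#define SH_EQUAL(tb, a, b) MemoizeHash_equal(tb, a, b)
#define SH_SCOPE static inline
#define SH_STORE_HASH
#define SH_GET_HASH(tb, a) a->hash
#define SH_DEFINE
#include "lib/simplehash.h"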

Typedef Documentation

◆ MemoizeEntry

typedef struct MemoizeEntry MemoizeEntry

◆ MemoizeKey

typedef struct MemoizeKey MemoizeKey

◆ MemoizeTuple

typedef struct MemoizeTuple MemoizeTuple

Function Documentation

◆ build_hash_table()

static void build_hash_table(MemoizeState *mstate, uint32 size)

Definition at line 283 of file nodeMemoize.c.

285{
286 Assert(mstate->hashtable == NULL);
287
288 /* Make a guess at a good size when we're not given a valid size. */
289 if (size == 0)
290 size = 1024;
291
292 /* memoize_create will convert the size to a power of 2 */
293 mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);
294}

References Assert(), MemoizeState::hashtable, and MemoizeState::tableContext.

Referenced by ExecMemoize().

◆ cache_lookup()

static MemoizeEntry *cache_lookup(MemoizeState *mstate, bool *found)

Definition at line 528 of file nodeMemoize.c.

530{
531 MemoizeKey *key;
532 MemoizeEntry *entry;
533 MemoryContext oldcontext;
534
535 /* prepare the probe slot with the current scan parameters */
536 prepare_probe_slot(mstate, NULL);
537
538 /*
539 * Add the new entry to the cache. No need to pass a valid key since the
540 * hash function uses mstate's probeslot, which we populated above.
541 */
542 entry = memoize_insert(mstate->hashtable, NULL, found);
543
544 if (*found)
545 {
546 /*
547 * Move existing entry to the tail of the LRU list to mark it as the
548 * most recently used item.
549 */
550 dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);
551
552 return entry;
553 }
554
555 oldcontext = MemoryContextSwitchTo(mstate->tableContext);
556
557 /* Allocate a new key */
558 entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
559 key->params = ExecCopySlotMinimalTuple(mstate->probeslot);
560
561 /* Update the total cache memory utilization */
562 mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);
563
564 /* Initialize this entry */
565 entry->complete = false;
566 entry->tuplehead = NULL;
567
568 /*
569 * Since this is the most recently used entry, push this entry onto the
570 * end of the LRU list.
571 */
572 dlist_push_tail(&mstate->lru_list, &entry->key->lru_node);
573
574 mstate->last_tuple = NULL;
575
576 MemoryContextSwitchTo(oldcontext);
577
578 /*
579 * If we've gone over our memory budget, then we'll free up some space in
580 * the cache.
581 */
582 if (mstate->mem_used > mstate->mem_limit)
583 {
584 /*
585 * Try to free up some memory. It's highly unlikely that we'll fail
586 * to do so here since the entry we've just added is yet to contain
587 * any tuples and we're able to remove any other entry to reduce the
588 * memory consumption.
589 */
590 if (unlikely(!cache_reduce_memory(mstate, key)))
591 return NULL;
592
593 /*
594 * The process of removing entries from the cache may have caused the
595 * code in simplehash.h to shuffle elements to earlier buckets in the
596 * hash table. If it has, we'll need to find the entry again by
597 * performing a lookup. Fortunately, we can detect if this has
598 * happened by seeing if the entry is still in use and that the key
599 * pointer matches our expected key.
600 */
601 if (entry->status != memoize_SH_IN_USE || entry->key != key)
602 {
603 /*
604 * We need to repopulate the probeslot as lookups performed during
605 * the cache evictions above will have stored some other key.
606 */
607 prepare_probe_slot(mstate, key);
608
609 /* Re-find the newly added entry */
610 entry = memoize_lookup(mstate->hashtable, NULL);
611 Assert(entry != NULL);
612 }
613 }
614
615 return entry;
616}

References Assert(), cache_reduce_memory(), MemoizeEntry::complete, dlist_move_tail(), dlist_push_tail(), EMPTY_ENTRY_MEMORY_BYTES, ExecCopySlotMinimalTuple(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::last_tuple, MemoizeState::lru_list, MemoizeKey::lru_node, MemoizeState::mem_limit, MemoizeState::mem_used, MemoryContextSwitchTo(), palloc(), prepare_probe_slot(), MemoizeState::probeslot, MemoizeEntry::status, MemoizeState::tableContext, MemoizeEntry::tuplehead, and unlikely.

Referenced by ExecMemoize().

◆ cache_purge_all()

static void cache_purge_all(MemoizeState *mstate)

Definition at line 402 of file nodeMemoize.c.

404{
405 uint64 evictions = 0;
406
407 if (mstate->hashtable != NULL)
408 evictions = mstate->hashtable->members;
409
410 /*
411 * Likely the most efficient way to remove all items is to just reset the
412 * memory context for the cache and then rebuild a fresh hash table. This
413 * saves having to remove each item one by one and pfree each cached tuple
414 */
415 MemoryContextReset(mstate->tableContext);
416
417 /* NULLify so we recreate the table on the next call */
418 mstate->hashtable = NULL;
419
420 /* reset the LRU list */
421 dlist_init(&mstate->lru_list);
422 mstate->last_tuple = NULL;
423 mstate->entry = NULL;
424
425 mstate->mem_used = 0;
426
427 /* XXX should we add something new to track these purges? */
428 mstate->stats.cache_evictions += evictions; /* Update Stats */
429}

References MemoizeInstrumentation::cache_evictions, dlist_init(), MemoizeState::entry, MemoizeState::hashtable, MemoizeState::last_tuple, MemoizeState::lru_list, MemoizeState::mem_used, MemoryContextReset(), MemoizeState::stats, and MemoizeState::tableContext.

Referenced by ExecReScanMemoize().

◆ cache_reduce_memory()

static bool cache_reduce_memory(MemoizeState *mstate, MemoizeKey *specialkey)

Definition at line 440 of file nodeMemoize.c.

442{
443 bool specialkey_intact = true; /* for now */
444 dlist_mutable_iter iter;
445 uint64 evictions = 0;
446
447 /* Update peak memory usage */
448 if (mstate->mem_used > mstate->stats.mem_peak)
449 mstate->stats.mem_peak = mstate->mem_used;
450
451 /* We expect only to be called when we've gone over budget on memory */
452 Assert(mstate->mem_used > mstate->mem_limit);
453
454 /* Start the eviction process starting at the head of the LRU list. */
455 dlist_foreach_modify(iter, &mstate->lru_list)
456 {
457 MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
458 MemoizeEntry *entry;
459
460 /*
461 * Populate the hash probe slot in preparation for looking up this LRU
462 * entry.
463 */
464 prepare_probe_slot(mstate, key);
465
466 /*
467 * Ideally the LRU list pointers would be stored in the entry itself
468 * rather than in the key. Unfortunately, we can't do that as the
469 * simplehash.h code may resize the table and allocate new memory for
470 * entries which would result in those pointers pointing to the old
471 * buckets. However, it's fine to use the key to store this as that's
472 * only referenced by a pointer in the entry, which of course follows
473 * the entry whenever the hash table is resized. Since we only have a
474 * pointer to the key here, we must perform a hash table lookup to
475 * find the entry that the key belongs to.
476 */
477 entry = memoize_lookup(mstate->hashtable, NULL);
478
479 /*
480 * Sanity check that we found the entry belonging to the LRU list
481 * item. A misbehaving hash or equality function could cause the
482 * entry not to be found or the wrong entry to be found.
483 */
484 if (unlikely(entry == NULL || entry->key != key))
485 elog(ERROR, "could not find memoization table entry");
486
487 /*
488 * If we're being called to free memory while the cache is being
489 * populated with new tuples, then we'd better take some care as we
490 * could end up freeing the entry which 'specialkey' belongs to.
491 * Generally callers will pass 'specialkey' as the key for the cache
492 * entry which is currently being populated, so we must set
493 * 'specialkey_intact' to false to inform the caller the specialkey
494 * entry has been removed.
495 */
496 if (key == specialkey)
497 specialkey_intact = false;
498
499 /*
500 * Finally remove the entry. This will remove from the LRU list too.
501 */
502 remove_cache_entry(mstate, entry);
503
504 evictions++;
505
506 /* Exit if we've freed enough memory */
507 if (mstate->mem_used <= mstate->mem_limit)
508 break;
509 }
510
511 mstate->stats.cache_evictions += evictions; /* Update Stats */
512
513 return specialkey_intact;
514}

References Assert(), MemoizeInstrumentation::cache_evictions, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, elog, ERROR, MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::lru_list, MemoizeState::mem_limit, MemoizeInstrumentation::mem_peak, MemoizeState::mem_used, prepare_probe_slot(), remove_cache_entry(), MemoizeState::stats, and unlikely.

Referenced by cache_lookup(), and cache_store_tuple().
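The eviction loop walks the LRU list from its head (the least recently used key), re-finds each entry through the hash table, and removes entries until mem_used falls back under mem_limit. A standalone sketch of that least-recently-used discipline with an intrusive doubly linked list (plain C with invented names, not the PostgreSQL dlist API):

#include <stdio.h>
#include <stdlib.h>

typedef struct Node { struct Node *prev, *next; int id; size_t bytes; } Node;
typedef struct { Node head; size_t mem_used, mem_limit; } Cache;  /* head is a sentinel */

static void list_init(Cache *c) { c->head.prev = c->head.next = &c->head; }
static void list_unlink(Node *n) { n->prev->next = n->next; n->next->prev = n->prev; }
static void push_tail(Cache *c, Node *n)  /* tail = most recently used */
{
    n->prev = c->head.prev; n->next = &c->head;
    c->head.prev->next = n; c->head.prev = n;
}

/* Evict from the head (least recently used) until we are back under budget. */
static void reduce_memory(Cache *c)
{
    while (c->mem_used > c->mem_limit && c->head.next != &c->head)
    {
        Node *victim = c->head.next;
        list_unlink(victim);
        c->mem_used -= victim->bytes;
        printf("evicted entry %d (%zu bytes)\n", victim->id, victim->bytes);
        free(victim);
    }
}

int main(void)
{
    Cache c = {.mem_used = 0, .mem_limit = 200};
    list_init(&c);
    for (int i = 0; i < 5; i++)
    {
        Node *n = malloc(sizeof(Node));
        n->id = i; n->bytes = 80;
        push_tail(&c, n);           /* new entries start as most recently used */
        c.mem_used += n->bytes;
        reduce_memory(&c);          /* keep the cache under its budget */
    }
    while (c.head.next != &c.head)  /* cleanup */
    {
        Node *n = c.head.next;
        list_unlink(n);
        free(n);
    }
    return 0;
}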

◆ cache_store_tuple()

static bool cache_store_tuple(MemoizeState *mstate, TupleTableSlot *slot)

Definition at line 625 of file nodeMemoize.c.

627{
628 MemoizeTuple *tuple;
629 MemoizeEntry *entry = mstate->entry;
630 MemoryContext oldcontext;
631
632 Assert(slot != NULL);
633 Assert(entry != NULL);
634
635 oldcontext = MemoryContextSwitchTo(mstate->tableContext);
636
637 tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
638 tuple->mintuple = ExecCopySlotMinimalTuple(slot);
639 tuple->next = NULL;
640
641 /* Account for the memory we just consumed */
642 mstate->mem_used += CACHE_TUPLE_BYTES(tuple);
643
644 if (entry->tuplehead == NULL)
645 {
646 /*
647 * This is the first tuple for this entry, so just point the list head
648 * to it.
649 */
650 entry->tuplehead = tuple;
651 }
652 else
653 {
654 /* push this tuple onto the tail of the list */
655 mstate->last_tuple->next = tuple;
656 }
657
658 mstate->last_tuple = tuple;
659 MemoryContextSwitchTo(oldcontext);
660
661 /*
662 * If we've gone over our memory budget then free up some space in the
663 * cache.
664 */
665 if (mstate->mem_used > mstate->mem_limit)
666 {
667 MemoizeKey *key = entry->key;
668
669 if (!cache_reduce_memory(mstate, key))
670 return false;
671
672 /*
673 * The process of removing entries from the cache may have caused the
674 * code in simplehash.h to shuffle elements to earlier buckets in the
675 * hash table. If it has, we'll need to find the entry again by
676 * performing a lookup. Fortunately, we can detect if this has
677 * happened by seeing if the entry is still in use and that the key
678 * pointer matches our expected key.
679 */
680 if (entry->status != memoize_SH_IN_USE || entry->key != key)
681 {
682 /*
683 * We need to repopulate the probeslot as lookups performed during
684 * the cache evictions above will have stored some other key.
685 */
686 prepare_probe_slot(mstate, key);
687
688 /* Re-find the entry */
689 mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
690 Assert(entry != NULL);
691 }
692 }
693
694 return true;
695}

References Assert(), cache_reduce_memory(), CACHE_TUPLE_BYTES, MemoizeState::entry, ExecCopySlotMinimalTuple(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeState::last_tuple, MemoizeState::mem_limit, MemoizeState::mem_used, MemoryContextSwitchTo(), MemoizeTuple::mintuple, MemoizeTuple::next, palloc(), prepare_probe_slot(), MemoizeEntry::status, MemoizeState::tableContext, and MemoizeEntry::tuplehead.

Referenced by ExecMemoize().
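Appends are O(1) because the entry's tuplehead is paired with a tail pointer (last_tuple, which in nodeMemoize.c lives in MemoizeState rather than in the entry). A standalone sketch of that append discipline, using invented struct names:

#include <stdio.h>
#include <stdlib.h>

typedef struct DemoTuple
{
    int value;
    struct DemoTuple *next;
} DemoTuple;

typedef struct
{
    DemoTuple *tuplehead;   /* first cached tuple for the entry */
    DemoTuple *last_tuple;  /* tail pointer, so appends are O(1) */
} DemoEntry;

static void store_tuple(DemoEntry *entry, int value)
{
    DemoTuple *tuple = malloc(sizeof(DemoTuple));

    tuple->value = value;
    tuple->next = NULL;

    if (entry->tuplehead == NULL)
        entry->tuplehead = tuple;          /* first tuple: becomes the list head */
    else
        entry->last_tuple->next = tuple;   /* otherwise append after the current tail */
    entry->last_tuple = tuple;
}

int main(void)
{
    DemoEntry entry = {NULL, NULL};

    for (int i = 1; i <= 3; i++)
        store_tuple(&entry, i);
    for (DemoTuple *t = entry.tuplehead; t != NULL; t = t->next)
        printf("cached tuple %d\n", t->value);
    return 0;
}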

◆ entry_purge_tuples()

static inline void entry_purge_tuples(MemoizeState *mstate, MemoizeEntry *entry)

Definition at line 344 of file nodeMemoize.c.

346{
347 MemoizeTuple *tuple = entry->tuplehead;
348 uint64 freed_mem = 0;
349
350 while (tuple != NULL)
351 {
352 MemoizeTuple *next = tuple->next;
353
354 freed_mem += CACHE_TUPLE_BYTES(tuple);
355
356 /* Free memory used for this tuple */
357 pfree(tuple->mintuple);
358 pfree(tuple);
359
360 tuple = next;
361 }
362
363 entry->complete = false;
364 entry->tuplehead = NULL;
365
366 /* Update the memory accounting */
367 mstate->mem_used -= freed_mem;
368}

References CACHE_TUPLE_BYTES, MemoizeEntry::complete, MemoizeState::mem_used, MemoizeTuple::mintuple, next, MemoizeTuple::next, pfree(), and MemoizeEntry::tuplehead.

Referenced by ExecMemoize(), and remove_cache_entry().

◆ ExecEndMemoize()

void ExecEndMemoize(MemoizeState *node)

Definition at line 1080 of file nodeMemoize.c.

1082{
1083#ifdef USE_ASSERT_CHECKING
1084 /* Validate the memory accounting code is correct in assert builds. */
1085 if (node->hashtable != NULL)
1086 {
1087 int count;
1088 uint64 mem = 0;
1089 memoize_iterator i;
1090 MemoizeEntry *entry;
1091
1092 memoize_start_iterate(node->hashtable, &i);
1093
1094 count = 0;
1095 while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
1096 {
1097 MemoizeTuple *tuple = entry->tuplehead;
1098
1099 mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
1100 while (tuple != NULL)
1101 {
1102 mem += CACHE_TUPLE_BYTES(tuple);
1103 tuple = tuple->next;
1104 }
1105 count++;
1106 }
1107
1108 Assert(count == node->hashtable->members);
1109 Assert(mem == node->mem_used);
1110 }
1111#endif
1112
1113 /*
1114 * When ending a parallel worker, copy the statistics gathered by the
1115 * worker back into shared memory so that it can be picked up by the main
1116 * process to report in EXPLAIN ANALYZE.
1117 */
1118 if (node->shared_info != NULL && IsParallelWorker())
1119 {
1120 MemoizeInstrumentation *si;
1121
1122 /* Make mem_peak available for EXPLAIN */
1123 if (node->stats.mem_peak == 0)
1124 node->stats.mem_peak = node->mem_used;
1125
1126 Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
1127 si = &node->shared_info->sinstrument[ParallelWorkerNumber];
1128 memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
1129 }
1130
1131 /* Remove the cache context */
1132 MemoryContextDelete(node->tableContext);
1133
1134 /*
1135 * shut down the subplan
1136 */
1137 ExecEndNode(outerPlanState(node));
1138}

References Assert(), CACHE_TUPLE_BYTES, EMPTY_ENTRY_MEMORY_BYTES, ExecEndNode(), MemoizeState::hashtable, i, IsParallelWorker, MemoizeInstrumentation::mem_peak, MemoizeState::mem_used, MemoryContextDelete(), MemoizeTuple::next, outerPlanState, ParallelWorkerNumber, MemoizeState::shared_info, SharedMemoizeInfo::sinstrument, MemoizeState::stats, MemoizeState::tableContext, and MemoizeEntry::tuplehead.

Referenced by ExecEndNode().

◆ ExecEstimateCacheEntryOverheadBytes()

double ExecEstimateCacheEntryOverheadBytes(double ntuples)

Definition at line 1172 of file nodeMemoize.c.

1174{
1175 return sizeof(MemoizeEntry) + sizeof(MemoizeKey) + sizeof(MemoizeTuple) *
1176 ntuples;
1177}

Referenced by cost_memoize_rescan().
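For costing purposes this is simply one MemoizeEntry plus one MemoizeKey plus one MemoizeTuple header per expected tuple; the cached tuple data itself is not counted here. As a purely illustrative calculation (real struct sizes vary by platform and build), if sizeof(MemoizeEntry) were 24 bytes, sizeof(MemoizeKey) 24 bytes and sizeof(MemoizeTuple) 16 bytes, an entry expected to hold 10 tuples would be estimated at 24 + 24 + 16 * 10 = 208 bytes of overhead.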

◆ ExecInitMemoize()

MemoizeState *ExecInitMemoize(Memoize *node, EState *estate, int eflags)

Definition at line 952 of file nodeMemoize.c.

954{
955 MemoizeState *mstate = makeNode(MemoizeState);
956 Plan *outerNode;
957 int i;
958 int nkeys;
959 Oid *eqfuncoids;
960
961 /* check for unsupported flags */
962 Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
963
964 mstate->ss.ps.plan = (Plan *) node;
965 mstate->ss.ps.state = estate;
966 mstate->ss.ps.ExecProcNode = ExecMemoize;
967
968 /*
969 * Miscellaneous initialization
970 *
971 * create expression context for node
972 */
973 ExecAssignExprContext(estate, &mstate->ss.ps);
974
975 outerNode = outerPlan(node);
976 outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);
977
978 /*
979 * Initialize return slot and type. No need to initialize projection info
980 * because this node doesn't do projections.
981 */
982 ExecInitResultTupleSlotTL(&mstate->ss.ps, &TTSOpsMinimalTuple);
983 mstate->ss.ps.ps_ProjInfo = NULL;
984
985 /*
986 * Initialize scan slot and type.
987 */
988 ExecCreateScanSlotFromOuterPlan(estate, &mstate->ss, &TTSOpsMinimalTuple);
989
990 /*
991 * Set the state machine to lookup the cache. We won't find anything
992 * until we cache something, but this saves a special case to create the
993 * first entry.
994 */
995 mstate->mstatus = MEMO_CACHE_LOOKUP;
996
997 mstate->nkeys = nkeys = node->numKeys;
998 mstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
999 mstate->tableslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
1000 &TTSOpsMinimalTuple);
1001 mstate->probeslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
1002 &TTSOpsVirtual);
1003
1004 mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
1005 mstate->collations = node->collations; /* Just point directly to the plan
1006 * data */
1007 mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
1008
1009 eqfuncoids = palloc(nkeys * sizeof(Oid));
1010
1011 for (i = 0; i < nkeys; i++)
1012 {
1013 Oid hashop = node->hashOperators[i];
1014 Oid left_hashfn;
1015 Oid right_hashfn;
1016 Expr *param_expr = (Expr *) list_nth(node->param_exprs, i);
1017
1018 if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
1019 elog(ERROR, "could not find hash function for hash operator %u",
1020 hashop);
1021
1022 fmgr_info(left_hashfn, &mstate->hashfunctions[i]);
1023
1024 mstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) mstate);
1025 eqfuncoids[i] = get_opcode(hashop);
1026 }
1027
1028 mstate->cache_eq_expr = ExecBuildParamSetEqual(mstate->hashkeydesc,
1029 &TTSOpsMinimalTuple,
1030 &TTSOpsVirtual,
1031 eqfuncoids,
1032 node->collations,
1033 node->param_exprs,
1034 (PlanState *) mstate);
1035
1036 pfree(eqfuncoids);
1037 mstate->mem_used = 0;
1038
1039 /* Limit the total memory consumed by the cache to this */
1040 mstate->mem_limit = get_hash_memory_limit();
1041
1042 /* A memory context dedicated for the cache */
1044 "MemoizeHashTable",
1046
1047 dlist_init(&mstate->lru_list);
1048 mstate->last_tuple = NULL;
1049 mstate->entry = NULL;
1050
1051 /*
1052 * Mark if we can assume the cache entry is completed after we get the
1053 * first record for it. Some callers might not call us again after
1054 * getting the first match. e.g. A join operator performing a unique join
1055 * is able to skip to the next outer tuple after getting the first
1056 * matching inner tuple. In this case, the cache entry is complete after
1057 * getting the first tuple. This allows us to mark it as so.
1058 */
1059 mstate->singlerow = node->singlerow;
1060 mstate->keyparamids = node->keyparamids;
1061
1062 /*
1063 * Record if the cache keys should be compared bit by bit, or logically
1064 * using the type's hash equality operator
1065 */
1066 mstate->binary_mode = node->binary_mode;
1067
1068 /* Zero the statistics counters */
1069 memset(&mstate->stats, 0, sizeof(MemoizeInstrumentation));
1070
1071 /*
1072 * Because it may require a large allocation, we delay building of the
1073 * hash table until executor run.
1074 */
1075 mstate->hashtable = NULL;
1076
1077 return mstate;
1078}

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), MemoizeState::binary_mode, Memoize::binary_mode, MemoizeState::cache_eq_expr, MemoizeState::collations, CurrentMemoryContext, dlist_init(), elog, MemoizeState::entry, ERROR, EXEC_FLAG_BACKWARD, EXEC_FLAG_MARK, ExecAssignExprContext(), ExecBuildParamSetEqual(), ExecCreateScanSlotFromOuterPlan(), ExecInitExpr(), ExecInitNode(), ExecInitResultTupleSlotTL(), ExecMemoize(), PlanState::ExecProcNode, ExecTypeFromExprList(), fmgr_info(), get_hash_memory_limit(), get_op_hash_functions(), get_opcode(), MemoizeState::hashfunctions, MemoizeState::hashkeydesc, MemoizeState::hashtable, i, MemoizeState::keyparamids, Memoize::keyparamids, MemoizeState::last_tuple, list_nth(), MemoizeState::lru_list, makeNode, MakeSingleTupleTableSlot(), MemoizeState::mem_limit, MemoizeState::mem_used, MEMO_CACHE_LOOKUP, MemoizeState::mstatus, MemoizeState::nkeys, Memoize::numKeys, outerPlan, outerPlanState, palloc(), MemoizeState::param_exprs, Memoize::param_exprs, pfree(), PlanState::plan, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ProjInfo, MemoizeState::singlerow, Memoize::singlerow, MemoizeState::ss, PlanState::state, MemoizeState::stats, MemoizeState::tableContext, MemoizeState::tableslot, TTSOpsMinimalTuple, and TTSOpsVirtual.

Referenced by ExecInitNode().

◆ ExecMemoize()

static TupleTableSlot *ExecMemoize(PlanState *pstate)

Definition at line 697 of file nodeMemoize.c.

699{
700 MemoizeState *node = castNode(MemoizeState, pstate);
701 ExprContext *econtext = node->ss.ps.ps_ExprContext;
702 PlanState *outerNode;
703 TupleTableSlot *slot;
704
705 CHECK_FOR_INTERRUPTS();
706
707 /*
708 * Reset per-tuple memory context to free any expression evaluation
709 * storage allocated in the previous tuple cycle.
710 */
711 ResetExprContext(econtext);
712
713 switch (node->mstatus)
714 {
715 case MEMO_CACHE_LOOKUP:
716 {
717 MemoizeEntry *entry;
718 TupleTableSlot *outerslot;
719 bool found;
720
721 Assert(node->entry == NULL);
722
723 /* first call? we'll need a hash table. */
724 if (unlikely(node->hashtable == NULL))
725 build_hash_table(node, ((Memoize *) pstate->plan)->est_entries);
726
727 /*
728 * We're only ever in this state for the first call of the
729 * scan. Here we have a look to see if we've already seen the
730 * current parameters before and if we have already cached a
731 * complete set of records that the outer plan will return for
732 * these parameters.
733 *
734 * When we find a valid cache entry, we'll return the first
735 * tuple from it. If not found, we'll create a cache entry and
736 * then try to fetch a tuple from the outer scan. If we find
737 * one there, we'll try to cache it.
738 */
739
740 /* see if we've got anything cached for the current parameters */
741 entry = cache_lookup(node, &found);
742
743 if (found && entry->complete)
744 {
745 node->stats.cache_hits += 1; /* stats update */
746
747 /*
748 * Set last_tuple and entry so that the state
749 * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
750 * tuple for these parameters.
751 */
752 node->last_tuple = entry->tuplehead;
753 node->entry = entry;
754
755 /* Fetch the first cached tuple, if there is one */
756 if (entry->tuplehead)
757 {
758 node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;
759
760 slot = node->ss.ps.ps_ResultTupleSlot;
761 ExecStoreMinimalTuple(entry->tuplehead->mintuple,
762 slot, false);
763
764 return slot;
765 }
766
767 /* The cache entry is void of any tuples. */
768 node->mstatus = MEMO_END_OF_SCAN;
769 return NULL;
770 }
771
772 /* Handle cache miss */
773 node->stats.cache_misses += 1; /* stats update */
774
775 if (found)
776 {
777 /*
778 * A cache entry was found, but the scan for that entry
779 * did not run to completion. We'll just remove all
780 * tuples and start again. It might be tempting to
781 * continue where we left off, but there's no guarantee
782 * the outer node will produce the tuples in the same
783 * order as it did last time.
784 */
785 entry_purge_tuples(node, entry);
786 }
787
788 /* Scan the outer node for a tuple to cache */
789 outerNode = outerPlanState(node);
790 outerslot = ExecProcNode(outerNode);
791 if (TupIsNull(outerslot))
792 {
793 /*
794 * cache_lookup may have returned NULL due to failure to
795 * free enough cache space, so ensure we don't do anything
796 * here that assumes it worked. There's no need to go into
797 * bypass mode here as we're setting mstatus to end of
798 * scan.
799 */
800 if (likely(entry))
801 entry->complete = true;
802
803 node->mstatus = MEMO_END_OF_SCAN;
804 return NULL;
805 }
806
807 node->entry = entry;
808
809 /*
810 * If we failed to create the entry or failed to store the
811 * tuple in the entry, then go into bypass mode.
812 */
813 if (unlikely(entry == NULL ||
814 !cache_store_tuple(node, outerslot)))
815 {
816 node->stats.cache_overflows += 1; /* stats update */
817
818 node->mstatus = MEMO_CACHE_BYPASS_MODE;
819
820 /*
821 * No need to clear out last_tuple as we'll stay in bypass
822 * mode until the end of the scan.
823 */
824 }
825 else
826 {
827 /*
828 * If we only expect a single row from this scan then we
829 * can mark that we're not expecting more. This allows
830 * cache lookups to work even when the scan has not been
831 * executed to completion.
832 */
833 entry->complete = node->singlerow;
834 node->mstatus = MEMO_FILLING_CACHE;
835 }
836
837 slot = node->ss.ps.ps_ResultTupleSlot;
838 ExecCopySlot(slot, outerslot);
839 return slot;
840 }
841
842 case MEMO_CACHE_FETCH_NEXT_TUPLE:
843 {
844 /* We shouldn't be in this state if these are not set */
845 Assert(node->entry != NULL);
846 Assert(node->last_tuple != NULL);
847
848 /* Skip to the next tuple to output */
849 node->last_tuple = node->last_tuple->next;
850
851 /* No more tuples in the cache */
852 if (node->last_tuple == NULL)
853 {
854 node->mstatus = MEMO_END_OF_SCAN;
855 return NULL;
856 }
857
858 slot = node->ss.ps.ps_ResultTupleSlot;
859 ExecStoreMinimalTuple(node->last_tuple->mintuple, slot,
860 false);
861
862 return slot;
863 }
864
865 case MEMO_FILLING_CACHE:
866 {
867 TupleTableSlot *outerslot;
868 MemoizeEntry *entry = node->entry;
869
870 /* entry should already have been set by MEMO_CACHE_LOOKUP */
871 Assert(entry != NULL);
872
873 /*
874 * When in the MEMO_FILLING_CACHE state, we've just had a
875 * cache miss and are populating the cache with the current
876 * scan tuples.
877 */
878 outerNode = outerPlanState(node);
879 outerslot = ExecProcNode(outerNode);
880 if (TupIsNull(outerslot))
881 {
882 /* No more tuples. Mark it as complete */
883 entry->complete = true;
884 node->mstatus = MEMO_END_OF_SCAN;
885 return NULL;
886 }
887
888 /*
889 * Validate if the planner properly set the singlerow flag. It
890 * should only set that if each cache entry can, at most,
891 * return 1 row.
892 */
893 if (unlikely(entry->complete))
894 elog(ERROR, "cache entry already complete");
895
896 /* Record the tuple in the current cache entry */
897 if (unlikely(!cache_store_tuple(node, outerslot)))
898 {
899 /* Couldn't store it? Handle overflow */
900 node->stats.cache_overflows += 1; /* stats update */
901
902 node->mstatus = MEMO_CACHE_BYPASS_MODE;
903
904 /*
905 * No need to clear out entry or last_tuple as we'll stay
906 * in bypass mode until the end of the scan.
907 */
908 }
909
910 slot = node->ss.ps.ps_ResultTupleSlot;
911 ExecCopySlot(slot, outerslot);
912 return slot;
913 }
914
915 case MEMO_CACHE_BYPASS_MODE:
916 {
917 TupleTableSlot *outerslot;
918
919 /*
920 * When in bypass mode we just continue to read tuples without
921 * caching. We need to wait until the next rescan before we
922 * can come out of this mode.
923 */
924 outerNode = outerPlanState(node);
925 outerslot = ExecProcNode(outerNode);
926 if (TupIsNull(outerslot))
927 {
928 node->mstatus = MEMO_END_OF_SCAN;
929 return NULL;
930 }
931
932 slot = node->ss.ps.ps_ResultTupleSlot;
933 ExecCopySlot(slot, outerslot);
934 return slot;
935 }
936
937 case MEMO_END_OF_SCAN:
938
939 /*
940 * We've already returned NULL for this scan, but just in case
941 * something calls us again by mistake.
942 */
943 return NULL;
944
945 default:
946 elog(ERROR, "unrecognized memoize state: %d",
947 (int) node->mstatus);
948 return NULL;
949 } /* switch */
950}

References Assert(), build_hash_table(), MemoizeInstrumentation::cache_hits, cache_lookup(), MemoizeInstrumentation::cache_misses, MemoizeInstrumentation::cache_overflows, cache_store_tuple(), castNode, CHECK_FOR_INTERRUPTS, MemoizeEntry::complete, elog, MemoizeState::entry, entry_purge_tuples(), ERROR, ExecCopySlot(), ExecProcNode(), ExecStoreMinimalTuple(), MemoizeState::hashtable, MemoizeState::last_tuple, likely, MEMO_CACHE_BYPASS_MODE, MEMO_CACHE_FETCH_NEXT_TUPLE, MEMO_CACHE_LOOKUP, MEMO_END_OF_SCAN, MEMO_FILLING_CACHE, MemoizeTuple::mintuple, MemoizeState::mstatus, MemoizeTuple::next, outerPlanState, PlanState::plan, ScanState::ps, PlanState::ps_ExprContext, PlanState::ps_ResultTupleSlot, ResetExprContext, MemoizeState::singlerow, MemoizeState::ss, MemoizeState::stats, TupIsNull, MemoizeEntry::tuplehead, and unlikely.

Referenced by ExecInitMemoize().
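ExecMemoize is a five-state machine driven by node->mstatus: MEMO_CACHE_LOOKUP on the first call for a parameter set, MEMO_CACHE_FETCH_NEXT_TUPLE while replaying a cached entry, MEMO_FILLING_CACHE while reading the subplan and caching its output, MEMO_CACHE_BYPASS_MODE once the current entry cannot be kept within the memory budget, and MEMO_END_OF_SCAN when the scan is exhausted. A compressed standalone sketch of those transitions (illustrative only; it omits the singlerow shortcut and models none of the executor APIs):

#include <stdio.h>
#include <stdbool.h>

enum { CACHE_LOOKUP, FETCH_NEXT_TUPLE, FILLING_CACHE, BYPASS_MODE, END_OF_SCAN };

/* Toy inputs standing in for "found a complete cache entry?",
 * "did the cached list / outer plan produce another tuple?" and
 * "did the tuple fit within the memory budget?". */
static int next_state(int state, bool cache_hit, bool have_tuple, bool store_ok)
{
    switch (state)
    {
        case CACHE_LOOKUP:
            if (cache_hit)
                return have_tuple ? FETCH_NEXT_TUPLE : END_OF_SCAN;
            if (!have_tuple)
                return END_OF_SCAN;             /* outer plan returned nothing */
            return store_ok ? FILLING_CACHE : BYPASS_MODE;
        case FETCH_NEXT_TUPLE:
            return have_tuple ? FETCH_NEXT_TUPLE : END_OF_SCAN;
        case FILLING_CACHE:
            if (!have_tuple)
                return END_OF_SCAN;             /* entry is now complete */
            return store_ok ? FILLING_CACHE : BYPASS_MODE;
        case BYPASS_MODE:
            return have_tuple ? BYPASS_MODE : END_OF_SCAN;
        default:
            return END_OF_SCAN;
    }
}

int main(void)
{
    /* A cache miss followed by two tuples from the subplan, then end of scan. */
    int s = CACHE_LOOKUP;
    s = next_state(s, false, true, true);   /* miss: start filling the cache */
    s = next_state(s, false, true, true);   /* keep filling */
    s = next_state(s, false, false, true);  /* outer plan exhausted: end of scan */
    printf("final state: %d\n", s);
    return 0;
}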

◆ ExecMemoizeEstimate()

void ExecMemoizeEstimate(MemoizeState *node, ParallelContext *pcxt)

Definition at line 1190 of file nodeMemoize.c.

1192{
1193 Size size;
1194
1195 /* don't need this if not instrumenting or no workers */
1196 if (!node->ss.ps.instrument || pcxt->nworkers == 0)
1197 return;
1198
1199 size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
1200 size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
1201 shm_toc_estimate_chunk(&pcxt->estimator, size);
1202 shm_toc_estimate_keys(&pcxt->estimator, 1);
1203}

References add_size(), ParallelContext::estimator, PlanState::instrument, mul_size(), ParallelContext::nworkers, ScanState::ps, shm_toc_estimate_chunk, shm_toc_estimate_keys, and MemoizeState::ss.

Referenced by ExecParallelEstimate().

◆ ExecMemoizeInitializeDSM()

void ExecMemoizeInitializeDSM(MemoizeState *node, ParallelContext *pcxt)

Definition at line 1211 of file nodeMemoize.c.

1213{
1214 Size size;
1215
1216 /* don't need this if not instrumenting or no workers */
1217 if (!node->ss.ps.instrument || pcxt->nworkers == 0)
1218 return;
1219
1220 size = offsetof(SharedMemoizeInfo, sinstrument)
1221 + pcxt->nworkers * sizeof(MemoizeInstrumentation);
1222 node->shared_info = shm_toc_allocate(pcxt->toc, size);
1223 /* ensure any unfilled slots will contain zeroes */
1224 memset(node->shared_info, 0, size);
1225 node->shared_info->num_workers = pcxt->nworkers;
1226 shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
1227 node->shared_info);
1228}

References PlanState::instrument, SharedMemoizeInfo::num_workers, ParallelContext::nworkers, PlanState::plan, Plan::plan_node_id, ScanState::ps, MemoizeState::shared_info, shm_toc_allocate(), shm_toc_insert(), MemoizeState::ss, and ParallelContext::toc.

Referenced by ExecParallelInitializeDSM().
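Both the estimate and the DSM initialization size the shared area as a fixed SharedMemoizeInfo header followed by one MemoizeInstrumentation slot per worker, i.e. offsetof(SharedMemoizeInfo, sinstrument) + nworkers * sizeof(MemoizeInstrumentation). A standalone sketch of that flexible-array sizing pattern with stand-in structs (not the PostgreSQL definitions):

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for MemoizeInstrumentation / SharedMemoizeInfo */
typedef struct { unsigned long cache_hits, cache_misses, cache_evictions; } DemoInstrumentation;
typedef struct
{
    int num_workers;
    DemoInstrumentation sinstrument[];   /* flexible array member */
} DemoSharedInfo;

int main(void)
{
    int     nworkers = 4;
    size_t  size = offsetof(DemoSharedInfo, sinstrument)
                 + nworkers * sizeof(DemoInstrumentation);
    DemoSharedInfo *shared = malloc(size);

    memset(shared, 0, size);             /* unfilled slots must read as zeroes */
    shared->num_workers = nworkers;
    printf("shared area: %zu bytes for %d workers\n", size, nworkers);
    free(shared);
    return 0;
}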

◆ ExecMemoizeInitializeWorker()

void ExecMemoizeInitializeWorker(MemoizeState *node, ParallelWorkerContext *pwcxt)

Definition at line 1236 of file nodeMemoize.c.

1238{
1239 node->shared_info =
1240 shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
1241}

References PlanState::plan, Plan::plan_node_id, ScanState::ps, MemoizeState::shared_info, shm_toc_lookup(), MemoizeState::ss, and ParallelWorkerContext::toc.

Referenced by ExecParallelInitializeWorker().

◆ ExecMemoizeRetrieveInstrumentation()

void ExecMemoizeRetrieveInstrumentation(MemoizeState *node)

Definition at line 1249 of file nodeMemoize.c.

1251{
1252 Size size;
1253 SharedMemoizeInfo *si;
1254
1255 if (node->shared_info == NULL)
1256 return;
1257
1258 size = offsetof(SharedMemoizeInfo, sinstrument)
1259 + node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
1260 si = palloc(size);
1261 memcpy(si, node->shared_info, size);
1262 node->shared_info = si;
1263}

References SharedMemoizeInfo::num_workers, palloc(), and MemoizeState::shared_info.

Referenced by ExecParallelRetrieveInstrumentation().

◆ ExecReScanMemoize()

void ExecReScanMemoize(MemoizeState *node)

Definition at line 1140 of file nodeMemoize.c.

1142{
1143 PlanState *outerPlan = outerPlanState(node);
1144
1145 /* Mark that we must lookup the cache for a new set of parameters */
1146 node->mstatus = MEMO_CACHE_LOOKUP;
1147
1148 /* nullify pointers used for the last scan */
1149 node->entry = NULL;
1150 node->last_tuple = NULL;
1151
1152 /*
1153 * if chgParam of subnode is not null then plan will be re-scanned by
1154 * first ExecProcNode.
1155 */
1156 if (outerPlan->chgParam == NULL)
1157 ExecReScan(outerPlan);
1158
1159 /*
1160 * Purge the entire cache if a parameter changed that is not part of the
1161 * cache key.
1162 */
1163 if (bms_nonempty_difference(outerPlan->chgParam, node->keyparamids))
1164 cache_purge_all(node);
1165}

References bms_nonempty_difference(), cache_purge_all(), MemoizeState::entry, ExecReScan(), MemoizeState::keyparamids, MemoizeState::last_tuple, MEMO_CACHE_LOOKUP, MemoizeState::mstatus, outerPlan, and outerPlanState.

Referenced by ExecReScan().

◆ MemoizeHash_equal()

static bool MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1, const MemoizeKey *key2)

Definition at line 221 of file nodeMemoize.c.

224{
225 MemoizeState *mstate = (MemoizeState *) tb->private_data;
226 ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
227 TupleTableSlot *tslot = mstate->tableslot;
228 TupleTableSlot *pslot = mstate->probeslot;
229
230 /* probeslot should have already been prepared by prepare_probe_slot() */
231 ExecStoreMinimalTuple(key1->params, tslot, false);
232
233 if (mstate->binary_mode)
234 {
235 MemoryContext oldcontext;
236 int numkeys = mstate->nkeys;
237 bool match = true;
238
239 oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
240
241 slot_getallattrs(tslot);
242 slot_getallattrs(pslot);
243
244 for (int i = 0; i < numkeys; i++)
245 {
246 CompactAttribute *attr;
247
248 if (tslot->tts_isnull[i] != pslot->tts_isnull[i])
249 {
250 match = false;
251 break;
252 }
253
254 /* both NULL? they're equal */
255 if (tslot->tts_isnull[i])
256 continue;
257
258 /* perform binary comparison on the two datums */
259 attr = TupleDescCompactAttr(tslot->tts_tupleDescriptor, i);
260 if (!datum_image_eq(tslot->tts_values[i], pslot->tts_values[i],
261 attr->attbyval, attr->attlen))
262 {
263 match = false;
264 break;
265 }
266 }
267
268 MemoryContextSwitchTo(oldcontext);
269 return match;
270 }
271 else
272 {
273 econtext->ecxt_innertuple = tslot;
274 econtext->ecxt_outertuple = pslot;
275 return ExecQual(mstate->cache_eq_expr, econtext);
276 }
277}

References CompactAttribute::attbyval, CompactAttribute::attlen, MemoizeState::binary_mode, MemoizeState::cache_eq_expr, datum_image_eq(), ExecQual(), ExecStoreMinimalTuple(), i, MemoryContextSwitchTo(), MemoizeState::nkeys, MemoizeKey::params, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ExprContext, slot_getallattrs(), MemoizeState::ss, MemoizeState::tableslot, and TupleDescCompactAttr().

◆ MemoizeHash_hash()

static uint32 MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key)

Definition at line 158 of file nodeMemoize.c.

160{
161 MemoizeState *mstate = (MemoizeState *) tb->private_data;
162 ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
163 MemoryContext oldcontext;
164 TupleTableSlot *pslot = mstate->probeslot;
165 uint32 hashkey = 0;
166 int numkeys = mstate->nkeys;
167
168 oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
169
170 if (mstate->binary_mode)
171 {
172 for (int i = 0; i < numkeys; i++)
173 {
174 /* combine successive hashkeys by rotating */
175 hashkey = pg_rotate_left32(hashkey, 1);
176
177 if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */
178 {
179 CompactAttribute *attr;
180 uint32 hkey;
181
182 attr = TupleDescCompactAttr(pslot->tts_tupleDescriptor, i);
183
184 hkey = datum_image_hash(pslot->tts_values[i], attr->attbyval, attr->attlen);
185
186 hashkey ^= hkey;
187 }
188 }
189 }
190 else
191 {
192 FmgrInfo *hashfunctions = mstate->hashfunctions;
193 Oid *collations = mstate->collations;
194
195 for (int i = 0; i < numkeys; i++)
196 {
197 /* combine successive hashkeys by rotating */
198 hashkey = pg_rotate_left32(hashkey, 1);
199
200 if (!pslot->tts_isnull[i]) /* treat nulls as having hash key 0 */
201 {
202 uint32 hkey;
203
204 hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i],
205 collations[i], pslot->tts_values[i]));
206 hashkey ^= hkey;
207 }
208 }
209 }
210
211 MemoryContextSwitchTo(oldcontext);
212 return murmurhash32(hashkey);
213}

References CompactAttribute::attbyval, CompactAttribute::attlen, MemoizeState::binary_mode, MemoizeState::collations, datum_image_hash(), DatumGetUInt32(), FunctionCall1Coll(), MemoizeState::hashfunctions, i, MemoryContextSwitchTo(), murmurhash32(), MemoizeState::nkeys, pg_rotate_left32(), MemoizeState::probeslot, ScanState::ps, PlanState::ps_ExprContext, MemoizeState::ss, and TupleDescCompactAttr().
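Per-key hashes are combined by rotating the running value left one bit and XORing in each key's hash, with NULL keys contributing zero, and the combined value is then finalized with murmurhash32(). A standalone sketch of the same combining scheme; the finalizer below applies the standard 32-bit Murmur3 finishing steps, the same mixing that hashfn.h's murmurhash32() performs:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* 32-bit Murmur3 finalizer */
static uint32_t murmur32_finalize(uint32_t h)
{
    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;
    return h;
}

static uint32_t rotl32(uint32_t x, int n)
{
    return (x << n) | (x >> (32 - n));
}

/* Combine per-key hashes the way MemoizeHash_hash does: rotate the running
 * hash left one bit, XOR in the key's hash, treat NULL keys as zero, then
 * finalize the result. */
static uint32_t combine_key_hashes(const uint32_t *keyhashes, const bool *isnull, int nkeys)
{
    uint32_t hashkey = 0;

    for (int i = 0; i < nkeys; i++)
    {
        hashkey = rotl32(hashkey, 1);
        if (!isnull[i])
            hashkey ^= keyhashes[i];
    }
    return murmur32_finalize(hashkey);
}

int main(void)
{
    uint32_t hashes[3] = {0xdeadbeef, 0x12345678, 0};
    bool     nulls[3] = {false, false, true};   /* third key is NULL */

    printf("combined hash: 0x%08x\n", combine_key_hashes(hashes, nulls, 3));
    return 0;
}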

◆ prepare_probe_slot()

static inline void prepare_probe_slot(MemoizeState *mstate, MemoizeKey *key)

Definition at line 302 of file nodeMemoize.c.

304{
305 TupleTableSlot *pslot = mstate->probeslot;
306 TupleTableSlot *tslot = mstate->tableslot;
307 int numKeys = mstate->nkeys;
308
309 ExecClearTuple(pslot);
310
311 if (key == NULL)
312 {
313 ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
314 MemoryContext oldcontext;
315
316 oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
317
318 /* Set the probeslot's values based on the current parameter values */
319 for (int i = 0; i < numKeys; i++)
320 pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
321 econtext,
322 &pslot->tts_isnull[i]);
323
324 MemoryContextSwitchTo(oldcontext);
325 }
326 else
327 {
328 /* Process the key's MinimalTuple and store the values in probeslot */
329 ExecStoreMinimalTuple(key->params, tslot, false);
330 slot_getallattrs(tslot);
331 memcpy(pslot->tts_values, tslot->tts_values, sizeof(Datum) * numKeys);
332 memcpy(pslot->tts_isnull, tslot->tts_isnull, sizeof(bool) * numKeys);
333 }
334
335 ExecStoreVirtualTuple(pslot);
336}

References ExprContext::ecxt_per_tuple_memory, ExecClearTuple(), ExecEvalExpr(), ExecStoreMinimalTuple(), ExecStoreVirtualTuple(), i, sort-test::key, MemoryContextSwitchTo(), MemoizeState::nkeys, MemoizeState::param_exprs, MemoizeState::probeslot, ScanState::ps, PlanState::ps_ExprContext, slot_getallattrs(), MemoizeState::ss, MemoizeState::tableslot, TupleTableSlot::tts_isnull, and TupleTableSlot::tts_values.

Referenced by cache_lookup(), cache_reduce_memory(), and cache_store_tuple().

◆ remove_cache_entry()

static void remove_cache_entry(MemoizeState *mstate, MemoizeEntry *entry)

Definition at line 374 of file nodeMemoize.c.

376{
377 MemoizeKey *key = entry->key;
378
379 dlist_delete(&entry->key->lru_node);
380
381 /* Remove all of the tuples from this entry */
382 entry_purge_tuples(mstate, entry);
383
384 /*
385 * Update memory accounting. entry_purge_tuples should have already
386 * subtracted the memory used for each cached tuple. Here we just update
387 * the amount used by the entry itself.
388 */
389 mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);
390
391 /* Remove the entry from the cache */
392 memoize_delete_item(mstate->hashtable, entry);
393
394 pfree(key->params);
395 pfree(key);
396}

References dlist_delete(), EMPTY_ENTRY_MEMORY_BYTES, entry_purge_tuples(), MemoizeState::hashtable, MemoizeEntry::key, sort-test::key, MemoizeKey::lru_node, MemoizeState::mem_used, and pfree().

Referenced by cache_reduce_memory().