/*-------------------------------------------------------------------------
 *
 * nodeMemoize.c
 *    Routines to handle caching of results from parameterized nodes
 *
 * Portions Copyright (c) 2021-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeMemoize.c
 *
 * Memoize nodes are intended to sit above parameterized nodes in the plan
 * tree in order to cache results from them.  The intention here is that a
 * repeat scan with a parameter value that has already been seen by the node
 * can fetch tuples from the cache rather than having to re-scan the outer
 * node all over again.  The query planner may choose to make use of one of
 * these when it thinks rescans for previously seen values are likely enough
 * to warrant adding the additional node.
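 *
 * For example (plan shape only; a sketch rather than captured EXPLAIN
 * output, with the table and index names invented for illustration), a
 * Memoize node typically appears on the inner side of a parameterized
 * nested loop:
 *
 *    Nested Loop
 *      ->  Seq Scan on t1
 *      ->  Memoize
 *            Cache Key: t1.x
 *            ->  Index Scan using t2_x_idx on t2
 *                  Index Cond: (x = t1.x)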
 *
 * The method of cache we use is a hash table.  When the cache fills, we
 * never spill tuples to disk; instead, we evict the least recently used
 * cache entry from the cache.  We remember the least recently used entry by
 * always pushing new entries and entries we look for onto the tail of a
 * doubly linked list.  This means that older items always bubble to the top
 * of this LRU list.
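 *
 * As a worked illustration (not code from this file), suppose the memory
 * budget only allows three entries to be held and lookups arrive for the
 * parameter values A, B, C, A and then D.  The LRU list, head first,
 * evolves like this:
 *
 *    lookup(A)    [A]
 *    lookup(B)    [A, B]
 *    lookup(C)    [A, B, C]
 *    lookup(A)    [B, C, A]    (existing entry moved to the tail)
 *    lookup(D)    [C, A, D]    (B, at the head, evicted to make room)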
 *
 * Sometimes our callers won't run their scans to completion.  For example,
 * a semi-join only needs to run until it finds a matching tuple, and once
 * it does, the join operator skips to the next outer tuple and does not
 * execute the inner side again on that scan.  Because of this, we must keep
 * track of when a cache entry is complete, and by default, we know it is
 * when we run out of tuples to read during the scan.  However, there are
 * cases where we can mark the cache entry as complete without exhausting
 * the scan of all tuples.  One case is unique joins, where the join
 * operator knows that there will only be at most one match for any given
 * outer tuple.  In order to support such cases we allow the "singlerow"
 * option to be set for the cache.  This option marks the cache entry as
 * complete after we read the first tuple from the subnode.
 *
 * It's possible when we're filling the cache for a given set of parameters
 * that we're unable to free enough memory to store any more tuples.  If
 * this happens then we'll have already evicted all other cache entries.
 * When caching another tuple would cause us to exceed our memory budget, we
 * must free the entry that we're currently populating and move the state
 * machine into MEMO_CACHE_BYPASS_MODE.  This means that we'll not attempt
 * to cache any further tuples for this particular scan; we don't have the
 * memory for it.  The state machine will be reset again on the next rescan.
 * If the memory requirements to cache the next parameter's tuples are less
 * demanding, then that may allow us to start putting useful entries back
 * into the cache again.
 *
 *
 * INTERFACE ROUTINES
 *    ExecMemoize         - lookup cache, exec subplan when not found
 *    ExecInitMemoize     - initialize node and subnodes
 *    ExecEndMemoize      - shutdown node and subnodes
 *    ExecReScanMemoize   - rescan the memoize node
 *
 *    ExecMemoizeEstimate                 estimates DSM space needed for parallel plan
 *    ExecMemoizeInitializeDSM            initialize DSM for parallel plan
 *    ExecMemoizeInitializeWorker         attach to DSM info in parallel worker
 *    ExecMemoizeRetrieveInstrumentation  get instrumentation from worker
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "common/hashfn.h"
#include "executor/executor.h"
#include "executor/nodeMemoize.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"

/* States of the ExecMemoize state machine */
#define MEMO_CACHE_LOOKUP           1   /* Attempt to perform a cache lookup */
#define MEMO_CACHE_FETCH_NEXT_TUPLE 2   /* Get another tuple from the cache */
#define MEMO_FILLING_CACHE          3   /* Read outer node to fill cache */
#define MEMO_CACHE_BYPASS_MODE      4   /* Bypass mode.  Just read from our
                                         * subplan without caching anything */
#define MEMO_END_OF_SCAN            5   /* Ready for rescan */
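
/*
 * A rough sketch of the state transitions (derived from ExecMemoize() below;
 * an illustration, not an authoritative diagram):
 *
 *    MEMO_CACHE_LOOKUP -- complete cache hit --------> MEMO_CACHE_FETCH_NEXT_TUPLE
 *    MEMO_CACHE_LOOKUP -- miss, first tuple cached --> MEMO_FILLING_CACHE
 *    MEMO_CACHE_LOOKUP -- miss, cache overflow ------> MEMO_CACHE_BYPASS_MODE
 *    MEMO_FILLING_CACHE -- cache overflow -----------> MEMO_CACHE_BYPASS_MODE
 *    any state -- cached tuples/outer scan exhausted -> MEMO_END_OF_SCAN
 *
 * ExecReScanMemoize() resets the machine to MEMO_CACHE_LOOKUP.
 */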

/* Helper macros for memory accounting */
#define EMPTY_ENTRY_MEMORY_BYTES(e) (sizeof(MemoizeEntry) + \
                                     sizeof(MemoizeKey) + \
                                     (e)->key->params->t_len)
#define CACHE_TUPLE_BYTES(t)        (sizeof(MemoizeTuple) + \
                                     (t)->mintuple->t_len)
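
/*
 * For example (a sketch that mirrors the validation loop in ExecEndMemoize()
 * below), the total memory charged for one cache entry is the empty-entry
 * overhead plus the cost of each cached tuple:
 *
 *    mem = EMPTY_ENTRY_MEMORY_BYTES(entry);
 *    for (tuple = entry->tuplehead; tuple != NULL; tuple = tuple->next)
 *        mem += CACHE_TUPLE_BYTES(tuple);
 */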

/*
 * MemoizeTuple
 *    Stores an individually cached tuple
 */
typedef struct MemoizeTuple
{
    MinimalTuple mintuple;      /* Cached tuple */
    struct MemoizeTuple *next;  /* The next tuple with the same parameter
                                 * values or NULL if it's the last one */
} MemoizeTuple;

/*
 * MemoizeKey
 *    The hash table key for cached entries plus the LRU list link
 */
typedef struct MemoizeKey
{
    MinimalTuple params;
    dlist_node  lru_node;       /* Pointer to next/prev key in LRU list */
} MemoizeKey;

/*
 * MemoizeEntry
 *    The data struct that the cache hash table stores
 */
typedef struct MemoizeEntry
{
    MemoizeKey *key;            /* Hash key for hash table lookups */
    MemoizeTuple *tuplehead;    /* Pointer to the first tuple or NULL if no
                                 * tuples are cached for this entry */
    uint32      hash;           /* Hash value (cached) */
    char        status;         /* Hash status */
    bool        complete;       /* Did we read the outer plan to completion? */
} MemoizeEntry;


#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_SCOPE static inline
#define SH_DECLARE
#include "lib/simplehash.h"

static uint32 MemoizeHash_hash(struct memoize_hash *tb,
                               const MemoizeKey *key);
static bool MemoizeHash_equal(struct memoize_hash *tb,
                              const MemoizeKey *key1,
                              const MemoizeKey *key2);

#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_KEY key
#define SH_HASH_KEY(tb, key) MemoizeHash_hash(tb, key)
#define SH_EQUAL(tb, a, b) MemoizeHash_equal(tb, a, b)
#define SH_SCOPE static inline
#define SH_STORE_HASH
#define SH_GET_HASH(tb, a) a->hash
#define SH_DEFINE
#include "lib/simplehash.h"
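
/*
 * Note: the inclusion of simplehash.h above generates the memoize_create(),
 * memoize_insert(), memoize_lookup(), memoize_delete_item() and iteration
 * functions used throughout this file (the names follow from SH_PREFIX).
 */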

/*
 * MemoizeHash_hash
 *    Hash function for simplehash hashtable.  'key' is unused here as we
 *    require that all table lookups first populate the MemoizeState's
 *    probeslot with the key values to be looked up.
 */
static uint32
MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key)
{
    MemoizeState *mstate = (MemoizeState *) tb->private_data;
    TupleTableSlot *pslot = mstate->probeslot;
    uint32      hashkey = 0;
    int         numkeys = mstate->nkeys;

    if (mstate->binary_mode)
    {
        for (int i = 0; i < numkeys; i++)
        {
            /* combine successive hashkeys by rotating */
            hashkey = pg_rotate_left32(hashkey, 1);

            if (!pslot->tts_isnull[i])  /* treat nulls as having hash key 0 */
            {
                FormData_pg_attribute *attr;
                uint32      hkey;

                attr = &pslot->tts_tupleDescriptor->attrs[i];

                hkey = datum_image_hash(pslot->tts_values[i], attr->attbyval, attr->attlen);

                hashkey ^= hkey;
            }
        }
    }
    else
    {
        FmgrInfo   *hashfunctions = mstate->hashfunctions;
        Oid        *collations = mstate->collations;

        for (int i = 0; i < numkeys; i++)
        {
            /* combine successive hashkeys by rotating */
            hashkey = pg_rotate_left32(hashkey, 1);

            if (!pslot->tts_isnull[i])  /* treat nulls as having hash key 0 */
            {
                uint32      hkey;

                hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i],
                                                        collations[i], pslot->tts_values[i]));
                hashkey ^= hkey;
            }
        }
    }

    return murmurhash32(hashkey);
}
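
/*
 * Illustrative lookup protocol (a sketch; cache_lookup() and
 * cache_reduce_memory() below do this for real): since the key argument is
 * ignored by the hash and equality functions, callers must populate the
 * probeslot before probing the table:
 *
 *    prepare_probe_slot(mstate, NULL);
 *    entry = memoize_lookup(mstate->hashtable, NULL);
 */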

/*
 * MemoizeHash_equal
 *    Equality function for confirming hash value matches during a hash
 *    table lookup.  'key2' is never used.  Instead the MemoizeState's
 *    probeslot is always populated with details of what's being looked up.
 */
static bool
MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1,
                  const MemoizeKey *key2)
{
    MemoizeState *mstate = (MemoizeState *) tb->private_data;
    ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
    TupleTableSlot *tslot = mstate->tableslot;
    TupleTableSlot *pslot = mstate->probeslot;

    /* probeslot should have already been prepared by prepare_probe_slot() */
    ExecStoreMinimalTuple(key1->params, tslot, false);

    if (mstate->binary_mode)
    {
        int         numkeys = mstate->nkeys;

        slot_getallattrs(tslot);
        slot_getallattrs(pslot);

        for (int i = 0; i < numkeys; i++)
        {
            FormData_pg_attribute *attr;

            if (tslot->tts_isnull[i] != pslot->tts_isnull[i])
                return false;

            /* both NULL? they're equal */
            if (tslot->tts_isnull[i])
                continue;

            /* perform binary comparison on the two datums */
            attr = &tslot->tts_tupleDescriptor->attrs[i];
            if (!datum_image_eq(tslot->tts_values[i], pslot->tts_values[i],
                                attr->attbyval, attr->attlen))
                return false;
        }
        return true;
    }
    else
    {
        econtext->ecxt_innertuple = tslot;
        econtext->ecxt_outertuple = pslot;
        return ExecQualAndReset(mstate->cache_eq_expr, econtext);
    }
}

/*
 * Initialize the hash table to empty.
 */
static void
build_hash_table(MemoizeState *mstate, uint32 size)
{
    /* Make a guess at a good size when we're not given a valid size. */
    if (size == 0)
        size = 1024;

    /* memoize_create will convert the size to a power of 2 */
    mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);
}

/*
 * prepare_probe_slot
 *    Populate mstate's probeslot with the values from the tuple stored
 *    in 'key'.  If 'key' is NULL, then perform the population by evaluating
 *    mstate's param_exprs.
 */
static inline void
prepare_probe_slot(MemoizeState *mstate, MemoizeKey *key)
{
    TupleTableSlot *pslot = mstate->probeslot;
    TupleTableSlot *tslot = mstate->tableslot;
    int         numKeys = mstate->nkeys;

    ExecClearTuple(pslot);

    if (key == NULL)
    {
        /* Set the probeslot's values based on the current parameter values */
        for (int i = 0; i < numKeys; i++)
            pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
                                                mstate->ss.ps.ps_ExprContext,
                                                &pslot->tts_isnull[i]);
    }
    else
    {
        /* Process the key's MinimalTuple and store the values in probeslot */
        ExecStoreMinimalTuple(key->params, tslot, false);
        slot_getallattrs(tslot);
        memcpy(pslot->tts_values, tslot->tts_values, sizeof(Datum) * numKeys);
        memcpy(pslot->tts_isnull, tslot->tts_isnull, sizeof(bool) * numKeys);
    }

    ExecStoreVirtualTuple(pslot);
}

/*
 * entry_purge_tuples
 *    Remove all tuples from the cache entry pointed to by 'entry'.  This
 *    leaves an empty cache entry.  Also, update the memory accounting to
 *    reflect the removal of the tuples.
 */
static inline void
entry_purge_tuples(MemoizeState *mstate, MemoizeEntry *entry)
{
    MemoizeTuple *tuple = entry->tuplehead;
    uint64      freed_mem = 0;

    while (tuple != NULL)
    {
        MemoizeTuple *next = tuple->next;

        freed_mem += CACHE_TUPLE_BYTES(tuple);

        /* Free memory used for this tuple */
        pfree(tuple->mintuple);
        pfree(tuple);

        tuple = next;
    }

    entry->complete = false;
    entry->tuplehead = NULL;

    /* Update the memory accounting */
    mstate->mem_used -= freed_mem;
}

/*
 * remove_cache_entry
 *    Remove 'entry' from the cache and free memory used by it.
 */
static void
remove_cache_entry(MemoizeState *mstate, MemoizeEntry *entry)
{
    MemoizeKey *key = entry->key;

    dlist_delete(&entry->key->lru_node);

    /* Remove all of the tuples from this entry */
    entry_purge_tuples(mstate, entry);

    /*
     * Update memory accounting.  entry_purge_tuples should have already
     * subtracted the memory used for each cached tuple.  Here we just update
     * the amount used by the entry itself.
     */
    mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);

    /* Remove the entry from the cache */
    memoize_delete_item(mstate->hashtable, entry);

    pfree(key->params);
    pfree(key);
}

/*
 * cache_purge_all
 *    Remove all items from the cache
 */
static void
cache_purge_all(MemoizeState *mstate)
{
    uint64      evictions = mstate->hashtable->members;
    PlanState  *pstate = (PlanState *) mstate;

    /*
     * Likely the most efficient way to remove all items is to just reset the
     * memory context for the cache and then rebuild a fresh hash table.  This
     * saves having to remove each item one by one and pfree each cached tuple
     */
    MemoryContextReset(mstate->tableContext);

    /* Make the hash table the same size as the original size */
    build_hash_table(mstate, ((Memoize *) pstate->plan)->est_entries);

    /* reset the LRU list */
    dlist_init(&mstate->lru_list);
    mstate->last_tuple = NULL;
    mstate->entry = NULL;

    mstate->mem_used = 0;

    /* XXX should we add something new to track these purges? */
    mstate->stats.cache_evictions += evictions; /* Update Stats */
}

/*
 * cache_reduce_memory
 *    Evict older and less recently used items from the cache in order to
 *    reduce the memory consumption back to something below the
 *    MemoizeState's mem_limit.
 *
 * 'specialkey', if not NULL, causes the function to return false if the entry
 * which the key belongs to is removed from the cache.
 */
static bool
cache_reduce_memory(MemoizeState *mstate, MemoizeKey *specialkey)
{
    bool        specialkey_intact = true;   /* for now */
    dlist_mutable_iter iter;
    uint64      evictions = 0;

    /* Update peak memory usage */
    if (mstate->mem_used > mstate->stats.mem_peak)
        mstate->stats.mem_peak = mstate->mem_used;

    /* We expect only to be called when we've gone over budget on memory */
    Assert(mstate->mem_used > mstate->mem_limit);

    /* Start the eviction process starting at the head of the LRU list. */
    dlist_foreach_modify(iter, &mstate->lru_list)
    {
        MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
        MemoizeEntry *entry;

        /*
         * Populate the hash probe slot in preparation for looking up this
         * LRU entry.
         */
        prepare_probe_slot(mstate, key);

        /*
         * Ideally the LRU list pointers would be stored in the entry itself
         * rather than in the key.  Unfortunately, we can't do that as the
         * simplehash.h code may resize the table and allocate new memory for
         * entries which would result in those pointers pointing to the old
         * buckets.  However, it's fine to use the key to store this as that's
         * only referenced by a pointer in the entry, which of course follows
         * the entry whenever the hash table is resized.  Since we only have a
         * pointer to the key here, we must perform a hash table lookup to
         * find the entry that the key belongs to.
         */
        entry = memoize_lookup(mstate->hashtable, NULL);

        /*
         * Sanity check that we found the entry belonging to the LRU list
         * item.  A misbehaving hash or equality function could cause the
         * entry not to be found or the wrong entry to be found.
         */
        if (unlikely(entry == NULL || entry->key != key))
            elog(ERROR, "could not find memoization table entry");

        /*
         * If we're being called to free memory while the cache is being
         * populated with new tuples, then we'd better take some care as we
         * could end up freeing the entry which 'specialkey' belongs to.
         * Generally callers will pass 'specialkey' as the key for the cache
         * entry which is currently being populated, so we must set
         * 'specialkey_intact' to false to inform the caller the specialkey
         * entry has been removed.
         */
        if (key == specialkey)
            specialkey_intact = false;

        /*
         * Finally remove the entry.  This will remove from the LRU list too.
         */
        remove_cache_entry(mstate, entry);

        evictions++;

        /* Exit if we've freed enough memory */
        if (mstate->mem_used <= mstate->mem_limit)
            break;
    }

    mstate->stats.cache_evictions += evictions; /* Update Stats */

    return specialkey_intact;
}

/*
 * cache_lookup
 *    Perform a lookup to see if we've already cached tuples based on the
 *    scan's current parameters.  If we find an existing entry we move it to
 *    the end of the LRU list, set *found to true then return it.  If we
 *    don't find an entry then we create a new one and add it to the end of
 *    the LRU list.  We also update cache memory accounting and remove older
 *    entries if we go over the memory budget.  If we managed to free enough
 *    memory we return the new entry, else we return NULL.
 *
 * Callers can assume we'll never return NULL when *found is true.
 */
static MemoizeEntry *
cache_lookup(MemoizeState *mstate, bool *found)
{
    MemoizeKey *key;
    MemoizeEntry *entry;
    MemoryContext oldcontext;

    /* prepare the probe slot with the current scan parameters */
    prepare_probe_slot(mstate, NULL);

    /*
     * Add the new entry to the cache.  No need to pass a valid key since the
     * hash function uses mstate's probeslot, which we populated above.
     */
    entry = memoize_insert(mstate->hashtable, NULL, found);

    if (*found)
    {
        /*
         * Move existing entry to the tail of the LRU list to mark it as the
         * most recently used item.
         */
        dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);

        return entry;
    }

    oldcontext = MemoryContextSwitchTo(mstate->tableContext);

    /* Allocate a new key */
    entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
    key->params = ExecCopySlotMinimalTuple(mstate->probeslot);

    /* Update the total cache memory utilization */
    mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);

    /* Initialize this entry */
    entry->complete = false;
    entry->tuplehead = NULL;

    /*
     * Since this is the most recently used entry, push this entry onto the
     * end of the LRU list.
     */
    dlist_push_tail(&mstate->lru_list, &entry->key->lru_node);

    mstate->last_tuple = NULL;

    MemoryContextSwitchTo(oldcontext);

    /*
     * If we've gone over our memory budget, then we'll free up some space in
     * the cache.
     */
    if (mstate->mem_used > mstate->mem_limit)
    {
        /*
         * Try to free up some memory.  It's highly unlikely that we'll fail
         * to do so here since the entry we've just added is yet to contain
         * any tuples and we're able to remove any other entry to reduce the
         * memory consumption.
         */
        if (unlikely(!cache_reduce_memory(mstate, key)))
            return NULL;

        /*
         * The process of removing entries from the cache may have caused the
         * code in simplehash.h to shuffle elements to earlier buckets in the
         * hash table.  If it has, we'll need to find the entry again by
         * performing a lookup.  Fortunately, we can detect if this has
         * happened by seeing if the entry is still in use and that the key
         * pointer matches our expected key.
         */
        if (entry->status != memoize_SH_IN_USE || entry->key != key)
        {
            /*
             * We need to repopulate the probeslot as lookups performed during
             * the cache evictions above will have stored some other key.
             */
            prepare_probe_slot(mstate, key);

            /* Re-find the newly added entry */
            entry = memoize_lookup(mstate->hashtable, NULL);
            Assert(entry != NULL);
        }
    }

    return entry;
}

/*
 * cache_store_tuple
 *    Add the tuple stored in 'slot' to the mstate's current cache entry.
 *    The cache entry must have already been made with cache_lookup().
 *    mstate's last_tuple field must point to the tail of mstate->entry's
 *    list of tuples.
 */
static bool
cache_store_tuple(MemoizeState *mstate, TupleTableSlot *slot)
{
    MemoizeTuple *tuple;
    MemoizeEntry *entry = mstate->entry;
    MemoryContext oldcontext;

    Assert(slot != NULL);
    Assert(entry != NULL);

    oldcontext = MemoryContextSwitchTo(mstate->tableContext);

    tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
    tuple->mintuple = ExecCopySlotMinimalTuple(slot);
    tuple->next = NULL;

    /* Account for the memory we just consumed */
    mstate->mem_used += CACHE_TUPLE_BYTES(tuple);

    if (entry->tuplehead == NULL)
    {
        /*
         * This is the first tuple for this entry, so just point the list
         * head to it.
         */
        entry->tuplehead = tuple;
    }
    else
    {
        /* push this tuple onto the tail of the list */
        mstate->last_tuple->next = tuple;
    }

    mstate->last_tuple = tuple;
    MemoryContextSwitchTo(oldcontext);

    /*
     * If we've gone over our memory budget then free up some space in the
     * cache.
     */
    if (mstate->mem_used > mstate->mem_limit)
    {
        MemoizeKey *key = entry->key;

        if (!cache_reduce_memory(mstate, key))
            return false;

        /*
         * The process of removing entries from the cache may have caused the
         * code in simplehash.h to shuffle elements to earlier buckets in the
         * hash table.  If it has, we'll need to find the entry again by
         * performing a lookup.  Fortunately, we can detect if this has
         * happened by seeing if the entry is still in use and that the key
         * pointer matches our expected key.
         */
        if (entry->status != memoize_SH_IN_USE || entry->key != key)
        {
            /*
             * We need to repopulate the probeslot as lookups performed during
             * the cache evictions above will have stored some other key.
             */
            prepare_probe_slot(mstate, key);

            /* Re-find the entry */
            mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
            Assert(entry != NULL);
        }
    }

    return true;
}

static TupleTableSlot *
ExecMemoize(PlanState *pstate)
{
    MemoizeState *node = castNode(MemoizeState, pstate);
    PlanState  *outerNode;
    TupleTableSlot *slot;

    switch (node->mstatus)
    {
        case MEMO_CACHE_LOOKUP:
            {
                MemoizeEntry *entry;
                TupleTableSlot *outerslot;
                bool        found;

                Assert(node->entry == NULL);

                /*
                 * We're only ever in this state for the first call of the
                 * scan.  Here we have a look to see if we've already seen the
                 * current parameters before and if we have already cached a
                 * complete set of records that the outer plan will return for
                 * these parameters.
                 *
                 * When we find a valid cache entry, we'll return the first
                 * tuple from it.  If not found, we'll create a cache entry
                 * and then try to fetch a tuple from the outer scan.  If we
                 * find one there, we'll try to cache it.
                 */

                /* see if we've got anything cached for the current parameters */
                entry = cache_lookup(node, &found);

                if (found && entry->complete)
                {
                    node->stats.cache_hits += 1;    /* stats update */

                    /*
                     * Set last_tuple and entry so that the state
                     * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
                     * tuple for these parameters.
                     */
                    node->last_tuple = entry->tuplehead;
                    node->entry = entry;

                    /* Fetch the first cached tuple, if there is one */
                    if (entry->tuplehead)
                    {
                        node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;

                        slot = node->ss.ps.ps_ResultTupleSlot;
                        ExecStoreMinimalTuple(entry->tuplehead->mintuple,
                                              slot, false);

                        return slot;
                    }

                    /* The cache entry is void of any tuples. */
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                /* Handle cache miss */
                node->stats.cache_misses += 1;  /* stats update */

                if (found)
                {
                    /*
                     * A cache entry was found, but the scan for that entry
                     * did not run to completion.  We'll just remove all
                     * tuples and start again.  It might be tempting to
                     * continue where we left off, but there's no guarantee
                     * the outer node will produce the tuples in the same
                     * order as it did last time.
                     */
                    entry_purge_tuples(node, entry);
                }

                /* Scan the outer node for a tuple to cache */
                outerNode = outerPlanState(node);
                outerslot = ExecProcNode(outerNode);
                if (TupIsNull(outerslot))
                {
                    /*
                     * cache_lookup may have returned NULL due to failure to
                     * free enough cache space, so ensure we don't do anything
                     * here that assumes it worked.  There's no need to go
                     * into bypass mode here as we're setting mstatus to end
                     * of scan.
                     */
                    if (likely(entry))
                        entry->complete = true;

                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                node->entry = entry;

                /*
                 * If we failed to create the entry or failed to store the
                 * tuple in the entry, then go into bypass mode.
                 */
                if (unlikely(entry == NULL ||
                             !cache_store_tuple(node, outerslot)))
                {
                    node->stats.cache_overflows += 1;   /* stats update */

                    node->mstatus = MEMO_CACHE_BYPASS_MODE;

                    /*
                     * No need to clear out last_tuple as we'll stay in bypass
                     * mode until the end of the scan.
                     */
                }
                else
                {
                    /*
                     * If we only expect a single row from this scan then we
                     * can mark that we're not expecting more.  This allows
                     * cache lookups to work even when the scan has not been
                     * executed to completion.
                     */
                    entry->complete = node->singlerow;
                    node->mstatus = MEMO_FILLING_CACHE;
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecCopySlot(slot, outerslot);
                return slot;
            }

        case MEMO_CACHE_FETCH_NEXT_TUPLE:
            {
                /* We shouldn't be in this state if these are not set */
                Assert(node->entry != NULL);
                Assert(node->last_tuple != NULL);

                /* Skip to the next tuple to output */
                node->last_tuple = node->last_tuple->next;

                /* No more tuples in the cache */
                if (node->last_tuple == NULL)
                {
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecStoreMinimalTuple(node->last_tuple->mintuple, slot,
                                      false);

                return slot;
            }

        case MEMO_FILLING_CACHE:
            {
                TupleTableSlot *outerslot;
                MemoizeEntry *entry = node->entry;

                /* entry should already have been set by MEMO_CACHE_LOOKUP */
                Assert(entry != NULL);

                /*
                 * When in the MEMO_FILLING_CACHE state, we've just had a
                 * cache miss and are populating the cache with the current
                 * scan tuples.
                 */
                outerNode = outerPlanState(node);
                outerslot = ExecProcNode(outerNode);
                if (TupIsNull(outerslot))
                {
                    /* No more tuples.  Mark it as complete */
                    entry->complete = true;
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                /*
                 * Validate if the planner properly set the singlerow flag.
                 * It should only set that if each cache entry can, at most,
                 * return 1 row.
                 */
                if (unlikely(entry->complete))
                    elog(ERROR, "cache entry already complete");

                /* Record the tuple in the current cache entry */
                if (unlikely(!cache_store_tuple(node, outerslot)))
                {
                    /* Couldn't store it?  Handle overflow */
                    node->stats.cache_overflows += 1;   /* stats update */

                    node->mstatus = MEMO_CACHE_BYPASS_MODE;

                    /*
                     * No need to clear out entry or last_tuple as we'll stay
                     * in bypass mode until the end of the scan.
                     */
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecCopySlot(slot, outerslot);
                return slot;
            }

        case MEMO_CACHE_BYPASS_MODE:
            {
                TupleTableSlot *outerslot;

                /*
                 * When in bypass mode we just continue to read tuples without
                 * caching.  We need to wait until the next rescan before we
                 * can come out of this mode.
                 */
                outerNode = outerPlanState(node);
                outerslot = ExecProcNode(outerNode);
                if (TupIsNull(outerslot))
                {
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecCopySlot(slot, outerslot);
                return slot;
            }

        case MEMO_END_OF_SCAN:

            /*
             * We've already returned NULL for this scan, but just in case
             * something calls us again by mistake.
             */
            return NULL;

        default:
            elog(ERROR, "unrecognized memoize state: %d",
                 (int) node->mstatus);
            return NULL;
    }                           /* switch */
}

MemoizeState *
ExecInitMemoize(Memoize *node, EState *estate, int eflags)
{
    MemoizeState *mstate = makeNode(MemoizeState);
    Plan       *outerNode;
    int         i;
    int         nkeys;
    Oid        *eqfuncoids;

    /* check for unsupported flags */
    Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

    mstate->ss.ps.plan = (Plan *) node;
    mstate->ss.ps.state = estate;
    mstate->ss.ps.ExecProcNode = ExecMemoize;

    /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
    ExecAssignExprContext(estate, &mstate->ss.ps);

    outerNode = outerPlan(node);
    outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);

    /*
     * Initialize return slot and type.  No need to initialize projection
     * info because this node doesn't do projections.
     */
    ExecInitResultTupleSlotTL(&mstate->ss.ps, &TTSOpsMinimalTuple);
    mstate->ss.ps.ps_ProjInfo = NULL;

    /*
     * Initialize scan slot and type.
     */
    ExecCreateScanSlotFromOuterPlan(estate, &mstate->ss, &TTSOpsMinimalTuple);

    /*
     * Set the state machine to lookup the cache.  We won't find anything
     * until we cache something, but this saves a special case to create the
     * first entry.
     */
    mstate->mstatus = MEMO_CACHE_LOOKUP;

    mstate->nkeys = nkeys = node->numKeys;
    mstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
    mstate->tableslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
                                                 &TTSOpsMinimalTuple);
    mstate->probeslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
                                                 &TTSOpsVirtual);

    mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
    mstate->collations = node->collations;  /* Just point directly to the
                                             * plan data */
    mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));

    eqfuncoids = palloc(nkeys * sizeof(Oid));

    for (i = 0; i < nkeys; i++)
    {
        Oid         hashop = node->hashOperators[i];
        Oid         left_hashfn;
        Oid         right_hashfn;
        Expr       *param_expr = (Expr *) list_nth(node->param_exprs, i);

        if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
            elog(ERROR, "could not find hash function for hash operator %u",
                 hashop);

        fmgr_info(left_hashfn, &mstate->hashfunctions[i]);

        mstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) mstate);
        eqfuncoids[i] = get_opcode(hashop);
    }

    mstate->cache_eq_expr = ExecBuildParamSetEqual(mstate->hashkeydesc,
                                                   &TTSOpsMinimalTuple,
                                                   &TTSOpsVirtual,
                                                   eqfuncoids,
                                                   node->collations,
                                                   node->param_exprs,
                                                   (PlanState *) mstate);

    pfree(eqfuncoids);
    mstate->mem_used = 0;

    /* Limit the total memory consumed by the cache to this */
    mstate->mem_limit = get_hash_memory_limit();

    /* A memory context dedicated for the cache */
    mstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
                                                 "MemoizeHashTable",
                                                 ALLOCSET_DEFAULT_SIZES);

    dlist_init(&mstate->lru_list);
    mstate->last_tuple = NULL;
    mstate->entry = NULL;

    /*
     * Mark if we can assume the cache entry is completed after we get the
     * first record for it.  Some callers might not call us again after
     * getting the first match.  e.g. A join operator performing a unique
     * join is able to skip to the next outer tuple after getting the first
     * matching inner tuple.  In this case, the cache entry is complete after
     * getting the first tuple.  This allows us to mark it as so.
     */
    mstate->singlerow = node->singlerow;
    mstate->keyparamids = node->keyparamids;

    /*
     * Record if the cache keys should be compared bit by bit, or logically
     * using the type's hash equality operator
     */
    mstate->binary_mode = node->binary_mode;

    /* Zero the statistics counters */
    memset(&mstate->stats, 0, sizeof(MemoizeInstrumentation));

    /* Allocate and set up the actual cache */
    build_hash_table(mstate, node->est_entries);

    return mstate;
}

void
ExecEndMemoize(MemoizeState *node)
{
#ifdef USE_ASSERT_CHECKING
    /* Validate the memory accounting code is correct in assert builds. */
    {
        int         count;
        uint64      mem = 0;
        memoize_iterator i;
        MemoizeEntry *entry;

        memoize_start_iterate(node->hashtable, &i);

        count = 0;
        while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
        {
            MemoizeTuple *tuple = entry->tuplehead;

            mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
            while (tuple != NULL)
            {
                mem += CACHE_TUPLE_BYTES(tuple);
                tuple = tuple->next;
            }
            count++;
        }

        Assert(count == node->hashtable->members);
        Assert(mem == node->mem_used);
    }
#endif

    /*
     * When ending a parallel worker, copy the statistics gathered by the
     * worker back into shared memory so that it can be picked up by the main
     * process to report in EXPLAIN ANALYZE.
     */
    if (node->shared_info != NULL && IsParallelWorker())
    {
        MemoizeInstrumentation *si;

        /* Make mem_peak available for EXPLAIN */
        if (node->stats.mem_peak == 0)
            node->stats.mem_peak = node->mem_used;

        Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
        si = &node->shared_info->sinstrument[ParallelWorkerNumber];
        memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
    }

    /* Remove the cache context */
    MemoryContextDelete(node->tableContext);

    ExecClearTuple(node->ss.ss_ScanTupleSlot);
    /* must drop pointer to cache result tuple */
    ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

    /*
     * free exprcontext
     */
    ExecFreeExprContext(&node->ss.ps);

    /*
     * shut down the subplan
     */
    ExecEndNode(outerPlanState(node));
}

void
ExecReScanMemoize(MemoizeState *node)
{
    PlanState  *outerPlan = outerPlanState(node);

    /* Mark that we must lookup the cache for a new set of parameters */
    node->mstatus = MEMO_CACHE_LOOKUP;

    /* nullify pointers used for the last scan */
    node->entry = NULL;
    node->last_tuple = NULL;

    /*
     * if chgParam of subnode is not null then plan will be re-scanned by
     * first ExecProcNode.
     */
    if (outerPlan->chgParam == NULL)
        ExecReScan(outerPlan);

    /*
     * Purge the entire cache if a parameter changed that is not part of the
     * cache key.
     */
    if (bms_nonempty_difference(outerPlan->chgParam, node->keyparamids))
        cache_purge_all(node);
}

/*
 * ExecEstimateCacheEntryOverheadBytes
 *    For use in the query planner to help it estimate the amount of memory
 *    required to store a single entry in the cache.
 */
double
ExecEstimateCacheEntryOverheadBytes(double ntuples)
{
    return sizeof(MemoizeEntry) + sizeof(MemoizeKey) + sizeof(MemoizeTuple) *
        ntuples;
}
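
/*
 * Illustrative arithmetic (a sketch with assumed struct sizes, not figures
 * from this file): on a build where sizeof(MemoizeEntry) == 24,
 * sizeof(MemoizeKey) == 24 and sizeof(MemoizeTuple) == 16, an entry expected
 * to hold 10 tuples costs 24 + 24 + 16 * 10 = 208 bytes of overhead.  This
 * excludes the tuple data and parameter values themselves, which the
 * executor accounts for separately via t_len in the macros above.
 */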

/* ----------------------------------------------------------------
 *                      Parallel Query Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *      ExecMemoizeEstimate
 *
 *      Estimate space required to propagate memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeEstimate(MemoizeState *node, ParallelContext *pcxt)
{
    Size        size;

    /* don't need this if not instrumenting or no workers */
    if (!node->ss.ps.instrument || pcxt->nworkers == 0)
        return;

    size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
    size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
    shm_toc_estimate_chunk(&pcxt->estimator, size);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
}

/* ----------------------------------------------------------------
 *      ExecMemoizeInitializeDSM
 *
 *      Initialize DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeInitializeDSM(MemoizeState *node, ParallelContext *pcxt)
{
    Size        size;

    /* don't need this if not instrumenting or no workers */
    if (!node->ss.ps.instrument || pcxt->nworkers == 0)
        return;

    size = offsetof(SharedMemoizeInfo, sinstrument)
        + pcxt->nworkers * sizeof(MemoizeInstrumentation);
    node->shared_info = shm_toc_allocate(pcxt->toc, size);
    /* ensure any unfilled slots will contain zeroes */
    memset(node->shared_info, 0, size);
    node->shared_info->num_workers = pcxt->nworkers;
    shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
                   node->shared_info);
}

/* ----------------------------------------------------------------
 *      ExecMemoizeInitializeWorker
 *
 *      Attach worker to DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeInitializeWorker(MemoizeState *node, ParallelWorkerContext *pwcxt)
{
    node->shared_info =
        shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
}

/* ----------------------------------------------------------------
 *      ExecMemoizeRetrieveInstrumentation
 *
 *      Transfer memoize statistics from DSM to private memory.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeRetrieveInstrumentation(MemoizeState *node)
{
    Size        size;
    SharedMemoizeInfo *si;

    if (node->shared_info == NULL)
        return;

    size = offsetof(SharedMemoizeInfo, sinstrument)
        + node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
    si = palloc(size);
    memcpy(si, node->shared_info, size);
    node->shared_info = si;
}