/*-------------------------------------------------------------------------
 *
 * slab.c
 *	  SLAB allocator definitions.
 *
 * SLAB is a MemoryContext implementation designed for cases where large
 * numbers of equally-sized objects can be allocated and freed efficiently
 * with minimal memory wastage and fragmentation.
 *
 *
 * Portions Copyright (c) 2017-2024, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/slab.c
 *
 *
 * NOTE:
 * The constant allocation size allows significant simplification and various
 * optimizations over more general purpose allocators. The blocks are carved
 * into chunks of exactly the right size, wasting only the space required to
 * MAXALIGN the allocated chunks.
 *
 * Slab can also help reduce memory fragmentation in cases where longer-lived
 * chunks remain stored on blocks while most of the other chunks have already
 * been pfree'd. We give priority to putting new allocations into the
 * "fullest" block. This helps avoid having too many sparsely used blocks
 * around and allows blocks to more easily become completely unused, which
 * allows them to be eventually free'd.
 *
 * We identify the "fullest" block to put new allocations on by using a block
 * from the lowest populated element of the context's "blocklist" array.
 * This is an array of dlists containing blocks which we partition by the
 * number of free chunks each block has. Blocks with fewer free chunks are
 * stored in a lower indexed dlist array slot. Full blocks go on the 0th
 * element of the blocklist array. So that we don't have to have too many
 * elements in the array, each dlist in the array is responsible for a range
 * of free chunks. When a chunk is palloc'd or pfree'd we may need to move
 * the block onto another dlist if the number of free chunks crosses the
 * range boundary that the current list is responsible for. Having just a
 * few blocklist elements reduces the number of times we must move the block
 * onto another dlist element.
 *
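 * As a concrete illustration (the numbers depend on chunksPerBlock, so they
 * are illustrative only): if each blocklist element covers a range of 32
 * free chunks, a block with 1-32 free chunks lives on blocklist[1] and a
 * block with 33 or more free chunks lives on blocklist[2]; the block only
 * moves between lists when its free chunk count crosses one of those
 * boundaries.
 *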
 * We keep track of free chunks within each block by using a block-level free
 * list. We consult this list when we allocate a new chunk in the block.
 * The free list is a linked list, the head of which is pointed to with
 * SlabBlock's freehead field. Each subsequent list item is stored in the
 * free chunk's memory. We ensure chunks are large enough to store this
 * address.
 *
 * When we allocate a new block, technically all chunks are free, however, to
 * avoid having to write out the entire block to set the linked list for the
 * free chunks for every chunk in the block, we instead store a pointer to
 * the next "unused" chunk on the block and keep track of how many of these
 * unused chunks there are. When a new block is malloc'd, all chunks are
 * unused. The unused pointer starts with the first chunk on the block and
 * as chunks are allocated, the unused pointer is incremented. As chunks are
 * pfree'd, the unused pointer never goes backwards. The unused pointer can
 * be thought of as a high watermark for the maximum number of chunks in the
 * block which have been in use concurrently. When a chunk is pfree'd the
 * chunk is put onto the head of the free list and the unused pointer is not
 * changed. We only consume more unused chunks if we run out of free chunks
 * on the free list. This method effectively gives priority to using
 * previously used chunks over previously unused chunks, which should perform
 * better due to CPU caching effects.
 *
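 * As a worked example of the above (illustrative only): on a new block with
 * 4 chunks, freehead is NULL and nunused is 4. Two allocations take chunks 0
 * and 1 via the unused pointer, leaving it at chunk 2 with nunused 2. If
 * chunk 0 is then pfree'd it becomes the head of the free list, and the next
 * allocation returns chunk 0 from the free list rather than consuming chunk
 * 2 from the unused pointer.
 *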
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "lib/ilist.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/memutils_internal.h"
#include "utils/memutils_memorychunk.h"

#define Slab_BLOCKHDRSZ	MAXALIGN(sizeof(SlabBlock))

#ifdef MEMORY_CONTEXT_CHECKING
/*
 * Size of the memory required to store the SlabContext.
 * MEMORY_CONTEXT_CHECKING builds need some extra memory for the isChunkFree
 * array.
 */
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)	\
	(sizeof(SlabContext) + ((chunksPerBlock) * sizeof(bool)))
#else
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)	sizeof(SlabContext)
#endif

/*
 * The number of partitions to divide the blocklist into based on their
 * number of free chunks. There must be at least 2.
 */
#define SLAB_BLOCKLIST_COUNT 3

/* The maximum number of completely empty blocks to keep around for reuse. */
#define SLAB_MAXIMUM_EMPTY_BLOCKS 10

/*
 * SlabContext is a specialized implementation of MemoryContext.
 */
typedef struct SlabContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Allocation parameters for this context: */
	uint32		chunkSize;		/* the requested (non-aligned) chunk size */
	uint32		fullChunkSize;	/* chunk size with chunk header and alignment */
	uint32		blockSize;		/* the size to make each block of chunks */
	int32		chunksPerBlock; /* number of chunks that fit in 1 block */
	int32		curBlocklistIndex;	/* index into the blocklist[] element
									 * containing the fullest blocks */
#ifdef MEMORY_CONTEXT_CHECKING
	bool	   *isChunkFree;	/* array to mark free chunks in a block during
								 * SlabCheck */
#endif

	int32		blocklist_shift;	/* number of bits to shift the nfree count
									 * by to get the index into blocklist[] */
	dclist_head emptyblocks;	/* empty blocks to use up first instead of
								 * mallocing new blocks */

	/*
	 * Blocks with free space, grouped by the number of free chunks they
	 * contain.  Completely full blocks are stored in the 0th element.
	 * Completely empty blocks are stored in emptyblocks or free'd if we have
	 * enough empty blocks already.
	 */
	dlist_head	blocklist[SLAB_BLOCKLIST_COUNT];
} SlabContext;

/*
 * SlabBlock
 *		Structure of a single slab block.
 *
 * slab: pointer back to the owning MemoryContext
 * nfree: number of chunks on the block which are unallocated
 * nunused: number of chunks on the block unallocated and not on the block's
 * freelist.
 * freehead: linked-list header storing a pointer to the first free chunk on
 * the block. Subsequent pointers are stored in the chunk's memory. NULL
 * indicates the end of the list.
 * unused: pointer to the next chunk which has yet to be used.
 * node: doubly-linked list node for the context's blocklist
 */
typedef struct SlabBlock
{
	SlabContext *slab;			/* owning context */
	int32		nfree;			/* number of free + unused chunks */
	int32		nunused;		/* number of unused chunks */
	MemoryChunk *freehead;		/* pointer to the first free chunk */
	MemoryChunk *unused;		/* pointer to the next unused chunk */
	dlist_node	node;			/* doubly-linked list node for blocklist[] */
} SlabBlock;


#define Slab_CHUNKHDRSZ sizeof(MemoryChunk)
#define SlabChunkGetPointer(chk)	\
	((void *) (((char *) (chk)) + sizeof(MemoryChunk)))

/*
 * SlabBlockGetChunk
 *		Obtain a pointer to the nth (0-based) chunk in the block
 */
#define SlabBlockGetChunk(slab, block, n) \
	((MemoryChunk *) ((char *) (block) + Slab_BLOCKHDRSZ	\
	 + ((n) * (slab)->fullChunkSize)))
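
/*
 * Under this layout a block looks like the sketch below (for orientation
 * only; the structs and macros above are authoritative):
 *
 *		[SlabBlock header][chunk 0][chunk 1] ... [chunk chunksPerBlock - 1]
 *
 * where each chunk slot occupies fullChunkSize bytes: a MemoryChunk header
 * followed by the caller-visible chunkSize bytes (plus a sentinel byte in
 * MEMORY_CONTEXT_CHECKING builds).
 */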

#if defined(MEMORY_CONTEXT_CHECKING) || defined(USE_ASSERT_CHECKING)

/*
 * SlabChunkIndex
 *		Get the 0-based index of how many chunks into the block the given
 *		chunk is.
 */
#define SlabChunkIndex(slab, block, chunk)	\
	(((char *) (chunk) - (char *) SlabBlockGetChunk(slab, block, 0)) / \
	 (slab)->fullChunkSize)

/*
 * SlabChunkMod
 *		A MemoryChunk should always be at an address which is a multiple of
 *		fullChunkSize starting from the 0th chunk position. This will return
 *		non-zero if it's not.
 */
#define SlabChunkMod(slab, block, chunk)	\
	(((char *) (chunk) - (char *) SlabBlockGetChunk(slab, block, 0)) % \
	 (slab)->fullChunkSize)

#endif

/*
 * SlabIsValid
 *		True iff set is a valid slab allocation set.
 */
#define SlabIsValid(set) (PointerIsValid(set) && IsA(set, SlabContext))

/*
 * SlabBlockIsValid
 *		True iff block is a valid block of slab allocation set.
 */
#define SlabBlockIsValid(block) \
	(PointerIsValid(block) && SlabIsValid((block)->slab))

/*
 * SlabBlocklistIndex
 *		Determine the blocklist index that a block should be in for the given
 *		number of free chunks.
 */
static inline int32
SlabBlocklistIndex(SlabContext *slab, int nfree)
{
	int32		index;
	int32		blocklist_shift = slab->blocklist_shift;

	Assert(nfree >= 0 && nfree <= slab->chunksPerBlock);

	/*
	 * Determine the blocklist index based on the number of free chunks. We
	 * must ensure that 0 free chunks is dedicated to index 0. Everything
	 * else must be >= 1 and < SLAB_BLOCKLIST_COUNT.
	 *
	 * To make this as efficient as possible, we exploit some two's complement
	 * arithmetic where we reverse the sign before bit shifting. This results
	 * in an nfree of 0 using index 0 and anything non-zero staying non-zero.
	 * This is exploiting 0 and -0 being the same in two's complement. When
	 * we're done, we just need to flip the sign back over again for a
	 * positive index.
	 */
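	/*
	 * In effect this computes index = ceil(nfree / 2^blocklist_shift); for
	 * example, with blocklist_shift of 5, an nfree of 0 gives index 0, 1-32
	 * give index 1 and 33-64 give index 2.
	 */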
	index = -((-nfree) >> blocklist_shift);

	if (nfree == 0)
		Assert(index == 0);
	else
		Assert(index > 0 && index < SLAB_BLOCKLIST_COUNT);

	return index;
}

/*
 * SlabFindNextBlockListIndex
 *		Search blocklist for blocks which have free chunks and return the
 *		index of the blocklist found containing at least 1 block with free
 *		chunks. If no block can be found we return 0.
 *
 * Note: We give priority to fuller blocks so that these are filled before
 * emptier blocks. This is done to increase the chances that mostly-empty
 * blocks will eventually become completely empty so they can be free'd.
 */
static int32
SlabFindNextBlockListIndex(SlabContext *slab)
{
	/* start at 1 as blocklist[0] is for full blocks. */
	for (int i = 1; i < SLAB_BLOCKLIST_COUNT; i++)
	{
		/* return the first found non-empty index */
		if (!dlist_is_empty(&slab->blocklist[i]))
			return i;
	}

	/* no blocks with free space */
	return 0;
}

/*
 * SlabGetNextFreeChunk
 *		Return the next free chunk in block and update the block to account
 *		for the returned chunk now being used.
 */
static inline MemoryChunk *
SlabGetNextFreeChunk(SlabContext *slab, SlabBlock *block)
{
	MemoryChunk *chunk;

	Assert(block->nfree > 0);

	if (block->freehead != NULL)
	{
		chunk = block->freehead;

		/*
		 * Pop the chunk from the linked list of free chunks. The pointer to
		 * the next free chunk is stored in the chunk itself.
		 */
		VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(MemoryChunk *));
		block->freehead = *(MemoryChunk **) SlabChunkGetPointer(chunk);

		/* check nothing stomped on the free chunk's memory */
		Assert(block->freehead == NULL ||
			   (block->freehead >= SlabBlockGetChunk(slab, block, 0) &&
				block->freehead <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1) &&
				SlabChunkMod(slab, block, block->freehead) == 0));
	}
	else
	{
		Assert(block->nunused > 0);

		chunk = block->unused;
		block->unused = (MemoryChunk *) (((char *) block->unused) + slab->fullChunkSize);
		block->nunused--;
	}

	block->nfree--;

	return chunk;
}

/*
 * SlabContextCreate
 *		Create a new Slab context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (must be statically allocated)
 * blockSize: allocation block size
 * chunkSize: allocation chunk size
 *
 * The Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1) may not exceed
 * MEMORYCHUNK_MAX_VALUE.
 * 'blockSize' may not exceed MEMORYCHUNK_MAX_BLOCKOFFSET.
 */
MemoryContext
SlabContextCreate(MemoryContext parent,
				  const char *name,
				  Size blockSize,
				  Size chunkSize)
{
	int			chunksPerBlock;
	Size		fullChunkSize;
	SlabContext *slab;
	int			i;

	/* ensure MemoryChunk's size is properly maxaligned */
	StaticAssertDecl(sizeof(MemoryChunk) == MAXALIGN(sizeof(MemoryChunk)),
					 "sizeof(MemoryChunk) is not maxaligned");
	Assert(blockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);

	/*
	 * Ensure there's enough space to store the pointer to the next free chunk
	 * in the memory of the (otherwise) unused allocation.
	 */
	if (chunkSize < sizeof(MemoryChunk *))
		chunkSize = sizeof(MemoryChunk *);

	/* length of the maxaligned chunk including the chunk header */
#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's always space for the sentinel byte */
	fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1);
#else
	fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize);
#endif

	Assert(fullChunkSize <= MEMORYCHUNK_MAX_VALUE);

	/* compute the number of chunks that will fit on each block */
	chunksPerBlock = (blockSize - Slab_BLOCKHDRSZ) / fullChunkSize;

	/* Make sure the block can store at least one chunk. */
	if (chunksPerBlock == 0)
		elog(ERROR, "block size %zu for slab is too small for %zu-byte chunks",
			 blockSize, chunkSize);


	slab = (SlabContext *) malloc(Slab_CONTEXT_HDRSZ(chunksPerBlock));
	if (slab == NULL)
	{
		MemoryContextStats(TopMemoryContext);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed while creating memory context \"%s\".",
						   name)));
	}

	/*
	 * Avoid writing code that can fail between here and MemoryContextCreate;
	 * we'd leak the header if we ereport in this stretch.
	 */

	/* Fill in SlabContext-specific header fields */
	slab->chunkSize = (uint32) chunkSize;
	slab->fullChunkSize = (uint32) fullChunkSize;
	slab->blockSize = (uint32) blockSize;
	slab->chunksPerBlock = chunksPerBlock;
	slab->curBlocklistIndex = 0;

	/*
	 * Compute a shift that guarantees that shifting chunksPerBlock with it is
	 * < SLAB_BLOCKLIST_COUNT - 1. The reason that we subtract 1 from
	 * SLAB_BLOCKLIST_COUNT in this calculation is that we reserve the 0th
	 * blocklist element for blocks which have no free chunks.
	 *
	 * We calculate the number of bits to shift by rather than a divisor to
	 * divide by as performing division each time we need to find the
	 * blocklist index would be much slower.
	 */
	slab->blocklist_shift = 0;
	while ((slab->chunksPerBlock >> slab->blocklist_shift) >= (SLAB_BLOCKLIST_COUNT - 1))
		slab->blocklist_shift++;
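
	/*
	 * For example (illustrative numbers only): with SLAB_BLOCKLIST_COUNT of 3
	 * and chunksPerBlock of 48, this loop settles on blocklist_shift of 5
	 * (48 >> 5 == 1), so SlabBlocklistIndex() sends blocks with 1-32 free
	 * chunks to blocklist[1] and blocks with 33-48 free chunks to
	 * blocklist[2].
	 */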

	/* initialize the list to store empty blocks to be reused */
	dclist_init(&slab->emptyblocks);

	/* initialize each blocklist slot */
	for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
		dlist_init(&slab->blocklist[i]);

#ifdef MEMORY_CONTEXT_CHECKING
	/* set the isChunkFree pointer right after the end of the context */
	slab->isChunkFree = (bool *) ((char *) slab + sizeof(SlabContext));
#endif

	/* Finally, do the type-independent part of context creation */
	MemoryContextCreate((MemoryContext) slab,
						T_SlabContext,
						MCTX_SLAB_ID,
						parent,
						name);

	return (MemoryContext) slab;
}
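
/*
 * Example usage (a sketch only; "MyObject" is a hypothetical caller-side
 * type, while SLAB_DEFAULT_BLOCK_SIZE comes from utils/memutils.h):
 *
 *		cxt = SlabContextCreate(CurrentMemoryContext, "my objects",
 *								SLAB_DEFAULT_BLOCK_SIZE, sizeof(MyObject));
 *		obj = (MyObject *) MemoryContextAlloc(cxt, sizeof(MyObject));
 *
 * Every allocation from such a context must request exactly the chunkSize
 * given at creation; any other size raises an ERROR in SlabAlloc().
 */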

/*
 * SlabReset
 *		Frees all memory which is allocated in the given set.
 *
 * The code simply frees all the blocks in the context - we don't keep any
 * keeper blocks or anything like that.
 */
void
SlabReset(MemoryContext context)
{
	SlabContext *slab = (SlabContext *) context;
	dlist_mutable_iter miter;
	int			i;

	Assert(SlabIsValid(slab));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	SlabCheck(context);
#endif

	/* release any retained empty blocks */
	dclist_foreach_modify(miter, &slab->emptyblocks)
	{
		SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);

		dclist_delete_from(&slab->emptyblocks, miter.cur);

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, slab->blockSize);
#endif
		free(block);
		context->mem_allocated -= slab->blockSize;
	}

	/* walk over blocklist and free the blocks */
	for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
	{
		dlist_foreach_modify(miter, &slab->blocklist[i])
		{
			SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);

			dlist_delete(miter.cur);

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, slab->blockSize);
#endif
			free(block);
			context->mem_allocated -= slab->blockSize;
		}
	}

	slab->curBlocklistIndex = 0;

	Assert(context->mem_allocated == 0);
}

/*
 * SlabDelete
 *		Free all memory which is allocated in the given context.
 */
void
SlabDelete(MemoryContext context)
{
	/* Reset to release all the SlabBlocks */
	SlabReset(context);
	/* And free the context header */
	free(context);
}

/*
 * Small helper for allocating a new chunk from a block, to avoid duplicating
 * the code between SlabAlloc() and SlabAllocFromNewBlock().
 */
static inline void *
SlabAllocSetupNewChunk(MemoryContext context, SlabBlock *block,
					   MemoryChunk *chunk, Size size)
{
	SlabContext *slab = (SlabContext *) context;

	/*
	 * Check that the chunk pointer is actually somewhere on the block and is
	 * aligned as expected.
	 */
	Assert(chunk >= SlabBlockGetChunk(slab, block, 0));
	Assert(chunk <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1));
	Assert(SlabChunkMod(slab, block, chunk) == 0);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, Slab_CHUNKHDRSZ);

	MemoryChunkSetHdrMask(chunk, block, MAXALIGN(slab->chunkSize), MCTX_SLAB_ID);

#ifdef MEMORY_CONTEXT_CHECKING
	/* slab mark to catch clobber of "unused" space */
	Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
	set_sentinel(MemoryChunkGetPointer(chunk), size);
	VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) + Slab_CHUNKHDRSZ +
							   slab->chunkSize,
							   slab->fullChunkSize -
							   (slab->chunkSize + Slab_CHUNKHDRSZ));
#endif

#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);

	return MemoryChunkGetPointer(chunk);
}

pg_noinline
static void *
SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
{
	SlabContext *slab = (SlabContext *) context;
	SlabBlock  *block;
	MemoryChunk *chunk;
	dlist_head *blocklist;
	int			blocklist_idx;

	/* to save allocating a new one, first check the empty blocks list */
	if (dclist_count(&slab->emptyblocks) > 0)
	{
		dlist_node *node = dclist_pop_head_node(&slab->emptyblocks);

		block = dlist_container(SlabBlock, node, node);

		/*
		 * SlabFree() should have left this block in a valid state with all
		 * chunks free.  Ensure that's the case.
		 */
		Assert(block->nfree == slab->chunksPerBlock);

		/* fetch the next chunk from this block */
		chunk = SlabGetNextFreeChunk(slab, block);
	}
	else
	{
		block = (SlabBlock *) malloc(slab->blockSize);

		if (unlikely(block == NULL))
			return MemoryContextAllocationFailure(context, size, flags);

		block->slab = slab;
		context->mem_allocated += slab->blockSize;

		/* use the first chunk in the new block */
		chunk = SlabBlockGetChunk(slab, block, 0);

		block->nfree = slab->chunksPerBlock - 1;
		block->unused = SlabBlockGetChunk(slab, block, 1);
		block->freehead = NULL;
		block->nunused = slab->chunksPerBlock - 1;
	}

	/* find the blocklist element for storing blocks with 1 used chunk */
	blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
	blocklist = &slab->blocklist[blocklist_idx];

	/* this better be empty.  We just added a block thinking it was */
	Assert(dlist_is_empty(blocklist));

	dlist_push_head(blocklist, &block->node);

	slab->curBlocklistIndex = blocklist_idx;

	return SlabAllocSetupNewChunk(context, block, chunk, size);
}

/*
 * SlabAllocInvalidSize
 *		Handle raising an ERROR for an invalid size request. We don't do this
 *		in slab alloc as calling the elog functions would force the compiler
 *		to set up the stack frame in SlabAlloc. For performance reasons, we
 *		want to avoid that.
 */
static pg_noinline void
pg_attribute_noreturn()
SlabAllocInvalidSize(MemoryContext context, Size size)
{
	SlabContext *slab = (SlabContext *) context;

	elog(ERROR, "unexpected alloc chunk size %zu (expected %u)", size,
		 slab->chunkSize);
}

/*
 * SlabAlloc
 *		Returns a pointer to a newly allocated memory chunk or raises an ERROR
 *		on allocation failure, or returns NULL when flags contains
 *		MCXT_ALLOC_NO_OOM. 'size' must be the same size as was specified
 *		during SlabContextCreate().
 *
 * This function should only contain the most common code paths. Everything
 * else should be in pg_noinline helper functions, thus avoiding the overhead
 * of creating a stack frame for the common cases. Allocating memory is often
 * a bottleneck in many workloads, so avoiding stack frame setup is
 * worthwhile. Helper functions should always directly return the newly
 * allocated memory so that we can just return that address directly as a tail
 * call.
 */
void *
SlabAlloc(MemoryContext context, Size size, int flags)
{
	SlabContext *slab = (SlabContext *) context;
	SlabBlock  *block;
	MemoryChunk *chunk;

	Assert(SlabIsValid(slab));

	/* sanity check that this is pointing to a valid blocklist */
	Assert(slab->curBlocklistIndex >= 0);
	Assert(slab->curBlocklistIndex <= SlabBlocklistIndex(slab, slab->chunksPerBlock));

	/*
	 * Make sure we only allow correct request size. This doubles as the
	 * MemoryContextCheckSize check.
	 */
	if (unlikely(size != slab->chunkSize))
		SlabAllocInvalidSize(context, size);

	if (unlikely(slab->curBlocklistIndex == 0))
	{
		/*
		 * Handle the case when there are no partially filled blocks
		 * available. This happens either when the last allocation took the
		 * last chunk in the block, or when SlabFree() free'd the final block.
		 */
		return SlabAllocFromNewBlock(context, size, flags);
	}
	else
	{
		dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
		int			new_blocklist_idx;

		Assert(!dlist_is_empty(blocklist));

		/* grab the block from the blocklist */
		block = dlist_head_element(SlabBlock, node, blocklist);

		/* make sure we actually got a valid block, with matching nfree */
		Assert(block != NULL);
		Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
		Assert(block->nfree > 0);

		/* fetch the next chunk from this block */
		chunk = SlabGetNextFreeChunk(slab, block);

		/* get the new blocklist index based on the new free chunk count */
		new_blocklist_idx = SlabBlocklistIndex(slab, block->nfree);

		/*
		 * Handle the case where the blocklist index changes. This also deals
		 * with blocks becoming full as only full blocks go at index 0.
		 */
		if (unlikely(slab->curBlocklistIndex != new_blocklist_idx))
		{
			dlist_delete_from(blocklist, &block->node);
			dlist_push_head(&slab->blocklist[new_blocklist_idx], &block->node);

			if (dlist_is_empty(blocklist))
				slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
		}
	}

	return SlabAllocSetupNewChunk(context, block, chunk, size);
}

/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 */
void
SlabFree(void *pointer)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	SlabBlock  *block;
	SlabContext *slab;
	int			curBlocklistIdx;
	int			newBlocklistIdx;

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);

	block = MemoryChunkGetBlock(chunk);

	/*
	 * For speed reasons we just Assert that the referenced block is good.
	 * Future field experience may show that this Assert had better become a
	 * regular runtime test-and-elog check.
	 */
	Assert(SlabBlockIsValid(block));
	slab = block->slab;

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
	if (!sentinel_ok(pointer, slab->chunkSize))
		elog(WARNING, "detected write past chunk end in %s %p",
			 slab->header.name, chunk);
#endif

	/* push this chunk onto the head of the block's free list */
	*(MemoryChunk **) pointer = block->freehead;
	block->freehead = chunk;

	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* don't wipe the free list MemoryChunk pointer stored in the chunk */
	wipe_mem((char *) pointer + sizeof(MemoryChunk *),
			 slab->chunkSize - sizeof(MemoryChunk *));
#endif

	curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
	newBlocklistIdx = SlabBlocklistIndex(slab, block->nfree);

	/*
	 * Check if the block needs to be moved to another element on the
	 * blocklist based on it now having 1 more free chunk.
	 */
	if (unlikely(curBlocklistIdx != newBlocklistIdx))
	{
		/* do the move */
		dlist_delete_from(&slab->blocklist[curBlocklistIdx], &block->node);
		dlist_push_head(&slab->blocklist[newBlocklistIdx], &block->node);

		/*
		 * The blocklist[curBlocklistIdx] may now be empty or we may now be
		 * able to use a lower-element blocklist. We'll need to redetermine
		 * what the slab->curBlocklistIndex is if the current blocklist was
		 * changed or if a lower element one was changed. We must ensure we
		 * use the list with the fullest block(s).
		 */
		if (slab->curBlocklistIndex >= curBlocklistIdx)
		{
			slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);

			/*
			 * We know there must be a block with at least 1 unused chunk as
			 * we just pfree'd one. Ensure curBlocklistIndex reflects this.
			 */
			Assert(slab->curBlocklistIndex > 0);
		}
	}

	/* Handle when a block becomes completely empty */
	if (unlikely(block->nfree == slab->chunksPerBlock))
	{
		/* remove the block */
		dlist_delete_from(&slab->blocklist[newBlocklistIdx], &block->node);

		/*
		 * To avoid thrashing malloc/free, we keep a list of empty blocks that
		 * we can reuse again instead of having to malloc a new one.
		 */
		if (dclist_count(&slab->emptyblocks) < SLAB_MAXIMUM_EMPTY_BLOCKS)
			dclist_push_head(&slab->emptyblocks, &block->node);
		else
		{
			/*
			 * When we have enough empty blocks stored already, we actually
			 * free the block.
			 */
#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, slab->blockSize);
#endif
			free(block);
			slab->header.mem_allocated -= slab->blockSize;
		}

		/*
		 * Check if we need to reset the blocklist index. This is required
		 * when the blocklist this block is on has become completely empty.
		 */
		if (slab->curBlocklistIndex == newBlocklistIdx &&
			dlist_is_empty(&slab->blocklist[newBlocklistIdx]))
			slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
	}
}

/*
 * SlabRealloc
 *		Change the allocated size of a chunk.
 *
 * As Slab is designed for allocating equally-sized chunks of memory, it can't
 * do an actual chunk size change. We try to be gentle and allow calls with
 * exactly the same size, as in that case we can simply return the same
 * chunk. When the size differs, we throw an error.
 *
 * We could also allow requests with size < chunkSize. That however seems
 * rather pointless - Slab is meant for chunks of constant size, and moreover
 * realloc is usually used to enlarge the chunk.
 */
void *
SlabRealloc(void *pointer, Size size, int flags)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	SlabBlock  *block;
	SlabContext *slab;

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);

	block = MemoryChunkGetBlock(chunk);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);

	/*
	 * Try to verify that we have a sane block pointer: the block header
	 * should reference a slab context. (We use a test-and-elog, not just
	 * Assert, because it seems highly likely that we're here in error in the
	 * first place.)
	 */
	if (!SlabBlockIsValid(block))
		elog(ERROR, "could not find block containing chunk %p", chunk);
	slab = block->slab;

	/* can't do actual realloc with slab, but let's try to be gentle */
	if (size == slab->chunkSize)
		return pointer;

	elog(ERROR, "slab allocator does not support realloc()");
	return NULL;				/* keep compiler quiet */
}

/*
 * SlabGetChunkContext
 *		Return the MemoryContext that 'pointer' belongs to.
 */
MemoryContext
SlabGetChunkContext(void *pointer)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	SlabBlock  *block;

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);

	block = MemoryChunkGetBlock(chunk);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);

	Assert(SlabBlockIsValid(block));

	return &block->slab->header;
}

/*
 * SlabGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 */
Size
SlabGetChunkSpace(void *pointer)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	SlabBlock  *block;
	SlabContext *slab;

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);

	block = MemoryChunkGetBlock(chunk);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);

	Assert(SlabBlockIsValid(block));
	slab = block->slab;

	return slab->fullChunkSize;
}

/*
 * SlabIsEmpty
 *		Is the slab empty of any allocated space?
 */
bool
SlabIsEmpty(MemoryContext context)
{
	Assert(SlabIsValid((SlabContext *) context));

	return (context->mem_allocated == 0);
}

/*
 * SlabStats
 *		Compute stats about memory consumption of a Slab context.
 *
 * printfunc: if not NULL, pass a human-readable stats string to this.
 * passthru: pass this pointer through to printfunc.
 * totals: if not NULL, add stats about this context into *totals.
 * print_to_stderr: print stats to stderr if true, elog otherwise.
 */
void
SlabStats(MemoryContext context,
		  MemoryStatsPrintFunc printfunc, void *passthru,
		  MemoryContextCounters *totals,
		  bool print_to_stderr)
{
	SlabContext *slab = (SlabContext *) context;
	Size		nblocks = 0;
	Size		freechunks = 0;
	Size		totalspace;
	Size		freespace = 0;
	int			i;

	Assert(SlabIsValid(slab));

	/* Include context header in totalspace */
	totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);

	/* Add the space consumed by blocks in the emptyblocks list */
	totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;

	for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
	{
		dlist_iter	iter;

		dlist_foreach(iter, &slab->blocklist[i])
		{
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

			nblocks++;
			totalspace += slab->blockSize;
			freespace += slab->fullChunkSize * block->nfree;
			freechunks += block->nfree;
		}
	}

	if (printfunc)
	{
		char		stats_string[200];

		/* XXX should we include free chunks on empty blocks? */
		snprintf(stats_string, sizeof(stats_string),
				 "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
				 totalspace, nblocks, dclist_count(&slab->emptyblocks),
				 freespace, freechunks, totalspace - freespace);
		printfunc(context, passthru, stats_string, print_to_stderr);
	}

	if (totals)
	{
		totals->nblocks += nblocks;
		totals->freechunks += freechunks;
		totals->totalspace += totalspace;
		totals->freespace += freespace;
	}
}


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * SlabCheck
 *		Walk through all blocks looking for inconsistencies.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
void
SlabCheck(MemoryContext context)
{
	SlabContext *slab = (SlabContext *) context;
	int			i;
	int			nblocks = 0;
	const char *name = slab->header.name;
	dlist_iter	iter;

	Assert(SlabIsValid(slab));
	Assert(slab->chunksPerBlock > 0);

	/*
	 * Have a look at the empty blocks. These should have all their chunks
	 * marked as free. Ensure that's the case.
	 */
	dclist_foreach(iter, &slab->emptyblocks)
	{
		SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

		if (block->nfree != slab->chunksPerBlock)
			elog(WARNING, "problem in slab %s: empty block %p should have %d free chunks but has %d chunks free",
				 name, block, slab->chunksPerBlock, block->nfree);
	}

	/* walk the non-empty block lists */
	for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
	{
		int			j,
					nfree;

		/* walk all blocks on this blocklist */
		dlist_foreach(iter, &slab->blocklist[i])
		{
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);
			MemoryChunk *cur_chunk;

			/*
			 * Make sure the number of free chunks (in the block header)
			 * matches the position in the blocklist.
			 */
			if (SlabBlocklistIndex(slab, block->nfree) != i)
				elog(WARNING, "problem in slab %s: block %p is on blocklist %d but should be on blocklist %d",
					 name, block, i, SlabBlocklistIndex(slab, block->nfree));

			/* make sure the block is not empty */
			if (block->nfree >= slab->chunksPerBlock)
				elog(WARNING, "problem in slab %s: empty block %p incorrectly stored on blocklist element %d",
					 name, block, i);

			/* make sure the slab pointer correctly points to this context */
			if (block->slab != slab)
				elog(WARNING, "problem in slab %s: bogus slab link in block %p",
					 name, block);

			/* reset the array of free chunks for this block */
			memset(slab->isChunkFree, 0, (slab->chunksPerBlock * sizeof(bool)));
			nfree = 0;

			/* walk through the block's free list chunks */
			cur_chunk = block->freehead;
			while (cur_chunk != NULL)
			{
				int			chunkidx = SlabChunkIndex(slab, block, cur_chunk);

				/*
				 * Ensure the free list link points to something on the block
				 * at an address aligned according to the full chunk size.
				 */
				if (cur_chunk < SlabBlockGetChunk(slab, block, 0) ||
					cur_chunk > SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1) ||
					SlabChunkMod(slab, block, cur_chunk) != 0)
					elog(WARNING, "problem in slab %s: bogus free list link %p in block %p",
						 name, cur_chunk, block);

				/* count the chunk and mark it free on the free chunk array */
				nfree++;
				slab->isChunkFree[chunkidx] = true;

				/* read pointer of the next free chunk */
				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(cur_chunk), sizeof(MemoryChunk *));
				cur_chunk = *(MemoryChunk **) SlabChunkGetPointer(cur_chunk);
			}

			/* check that the unused pointer matches what nunused claims */
			if (SlabBlockGetChunk(slab, block, slab->chunksPerBlock - block->nunused) !=
				block->unused)
				elog(WARNING, "problem in slab %s: mismatch detected between nunused chunks and unused pointer in block %p",
					 name, block);

			/*
			 * count the remaining free chunks that have yet to make it onto
			 * the block's free list.
			 */
			cur_chunk = block->unused;
			for (j = 0; j < block->nunused; j++)
			{
				int			chunkidx = SlabChunkIndex(slab, block, cur_chunk);

				/* count the chunk as free and mark it as so in the array */
				nfree++;
				if (chunkidx < slab->chunksPerBlock)
					slab->isChunkFree[chunkidx] = true;

				/* move forward 1 chunk */
				cur_chunk = (MemoryChunk *) (((char *) cur_chunk) + slab->fullChunkSize);
			}

			for (j = 0; j < slab->chunksPerBlock; j++)
			{
				if (!slab->isChunkFree[j])
				{
					MemoryChunk *chunk = SlabBlockGetChunk(slab, block, j);
					SlabBlock  *chunkblock;

					/* Allow access to the chunk header. */
					VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);

					chunkblock = (SlabBlock *) MemoryChunkGetBlock(chunk);

					/* Disallow access to the chunk header. */
					VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);

					/*
					 * check the chunk's blockoffset correctly points back to
					 * the block
					 */
					if (chunkblock != block)
						elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
							 name, block, chunk);

					/* check the sentinel byte is intact */
					Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
					if (!sentinel_ok(chunk, Slab_CHUNKHDRSZ + slab->chunkSize))
						elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
							 name, block, chunk);
				}
			}

			/*
			 * Make sure we got the expected number of free chunks (as tracked
			 * in the block header).
			 */
			if (nfree != block->nfree)
				elog(WARNING, "problem in slab %s: nfree in block %p is %d but %d chunks were found as free",
					 name, block, block->nfree, nfree);

			nblocks++;
		}
	}

	/* the stored empty blocks are tracked in mem_allocated too */
	nblocks += dclist_count(&slab->emptyblocks);

	Assert(nblocks * slab->blockSize == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */