PostgreSQL Source Code  git master
slab.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slab.c
4  * SLAB allocator definitions.
5  *
6  * SLAB is a MemoryContext implementation designed for cases where large
7  * numbers of equally-sized objects are allocated (and freed).
8  *
9  *
10  * Portions Copyright (c) 2017-2021, PostgreSQL Global Development Group
11  *
12  * IDENTIFICATION
13  * src/backend/utils/mmgr/slab.c
14  *
15  *
16  * NOTE:
17  * The constant allocation size allows significant simplification and various
18  * optimizations over more general purpose allocators. The blocks are carved
19  * into chunks of exactly the right size (plus alignment), not wasting any
20  * memory.
21  *
22  * The information about free chunks is maintained both at the block level and
23  * global (context) level. This is possible as the chunk size (and thus also
24  * the number of chunks per block) is fixed.
25  *
26  * On each block, free chunks are tracked in a simple linked list. Contents
27  * of free chunks is replaced with an index of the next free chunk, forming
28  * a very simple linked list. Each block also contains a counter of free
29  * chunks. Combined with the local block-level freelist, it makes it trivial
30  * to eventually free the whole block.
31  *
32  * At the context level, we use 'freelist' to track blocks ordered by number
33  * of free chunks, starting with blocks having a single allocated chunk, and
34  * with completely full blocks on the tail.
35  *
36  * This also allows various optimizations - for example when searching for
37  * free chunk, the allocator reuses space from the fullest blocks first, in
38  * the hope that some of the less full blocks will get completely empty (and
39  * returned back to the OS).
40  *
41  * For each block, we maintain pointer to the first free chunk - this is quite
42  * cheap and allows us to skip all the preceding used chunks, eliminating
43  * a significant number of lookups in many common usage patterns. In the worst
44  * case this performs as if the pointer was not maintained.
45  *
46  * We cache the freelist index for the blocks with the fewest free chunks
47  * (minFreeChunks), so that we don't have to search the freelist on every
48  * SlabAlloc() call, which is quite expensive.
49  *
50  *-------------------------------------------------------------------------
51  */
52 
53 #include "postgres.h"
54 
55 #include "lib/ilist.h"
56 #include "utils/memdebug.h"
57 #include "utils/memutils.h"
58 
59 /*
60  * SlabContext is a specialized implementation of MemoryContext.
61  */
62 typedef struct SlabContext
63 {
64  MemoryContextData header; /* Standard memory-context fields */
65  /* Allocation parameters for this context: */
66  Size chunkSize; /* chunk size */
67  Size fullChunkSize; /* chunk size including header and alignment */
68  Size blockSize; /* block size */
69  Size headerSize; /* allocated size of context header */
70  int chunksPerBlock; /* number of chunks per block */
71  int minFreeChunks; /* min number of free chunks in any block */
72  int nblocks; /* number of blocks allocated */
73 #ifdef MEMORY_CONTEXT_CHECKING
74  bool *freechunks; /* bitmap of free chunks in a block */
75 #endif
76  /* blocks with free space, grouped by number of free chunks: */
78 } SlabContext;
79 
/*
 * SlabBlock
 *		Structure of a single block in SLAB allocator.
 *
 * Lives at the start of each malloc'd block; the chunks follow immediately
 * after this header (see SlabBlockStart / SlabBlockGetChunk).
 *
 * node: doubly-linked list of blocks in global freelist
 * nfree: number of free chunks in this block
 * firstFreeChunk: index of the first free chunk
 */
typedef struct SlabBlock
{
	dlist_node	node;			/* doubly-linked list */
	int			nfree;			/* number of free chunks */
	int			firstFreeChunk; /* index of the first free chunk in the block */
} SlabBlock;
94 
/*
 * SlabChunk
 *		The prefix of each piece of memory in a SlabBlock
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "slab" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
 * which MAXALIGN is more than twice sizeof(void *), this happens without any
 * special hacking in this struct declaration.  But there is a static
 * assertion below that the alignment is done correctly.
 */
typedef struct SlabChunk
{
	SlabBlock  *block;			/* block owning this chunk */
	SlabContext *slab;			/* owning context */
	/* there must not be any padding to reach a MAXALIGN boundary here! */
} SlabChunk;
112 
113 
/*
 * Pointer/index arithmetic helpers.  All macro arguments are fully
 * parenthesized so that callers may pass arbitrary expressions safely
 * (e.g. an index computed as "a + b" in SlabBlockGetChunk).
 */
#define SlabPointerGetChunk(ptr)	\
	((SlabChunk *) (((char *) (ptr)) - sizeof(SlabChunk)))
#define SlabChunkGetPointer(chk)	\
	((void *) (((char *) (chk)) + sizeof(SlabChunk)))
#define SlabBlockGetChunk(slab, block, idx) \
	((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)	\
					+ ((idx) * (slab)->fullChunkSize)))
#define SlabBlockStart(block)	\
	((char *) (block) + sizeof(SlabBlock))
#define SlabChunkIndex(slab, block, chunk)	\
	(((char *) (chunk) - SlabBlockStart(block)) / (slab)->fullChunkSize)
125 
/*
 * These functions implement the MemoryContext API for Slab contexts.
 * (They are installed in the SlabMethods virtual function table below.)
 */
static void *SlabAlloc(MemoryContext context, Size size);
static void SlabFree(MemoryContext context, void *pointer);
static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
static void SlabReset(MemoryContext context);
static void SlabDelete(MemoryContext context);
static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
static bool SlabIsEmpty(MemoryContext context);
static void SlabStats(MemoryContext context,
					  MemoryStatsPrintFunc printfunc, void *passthru,
					  MemoryContextCounters *totals,
					  bool print_to_stderr);
#ifdef MEMORY_CONTEXT_CHECKING
/* consistency checker, built only with MEMORY_CONTEXT_CHECKING */
static void SlabCheck(MemoryContext context);
#endif
143 
144 /*
145  * This is the virtual function table for Slab contexts.
146  */
148  SlabAlloc,
149  SlabFree,
150  SlabRealloc,
151  SlabReset,
152  SlabDelete,
154  SlabIsEmpty,
155  SlabStats
156 #ifdef MEMORY_CONTEXT_CHECKING
157  ,SlabCheck
158 #endif
159 };
160 
161 
162 /*
163  * SlabContextCreate
164  * Create a new Slab context.
165  *
166  * parent: parent context, or NULL if top-level context
167  * name: name of context (must be statically allocated)
168  * blockSize: allocation block size
169  * chunkSize: allocation chunk size
170  *
171  * The chunkSize may not exceed:
172  * MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
173  */
176  const char *name,
177  Size blockSize,
178  Size chunkSize)
179 {
180  int chunksPerBlock;
182  Size freelistSize;
184  SlabContext *slab;
185  int i;
186 
187  /* Assert we padded SlabChunk properly */
188  StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
189  "sizeof(SlabChunk) is not maxaligned");
191  sizeof(SlabChunk),
192  "padding calculation in SlabChunk is wrong");
193 
194  /* Make sure the linked list node fits inside a freed chunk */
195  if (chunkSize < sizeof(int))
196  chunkSize = sizeof(int);
197 
198  /* chunk, including SLAB header (both addresses nicely aligned) */
199  fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
200 
201  /* Make sure the block can store at least one chunk. */
202  if (blockSize < fullChunkSize + sizeof(SlabBlock))
203  elog(ERROR, "block size %zu for slab is too small for %zu chunks",
204  blockSize, chunkSize);
205 
206  /* Compute maximum number of chunks per block */
207  chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
208 
209  /* The freelist starts with 0, ends with chunksPerBlock. */
210  freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
211 
212  /*
213  * Allocate the context header. Unlike aset.c, we never try to combine
214  * this with the first regular block; not worth the extra complication.
215  */
216 
217  /* Size of the memory context header */
218  headerSize = offsetof(SlabContext, freelist) + freelistSize;
219 
220 #ifdef MEMORY_CONTEXT_CHECKING
221 
222  /*
223  * With memory checking, we need to allocate extra space for the bitmap of
224  * free chunks. The bitmap is an array of bools, so we don't need to worry
225  * about alignment.
226  */
227  headerSize += chunksPerBlock * sizeof(bool);
228 #endif
229 
230  slab = (SlabContext *) malloc(headerSize);
231  if (slab == NULL)
232  {
234  ereport(ERROR,
235  (errcode(ERRCODE_OUT_OF_MEMORY),
236  errmsg("out of memory"),
237  errdetail("Failed while creating memory context \"%s\".",
238  name)));
239  }
240 
241  /*
242  * Avoid writing code that can fail between here and MemoryContextCreate;
243  * we'd leak the header if we ereport in this stretch.
244  */
245 
246  /* Fill in SlabContext-specific header fields */
247  slab->chunkSize = chunkSize;
248  slab->fullChunkSize = fullChunkSize;
249  slab->blockSize = blockSize;
250  slab->headerSize = headerSize;
251  slab->chunksPerBlock = chunksPerBlock;
252  slab->minFreeChunks = 0;
253  slab->nblocks = 0;
254 
255  /* initialize the freelist slots */
256  for (i = 0; i < (slab->chunksPerBlock + 1); i++)
257  dlist_init(&slab->freelist[i]);
258 
259 #ifdef MEMORY_CONTEXT_CHECKING
260  /* set the freechunks pointer right after the freelists array */
261  slab->freechunks
262  = (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
263 #endif
264 
265  /* Finally, do the type-independent part of context creation */
268  &SlabMethods,
269  parent,
270  name);
271 
272  return (MemoryContext) slab;
273 }
274 
275 /*
276  * SlabReset
277  * Frees all memory which is allocated in the given set.
278  *
279  * The code simply frees all the blocks in the context - we don't keep any
280  * keeper blocks or anything like that.
281  */
282 static void
284 {
285  int i;
286  SlabContext *slab = castNode(SlabContext, context);
287 
288  Assert(slab);
289 
290 #ifdef MEMORY_CONTEXT_CHECKING
291  /* Check for corruption and leaks before freeing */
292  SlabCheck(context);
293 #endif
294 
295  /* walk over freelists and free the blocks */
296  for (i = 0; i <= slab->chunksPerBlock; i++)
297  {
298  dlist_mutable_iter miter;
299 
300  dlist_foreach_modify(miter, &slab->freelist[i])
301  {
302  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
303 
304  dlist_delete(miter.cur);
305 
306 #ifdef CLOBBER_FREED_MEMORY
307  wipe_mem(block, slab->blockSize);
308 #endif
309  free(block);
310  slab->nblocks--;
311  context->mem_allocated -= slab->blockSize;
312  }
313  }
314 
315  slab->minFreeChunks = 0;
316 
317  Assert(slab->nblocks == 0);
318  Assert(context->mem_allocated == 0);
319 }
320 
321 /*
322  * SlabDelete
323  * Free all memory which is allocated in the given context.
324  */
325 static void
327 {
328  /* Reset to release all the SlabBlocks */
329  SlabReset(context);
330  /* And free the context header */
331  free(context);
332 }
333 
334 /*
335  * SlabAlloc
336  * Returns pointer to allocated memory of given size or NULL if
337  * request could not be completed; memory is added to the slab.
338  */
339 static void *
341 {
342  SlabContext *slab = castNode(SlabContext, context);
343  SlabBlock *block;
344  SlabChunk *chunk;
345  int idx;
346 
347  Assert(slab);
348 
349  Assert((slab->minFreeChunks >= 0) &&
350  (slab->minFreeChunks < slab->chunksPerBlock));
351 
352  /* make sure we only allow correct request size */
353  if (size != slab->chunkSize)
354  elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
355  size, slab->chunkSize);
356 
357  /*
358  * If there are no free chunks in any existing block, create a new block
359  * and put it to the last freelist bucket.
360  *
361  * slab->minFreeChunks == 0 means there are no blocks with free chunks,
362  * thanks to how minFreeChunks is updated at the end of SlabAlloc().
363  */
364  if (slab->minFreeChunks == 0)
365  {
366  block = (SlabBlock *) malloc(slab->blockSize);
367 
368  if (block == NULL)
369  return NULL;
370 
371  block->nfree = slab->chunksPerBlock;
372  block->firstFreeChunk = 0;
373 
374  /*
375  * Put all the chunks on a freelist. Walk the chunks and point each
376  * one to the next one.
377  */
378  for (idx = 0; idx < slab->chunksPerBlock; idx++)
379  {
380  chunk = SlabBlockGetChunk(slab, block, idx);
381  *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
382  }
383 
384  /*
385  * And add it to the last freelist with all chunks empty.
386  *
387  * We know there are no blocks in the freelist, otherwise we wouldn't
388  * need a new block.
389  */
391 
392  dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
393 
394  slab->minFreeChunks = slab->chunksPerBlock;
395  slab->nblocks += 1;
396  context->mem_allocated += slab->blockSize;
397  }
398 
399  /* grab the block from the freelist (even the new block is there) */
400  block = dlist_head_element(SlabBlock, node,
401  &slab->freelist[slab->minFreeChunks]);
402 
403  /* make sure we actually got a valid block, with matching nfree */
404  Assert(block != NULL);
405  Assert(slab->minFreeChunks == block->nfree);
406  Assert(block->nfree > 0);
407 
408  /* we know index of the first free chunk in the block */
409  idx = block->firstFreeChunk;
410 
411  /* make sure the chunk index is valid, and that it's marked as empty */
412  Assert((idx >= 0) && (idx < slab->chunksPerBlock));
413 
414  /* compute the chunk location block start (after the block header) */
415  chunk = SlabBlockGetChunk(slab, block, idx);
416 
417  /*
418  * Update the block nfree count, and also the minFreeChunks as we've
419  * decreased nfree for a block with the minimum number of free chunks
420  * (because that's how we chose the block).
421  */
422  block->nfree--;
423  slab->minFreeChunks = block->nfree;
424 
425  /*
426  * Remove the chunk from the freelist head. The index of the next free
427  * chunk is stored in the chunk itself.
428  */
430  block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
431 
432  Assert(block->firstFreeChunk >= 0);
433  Assert(block->firstFreeChunk <= slab->chunksPerBlock);
434 
435  Assert((block->nfree != 0 &&
436  block->firstFreeChunk < slab->chunksPerBlock) ||
437  (block->nfree == 0 &&
438  block->firstFreeChunk == slab->chunksPerBlock));
439 
440  /* move the whole block to the right place in the freelist */
441  dlist_delete(&block->node);
442  dlist_push_head(&slab->freelist[block->nfree], &block->node);
443 
444  /*
445  * And finally update minFreeChunks, i.e. the index to the block with the
446  * lowest number of free chunks. We only need to do that when the block
447  * got full (otherwise we know the current block is the right one). We'll
448  * simply walk the freelist until we find a non-empty entry.
449  */
450  if (slab->minFreeChunks == 0)
451  {
452  for (idx = 1; idx <= slab->chunksPerBlock; idx++)
453  {
454  if (dlist_is_empty(&slab->freelist[idx]))
455  continue;
456 
457  /* found a non-empty freelist */
458  slab->minFreeChunks = idx;
459  break;
460  }
461  }
462 
463  if (slab->minFreeChunks == slab->chunksPerBlock)
464  slab->minFreeChunks = 0;
465 
466  /* Prepare to initialize the chunk header. */
467  VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
468 
469  chunk->block = block;
470  chunk->slab = slab;
471 
472 #ifdef MEMORY_CONTEXT_CHECKING
473  /* slab mark to catch clobber of "unused" space */
474  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
475  {
476  set_sentinel(SlabChunkGetPointer(chunk), size);
477  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
478  sizeof(SlabChunk) + slab->chunkSize,
479  slab->fullChunkSize -
480  (slab->chunkSize + sizeof(SlabChunk)));
481  }
482 #endif
483 #ifdef RANDOMIZE_ALLOCATED_MEMORY
484  /* fill the allocated space with junk */
485  randomize_mem((char *) SlabChunkGetPointer(chunk), size);
486 #endif
487 
488  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
489 
490  return SlabChunkGetPointer(chunk);
491 }
492 
/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 */
static void
SlabFree(MemoryContext context, void *pointer)
{
	int			idx;
	SlabContext *slab = castNode(SlabContext, context);
	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
	SlabBlock  *block = chunk->block;

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
		if (!sentinel_ok(pointer, slab->chunkSize))
			elog(WARNING, "detected write past chunk end in %s %p",
				 slab->header.name, chunk);
#endif

	/* compute index of the chunk with respect to block start */
	idx = SlabChunkIndex(slab, block, chunk);

	/*
	 * Push the chunk onto the block-level freelist: the old head index goes
	 * into the chunk's payload, and this chunk becomes the new head.
	 */
	*(int32 *) pointer = block->firstFreeChunk;
	block->firstFreeChunk = idx;
	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* XXX don't wipe the int32 index, used for block-level freelist */
	wipe_mem((char *) pointer + sizeof(int32),
			 slab->chunkSize - sizeof(int32));
#endif

	/*
	 * Remove the block from its freelist; it gets re-inserted in the bucket
	 * matching its new nfree below (or freed entirely).
	 */
	dlist_delete(&block->node);

	/*
	 * See if we need to update the minFreeChunks field for the slab - we only
	 * need to do that if the block had that number of free chunks
	 * before we freed one. In that case, we check if there still are blocks
	 * in the original freelist and we either keep the current value (if there
	 * still are blocks) or increment it by one (the new block is still the
	 * one with minimum free chunks).
	 *
	 * The one exception is when the block will get completely free - in that
	 * case we will free it, so we can't use it for minFreeChunks. It however
	 * means there are no more blocks with free chunks.
	 */
	if (slab->minFreeChunks == (block->nfree - 1))
	{
		/* Have we removed the last chunk from the freelist? */
		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
		{
			/* but if we made the block entirely free, we'll free it */
			if (block->nfree == slab->chunksPerBlock)
				slab->minFreeChunks = 0;
			else
				slab->minFreeChunks++;
		}
	}

	/* If the block is now completely empty, free it. */
	if (block->nfree == slab->chunksPerBlock)
	{
		free(block);
		slab->nblocks--;
		context->mem_allocated -= slab->blockSize;
	}
	else
		dlist_push_head(&slab->freelist[block->nfree], &block->node);

	Assert(slab->nblocks >= 0);
	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}
571 
572 /*
573  * SlabRealloc
574  * Change the allocated size of a chunk.
575  *
576  * As Slab is designed for allocating equally-sized chunks of memory, it can't
577  * do an actual chunk size change. We try to be gentle and allow calls with
578  * exactly the same size, as in that case we can simply return the same
579  * chunk. When the size differs, we throw an error.
580  *
581  * We could also allow requests with size < chunkSize. That however seems
582  * rather pointless - Slab is meant for chunks of constant size, and moreover
583  * realloc is usually used to enlarge the chunk.
584  */
585 static void *
586 SlabRealloc(MemoryContext context, void *pointer, Size size)
587 {
588  SlabContext *slab = castNode(SlabContext, context);
589 
590  Assert(slab);
591 
592  /* can't do actual realloc with slab, but let's try to be gentle */
593  if (size == slab->chunkSize)
594  return pointer;
595 
596  elog(ERROR, "slab allocator does not support realloc()");
597  return NULL; /* keep compiler quiet */
598 }
599 
600 /*
601  * SlabGetChunkSpace
602  * Given a currently-allocated chunk, determine the total space
603  * it occupies (including all memory-allocation overhead).
604  */
605 static Size
606 SlabGetChunkSpace(MemoryContext context, void *pointer)
607 {
608  SlabContext *slab = castNode(SlabContext, context);
609 
610  Assert(slab);
611 
612  return slab->fullChunkSize;
613 }
614 
615 /*
616  * SlabIsEmpty
617  * Is an Slab empty of any allocated space?
618  */
619 static bool
621 {
622  SlabContext *slab = castNode(SlabContext, context);
623 
624  Assert(slab);
625 
626  return (slab->nblocks == 0);
627 }
628 
629 /*
630  * SlabStats
631  * Compute stats about memory consumption of a Slab context.
632  *
633  * printfunc: if not NULL, pass a human-readable stats string to this.
634  * passthru: pass this pointer through to printfunc.
635  * totals: if not NULL, add stats about this context into *totals.
636  * print_to_stderr: print stats to stderr if true, elog otherwise.
637  */
638 static void
640  MemoryStatsPrintFunc printfunc, void *passthru,
641  MemoryContextCounters *totals,
642  bool print_to_stderr)
643 {
644  SlabContext *slab = castNode(SlabContext, context);
645  Size nblocks = 0;
646  Size freechunks = 0;
647  Size totalspace;
648  Size freespace = 0;
649  int i;
650 
651  /* Include context header in totalspace */
652  totalspace = slab->headerSize;
653 
654  for (i = 0; i <= slab->chunksPerBlock; i++)
655  {
656  dlist_iter iter;
657 
658  dlist_foreach(iter, &slab->freelist[i])
659  {
660  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
661 
662  nblocks++;
663  totalspace += slab->blockSize;
664  freespace += slab->fullChunkSize * block->nfree;
665  freechunks += block->nfree;
666  }
667  }
668 
669  if (printfunc)
670  {
671  char stats_string[200];
672 
673  snprintf(stats_string, sizeof(stats_string),
674  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
675  totalspace, nblocks, freespace, freechunks,
676  totalspace - freespace);
677  printfunc(context, passthru, stats_string, print_to_stderr);
678  }
679 
680  if (totals)
681  {
682  totals->nblocks += nblocks;
683  totals->freechunks += freechunks;
684  totals->totalspace += totalspace;
685  totals->freespace += freespace;
686  }
687 }
688 
689 
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * SlabCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
SlabCheck(MemoryContext context)
{
	int			i;
	SlabContext *slab = castNode(SlabContext, context);
	const char *name = slab->header.name;

	Assert(slab);
	Assert(slab->chunksPerBlock > 0);

	/* walk all the freelists */
	for (i = 0; i <= slab->chunksPerBlock; i++)
	{
		int			j,
					nfree;
		dlist_iter	iter;

		/* walk all blocks on this freelist */
		dlist_foreach(iter, &slab->freelist[i])
		{
			int			idx;
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

			/*
			 * Make sure the number of free chunks (in the block header)
			 * matches position in the freelist.
			 */
			if (block->nfree != i)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
					 name, block->nfree, block, i);

			/* reset the bitmap of free chunks for this block */
			memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
			idx = block->firstFreeChunk;

			/*
			 * Now walk through the chunks, count the free ones and also
			 * perform some additional checks for the used ones. As the chunk
			 * freelist is stored within the chunks themselves, we have to
			 * walk through the chunks and construct our own bitmap.
			 */

			nfree = 0;
			while (idx < slab->chunksPerBlock)
			{
				SlabChunk  *chunk;

				/* count the chunk as free, add it to the bitmap */
				nfree++;
				slab->freechunks[idx] = true;

				/* read index of the next free chunk */
				chunk = SlabBlockGetChunk(slab, block, idx);
				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
				idx = *(int32 *) SlabChunkGetPointer(chunk);
			}

			for (j = 0; j < slab->chunksPerBlock; j++)
			{
				/* non-zero bit in the bitmap means the chunk is used */
				if (!slab->freechunks[j])
				{
					SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);

					/* chunks have both block and slab pointers, so check both */
					if (chunk->block != block)
						elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
							 name, block, chunk);

					if (chunk->slab != slab)
						elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
							 name, block, chunk);

					/* there might be sentinel (thanks to alignment) */
					if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
						if (!sentinel_ok(chunk, slab->chunkSize))
							elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
								 name, block, chunk);
				}
			}

			/*
			 * Make sure we got the expected number of free chunks (as tracked
			 * in the block header).
			 */
			if (nfree != block->nfree)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
					 name, block->nfree, block, nfree);
		}
	}

	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */
static Size SlabGetChunkSpace(MemoryContext context, void *pointer)
Definition: slab.c:606
static void SlabReset(MemoryContext context)
Definition: slab.c:283
struct SlabContext SlabContext
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
dlist_node * cur
Definition: ilist.h:180
MemoryContextData header
Definition: slab.c:64
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:543
int nblocks
Definition: slab.c:72
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define castNode(_type_, nodeptr)
Definition: nodes.h:608
Size blockSize
Definition: slab.c:68
struct dlist_head dlist_head
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
SlabBlock * block
Definition: slab.c:108
#define dlist_foreach(iter, lhead)
Definition: ilist.h:526
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:350
static void * SlabRealloc(MemoryContext context, void *pointer, Size size)
Definition: slab.c:586
int errcode(int sqlerrcode)
Definition: elog.c:698
dlist_head freelist[FLEXIBLE_ARRAY_MEMBER]
Definition: slab.c:77
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition: memnodes.h:54
MemoryContext SlabContextCreate(MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
Definition: slab.c:175
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
dlist_node node
Definition: slab.c:90
signed int int32
Definition: c.h:429
static void * SlabAlloc(MemoryContext context, Size size)
Definition: slab.c:340
Size chunkSize
Definition: slab.c:66
#define malloc(a)
Definition: header.h:50
static const MemoryContextMethods SlabMethods
Definition: slab.c:147
Size headerSize
Definition: slab.c:69
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:918
#define dlist_container(type, membername, ptr)
Definition: ilist.h:496
int firstFreeChunk
Definition: slab.c:92
#define ERROR
Definition: elog.h:46
SlabContext * slab
Definition: slab.c:109
int chunksPerBlock
Definition: slab.c:70
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:505
Size fullChunkSize
Definition: slab.c:67
#define SlabChunkIndex(slab, block, chunk)
Definition: slab.c:123
static void SlabStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition: slab.c:639
int errdetail(const char *fmt,...)
Definition: elog.c:1042
static bool SlabIsEmpty(MemoryContext context)
Definition: slab.c:620
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:815
int nfree
Definition: slab.c:91
MemoryContext TopMemoryContext
Definition: mcxt.c:48
#define WARNING
Definition: elog.h:40
static void SlabFree(MemoryContext context, void *pointer)
Definition: slab.c:498
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:506
dlist_node * cur
Definition: ilist.h:161
static void dlist_init(dlist_head *head)
Definition: ilist.h:278
#define ereport(elevel,...)
Definition: elog.h:157
#define free(a)
Definition: header.h:65
#define Assert(condition)
Definition: c.h:804
struct SlabBlock SlabBlock
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
size_t Size
Definition: c.h:540
#define MAXALIGN(LEN)
Definition: c.h:757
const char * name
Definition: encode.c:515
Size mem_allocated
Definition: memnodes.h:84
Definition: slab.c:88
#define SlabBlockGetChunk(slab, block, idx)
Definition: slab.c:118
int errmsg(const char *fmt,...)
Definition: elog.c:909
#define elog(elevel,...)
Definition: elog.h:232
int i
int minFreeChunks
Definition: slab.c:71
struct SlabChunk SlabChunk
#define SlabChunkGetPointer(chk)
Definition: slab.c:116
#define SlabPointerGetChunk(ptr)
Definition: slab.c:114
#define snprintf
Definition: port.h:216
const char * name
Definition: memnodes.h:90
#define offsetof(type, field)
Definition: c.h:727
unsigned char bool
Definition: c.h:391
static void SlabDelete(MemoryContext context)
Definition: slab.c:326