PostgreSQL Source Code  git master
slab.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slab.c
4  * SLAB allocator definitions.
5  *
6  * SLAB is a MemoryContext implementation designed for cases where large
7  * numbers of equally-sized objects are allocated (and freed).
8  *
9  *
10  * Portions Copyright (c) 2017-2019, PostgreSQL Global Development Group
11  *
12  * IDENTIFICATION
13  * src/backend/utils/mmgr/slab.c
14  *
15  *
16  * NOTE:
17  * The constant allocation size allows significant simplification and various
18  * optimizations over more general purpose allocators. The blocks are carved
19  * into chunks of exactly the right size (plus alignment), not wasting any
20  * memory.
21  *
22  * The information about free chunks is maintained both at the block level and
23  * global (context) level. This is possible as the chunk size (and thus also
24  * the number of chunks per block) is fixed.
25  *
26  * On each block, free chunks are tracked in a simple linked list. Contents
27  * of free chunks is replaced with an index of the next free chunk, forming
28  * a very simple linked list. Each block also contains a counter of free
29  * chunks. Combined with the local block-level freelist, it makes it trivial
30  * to eventually free the whole block.
31  *
32  * At the context level, we use 'freelist' to track blocks ordered by number
33  * of free chunks, starting with blocks having a single allocated chunk, and
34  * with completely full blocks on the tail.
35  *
36  * This also allows various optimizations - for example when searching for
37  * free chunk, the allocator reuses space from the fullest blocks first, in
38  * the hope that some of the less full blocks will get completely empty (and
39  * returned back to the OS).
40  *
41  * For each block, we maintain pointer to the first free chunk - this is quite
42  * cheap and allows us to skip all the preceding used chunks, eliminating
43  * a significant number of lookups in many common usage patterns. In the worst
44  * case this performs as if the pointer was not maintained.
45  *
46  * We cache the freelist index for the blocks with the fewest free chunks
47  * (minFreeChunks), so that we don't have to search the freelist on every
48  * SlabAlloc() call, which is quite expensive.
49  *
50  *-------------------------------------------------------------------------
51  */
52 
53 #include "postgres.h"
54 
55 #include "lib/ilist.h"
56 #include "utils/memdebug.h"
57 #include "utils/memutils.h"
58 
/*
 * SlabContext is a specialized implementation of MemoryContext.
 *
 * The fixed-size header is followed by an array of (chunksPerBlock + 1)
 * freelists: freelist[i] links the blocks that currently have exactly
 * i free chunks (so freelist[0] holds completely full blocks).
 */
typedef struct SlabContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Allocation parameters for this context: */
	Size		chunkSize;		/* chunk size (as requested by caller) */
	Size		fullChunkSize;	/* chunk size including header and alignment */
	Size		blockSize;		/* block size (each block is one malloc) */
	Size		headerSize;		/* allocated size of context header */
	int			chunksPerBlock; /* number of chunks per block */
	int			minFreeChunks;	/* min number of free chunks in any block;
								 * 0 means no block has free chunks */
	int			nblocks;		/* number of blocks allocated */
	/* blocks with free space, grouped by number of free chunks: */
	dlist_head	freelist[FLEXIBLE_ARRAY_MEMBER];
} SlabContext;
76 
/*
 * SlabBlock
 *		Structure of a single block in SLAB allocator.
 *
 * node: doubly-linked list of blocks in global freelist
 * nfree: number of free chunks in this block
 * firstFreeChunk: index of the first free chunk; equals chunksPerBlock
 * when the block has no free chunks left (see asserts in SlabAlloc)
 */
typedef struct SlabBlock
{
	dlist_node	node;			/* doubly-linked list */
	int			nfree;			/* number of free chunks */
	int			firstFreeChunk; /* index of the first free chunk in the block */
} SlabBlock;
91 
/*
 * SlabChunk
 *		The prefix of each piece of memory in a SlabBlock
 *
 * The MAXALIGNed payload follows immediately after this struct (see
 * SlabChunkGetPointer).  While a chunk is on a block's freelist, the first
 * int32 of the payload stores the index of the next free chunk, as set up
 * in SlabAlloc and SlabFree.
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "slab" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext). Since we support no machines on
 * which MAXALIGN is more than twice sizeof(void *), this happens without any
 * special hacking in this struct declaration. But there is a static
 * assertion below that the alignment is done correctly.
 */
typedef struct SlabChunk
{
	SlabBlock  *block;			/* block owning this chunk */
	SlabContext *slab;			/* owning context */
	/* there must not be any padding to reach a MAXALIGN boundary here! */
} SlabChunk;
109 
110 
/*
 * Address arithmetic between chunk headers, payload pointers, and chunk
 * indexes within a block.  All macro parameters are parenthesized so that
 * expression arguments (e.g. "idx + 1") expand with the intended
 * precedence.
 */
#define SlabPointerGetChunk(ptr)	\
	((SlabChunk *)(((char *)(ptr)) - sizeof(SlabChunk)))
#define SlabChunkGetPointer(chk)	\
	((void *)(((char *)(chk)) + sizeof(SlabChunk)))
#define SlabBlockGetChunk(slab, block, idx) \
	((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)	\
					+ ((idx) * (slab)->fullChunkSize)))
#define SlabBlockStart(block)	\
	((char *) (block) + sizeof(SlabBlock))
#define SlabChunkIndex(slab, block, chunk)	\
	(((char *) (chunk) - SlabBlockStart(block)) / (slab)->fullChunkSize)
122 
/*
 * These functions implement the MemoryContext API for Slab contexts.
 * They are installed into the context's virtual function table
 * (SlabMethods, below).
 */
static void *SlabAlloc(MemoryContext context, Size size);
static void SlabFree(MemoryContext context, void *pointer);
static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
static void SlabReset(MemoryContext context);
static void SlabDelete(MemoryContext context);
static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
static bool SlabIsEmpty(MemoryContext context);
static void SlabStats(MemoryContext context,
					  MemoryStatsPrintFunc printfunc, void *passthru,
					  MemoryContextCounters *totals);
#ifdef MEMORY_CONTEXT_CHECKING
static void SlabCheck(MemoryContext context);
#endif
139 
140 /*
141  * This is the virtual function table for Slab contexts.
142  */
144  SlabAlloc,
145  SlabFree,
146  SlabRealloc,
147  SlabReset,
148  SlabDelete,
150  SlabIsEmpty,
151  SlabStats
152 #ifdef MEMORY_CONTEXT_CHECKING
153  ,SlabCheck
154 #endif
155 };
156 
/* ----------
 * Debug macros
 * ----------
 */
/*
 * NOTE(review): the HAVE_ALLOCINFO variants reference (_chunk)->header.size,
 * but SlabChunk (above) has no "header" field.  Since this code only
 * compiles when HAVE_ALLOCINFO is defined, it appears stale -- verify
 * before enabling HAVE_ALLOCINFO.
 */
#ifdef HAVE_ALLOCINFO
#define SlabFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "SlabFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->header.size)
#define SlabAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "SlabAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->header.size)
#else
/* no-op stubs when allocation tracing is disabled */
#define SlabFreeInfo(_cxt, _chunk)
#define SlabAllocInfo(_cxt, _chunk)
#endif
172 
173 
174 /*
175  * SlabContextCreate
176  * Create a new Slab context.
177  *
178  * parent: parent context, or NULL if top-level context
179  * name: name of context (must be statically allocated)
180  * blockSize: allocation block size
181  * chunkSize: allocation chunk size
182  *
183  * The chunkSize may not exceed:
184  * MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
185  */
188  const char *name,
189  Size blockSize,
190  Size chunkSize)
191 {
192  int chunksPerBlock;
194  Size freelistSize;
196  SlabContext *slab;
197  int i;
198 
199  /* Assert we padded SlabChunk properly */
200  StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
201  "sizeof(SlabChunk) is not maxaligned");
203  sizeof(SlabChunk),
204  "padding calculation in SlabChunk is wrong");
205 
206  /* Make sure the linked list node fits inside a freed chunk */
207  if (chunkSize < sizeof(int))
208  chunkSize = sizeof(int);
209 
210  /* chunk, including SLAB header (both addresses nicely aligned) */
211  fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
212 
213  /* Make sure the block can store at least one chunk. */
214  if (blockSize < fullChunkSize + sizeof(SlabBlock))
215  elog(ERROR, "block size %zu for slab is too small for %zu chunks",
216  blockSize, chunkSize);
217 
218  /* Compute maximum number of chunks per block */
219  chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
220 
221  /* The freelist starts with 0, ends with chunksPerBlock. */
222  freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
223 
224  /*
225  * Allocate the context header. Unlike aset.c, we never try to combine
226  * this with the first regular block; not worth the extra complication.
227  */
228 
229  /* Size of the memory context header */
230  headerSize = offsetof(SlabContext, freelist) + freelistSize;
231 
232  slab = (SlabContext *) malloc(headerSize);
233  if (slab == NULL)
234  {
236  ereport(ERROR,
237  (errcode(ERRCODE_OUT_OF_MEMORY),
238  errmsg("out of memory"),
239  errdetail("Failed while creating memory context \"%s\".",
240  name)));
241  }
242 
243  /*
244  * Avoid writing code that can fail between here and MemoryContextCreate;
245  * we'd leak the header if we ereport in this stretch.
246  */
247 
248  /* Fill in SlabContext-specific header fields */
249  slab->chunkSize = chunkSize;
251  slab->blockSize = blockSize;
252  slab->headerSize = headerSize;
254  slab->minFreeChunks = 0;
255  slab->nblocks = 0;
256 
257  /* initialize the freelist slots */
258  for (i = 0; i < (slab->chunksPerBlock + 1); i++)
259  dlist_init(&slab->freelist[i]);
260 
261  /* Finally, do the type-independent part of context creation */
264  &SlabMethods,
265  parent,
266  name);
267 
268  return (MemoryContext) slab;
269 }
270 
271 /*
272  * SlabReset
273  * Frees all memory which is allocated in the given set.
274  *
275  * The code simply frees all the blocks in the context - we don't keep any
276  * keeper blocks or anything like that.
277  */
278 static void
280 {
281  int i;
282  SlabContext *slab = castNode(SlabContext, context);
283 
284  Assert(slab);
285 
286 #ifdef MEMORY_CONTEXT_CHECKING
287  /* Check for corruption and leaks before freeing */
288  SlabCheck(context);
289 #endif
290 
291  /* walk over freelists and free the blocks */
292  for (i = 0; i <= slab->chunksPerBlock; i++)
293  {
294  dlist_mutable_iter miter;
295 
296  dlist_foreach_modify(miter, &slab->freelist[i])
297  {
298  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
299 
300  dlist_delete(miter.cur);
301 
302 #ifdef CLOBBER_FREED_MEMORY
303  wipe_mem(block, slab->blockSize);
304 #endif
305  free(block);
306  slab->nblocks--;
307  context->mem_allocated -= slab->blockSize;
308  }
309  }
310 
311  slab->minFreeChunks = 0;
312 
313  Assert(slab->nblocks == 0);
314  Assert(context->mem_allocated == 0);
315 }
316 
317 /*
318  * SlabDelete
319  * Free all memory which is allocated in the given context.
320  */
321 static void
323 {
324  /* Reset to release all the SlabBlocks */
325  SlabReset(context);
326  /* And free the context header */
327  free(context);
328 }
329 
330 /*
331  * SlabAlloc
332  * Returns pointer to allocated memory of given size or NULL if
333  * request could not be completed; memory is added to the slab.
334  */
335 static void *
337 {
338  SlabContext *slab = castNode(SlabContext, context);
339  SlabBlock *block;
340  SlabChunk *chunk;
341  int idx;
342 
343  Assert(slab);
344 
345  Assert((slab->minFreeChunks >= 0) &&
346  (slab->minFreeChunks < slab->chunksPerBlock));
347 
348  /* make sure we only allow correct request size */
349  if (size != slab->chunkSize)
350  elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
351  size, slab->chunkSize);
352 
353  /*
354  * If there are no free chunks in any existing block, create a new block
355  * and put it to the last freelist bucket.
356  *
357  * slab->minFreeChunks == 0 means there are no blocks with free chunks,
358  * thanks to how minFreeChunks is updated at the end of SlabAlloc().
359  */
360  if (slab->minFreeChunks == 0)
361  {
362  block = (SlabBlock *) malloc(slab->blockSize);
363 
364  if (block == NULL)
365  return NULL;
366 
367  block->nfree = slab->chunksPerBlock;
368  block->firstFreeChunk = 0;
369 
370  /*
371  * Put all the chunks on a freelist. Walk the chunks and point each
372  * one to the next one.
373  */
374  for (idx = 0; idx < slab->chunksPerBlock; idx++)
375  {
376  chunk = SlabBlockGetChunk(slab, block, idx);
377  *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
378  }
379 
380  /*
381  * And add it to the last freelist with all chunks empty.
382  *
383  * We know there are no blocks in the freelist, otherwise we wouldn't
384  * need a new block.
385  */
387 
388  dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
389 
390  slab->minFreeChunks = slab->chunksPerBlock;
391  slab->nblocks += 1;
392  context->mem_allocated += slab->blockSize;
393  }
394 
395  /* grab the block from the freelist (even the new block is there) */
396  block = dlist_head_element(SlabBlock, node,
397  &slab->freelist[slab->minFreeChunks]);
398 
399  /* make sure we actually got a valid block, with matching nfree */
400  Assert(block != NULL);
401  Assert(slab->minFreeChunks == block->nfree);
402  Assert(block->nfree > 0);
403 
404  /* we know index of the first free chunk in the block */
405  idx = block->firstFreeChunk;
406 
407  /* make sure the chunk index is valid, and that it's marked as empty */
408  Assert((idx >= 0) && (idx < slab->chunksPerBlock));
409 
410  /* compute the chunk location block start (after the block header) */
411  chunk = SlabBlockGetChunk(slab, block, idx);
412 
413  /*
414  * Update the block nfree count, and also the minFreeChunks as we've
415  * decreased nfree for a block with the minimum number of free chunks
416  * (because that's how we chose the block).
417  */
418  block->nfree--;
419  slab->minFreeChunks = block->nfree;
420 
421  /*
422  * Remove the chunk from the freelist head. The index of the next free
423  * chunk is stored in the chunk itself.
424  */
426  block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
427 
428  Assert(block->firstFreeChunk >= 0);
429  Assert(block->firstFreeChunk <= slab->chunksPerBlock);
430 
431  Assert((block->nfree != 0 &&
432  block->firstFreeChunk < slab->chunksPerBlock) ||
433  (block->nfree == 0 &&
434  block->firstFreeChunk == slab->chunksPerBlock));
435 
436  /* move the whole block to the right place in the freelist */
437  dlist_delete(&block->node);
438  dlist_push_head(&slab->freelist[block->nfree], &block->node);
439 
440  /*
441  * And finally update minFreeChunks, i.e. the index to the block with the
442  * lowest number of free chunks. We only need to do that when the block
443  * got full (otherwise we know the current block is the right one). We'll
444  * simply walk the freelist until we find a non-empty entry.
445  */
446  if (slab->minFreeChunks == 0)
447  {
448  for (idx = 1; idx <= slab->chunksPerBlock; idx++)
449  {
450  if (dlist_is_empty(&slab->freelist[idx]))
451  continue;
452 
453  /* found a non-empty freelist */
454  slab->minFreeChunks = idx;
455  break;
456  }
457  }
458 
459  if (slab->minFreeChunks == slab->chunksPerBlock)
460  slab->minFreeChunks = 0;
461 
462  /* Prepare to initialize the chunk header. */
463  VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
464 
465  chunk->block = block;
466  chunk->slab = slab;
467 
468 #ifdef MEMORY_CONTEXT_CHECKING
469  /* slab mark to catch clobber of "unused" space */
470  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
471  {
472  set_sentinel(SlabChunkGetPointer(chunk), size);
473  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
474  sizeof(SlabChunk) + slab->chunkSize,
475  slab->fullChunkSize -
476  (slab->chunkSize + sizeof(SlabChunk)));
477  }
478 #endif
479 #ifdef RANDOMIZE_ALLOCATED_MEMORY
480  /* fill the allocated space with junk */
481  randomize_mem((char *) SlabChunkGetPointer(chunk), size);
482 #endif
483 
484  SlabAllocInfo(slab, chunk);
485 
486  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
487 
488  return SlabChunkGetPointer(chunk);
489 }
490 
/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 *
 * The chunk is pushed on its block's freelist, the block is moved to the
 * matching context-level freelist bucket, and the block itself is freed
 * once it becomes completely empty.
 */
static void
SlabFree(MemoryContext context, void *pointer)
{
	int			idx;
	SlabContext *slab = castNode(SlabContext, context);
	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
	SlabBlock  *block = chunk->block;

	SlabFreeInfo(slab, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
		if (!sentinel_ok(pointer, slab->chunkSize))
			elog(WARNING, "detected write past chunk end in %s %p",
				 slab->header.name, chunk);
#endif

	/* compute index of the chunk with respect to block start */
	idx = SlabChunkIndex(slab, block, chunk);

	/* add chunk to freelist, and update block nfree count */
	*(int32 *) pointer = block->firstFreeChunk;
	block->firstFreeChunk = idx;
	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* XXX don't wipe the int32 index, used for block-level freelist */
	wipe_mem((char *) pointer + sizeof(int32),
			 slab->chunkSize - sizeof(int32));
#endif

	/* remove the block from a freelist */
	dlist_delete(&block->node);

	/*
	 * See if we need to update the minFreeChunks field for the slab - we only
	 * need to do that if the block had that number of free chunks
	 * before we freed one. In that case, we check if there still are blocks
	 * in the original freelist and we either keep the current value (if there
	 * still are blocks) or increment it by one (the new block is still the
	 * one with minimum free chunks).
	 *
	 * The one exception is when the block will get completely free - in that
	 * case we will free it, so we can't use it for minFreeChunks. It however
	 * means there are no more blocks with free chunks.
	 */
	if (slab->minFreeChunks == (block->nfree - 1))
	{
		/* Have we removed the last chunk from the freelist? */
		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
		{
			/* but if we made the block entirely free, we'll free it */
			if (block->nfree == slab->chunksPerBlock)
				slab->minFreeChunks = 0;
			else
				slab->minFreeChunks++;
		}
	}

	/* If the block is now completely empty, free it. */
	if (block->nfree == slab->chunksPerBlock)
	{
		free(block);
		slab->nblocks--;
		context->mem_allocated -= slab->blockSize;
	}
	else
		dlist_push_head(&slab->freelist[block->nfree], &block->node);

	Assert(slab->nblocks >= 0);
	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}
571 
572 /*
573  * SlabRealloc
574  * Change the allocated size of a chunk.
575  *
576  * As Slab is designed for allocating equally-sized chunks of memory, it can't
577  * do an actual chunk size change. We try to be gentle and allow calls with
578  * exactly the same size, as in that case we can simply return the same
579  * chunk. When the size differs, we throw an error.
580  *
581  * We could also allow requests with size < chunkSize. That however seems
582  * rather pointless - Slab is meant for chunks of constant size, and moreover
583  * realloc is usually used to enlarge the chunk.
584  */
585 static void *
586 SlabRealloc(MemoryContext context, void *pointer, Size size)
587 {
588  SlabContext *slab = castNode(SlabContext, context);
589 
590  Assert(slab);
591 
592  /* can't do actual realloc with slab, but let's try to be gentle */
593  if (size == slab->chunkSize)
594  return pointer;
595 
596  elog(ERROR, "slab allocator does not support realloc()");
597  return NULL; /* keep compiler quiet */
598 }
599 
600 /*
601  * SlabGetChunkSpace
602  * Given a currently-allocated chunk, determine the total space
603  * it occupies (including all memory-allocation overhead).
604  */
605 static Size
606 SlabGetChunkSpace(MemoryContext context, void *pointer)
607 {
608  SlabContext *slab = castNode(SlabContext, context);
609 
610  Assert(slab);
611 
612  return slab->fullChunkSize;
613 }
614 
615 /*
616  * SlabIsEmpty
617  * Is an Slab empty of any allocated space?
618  */
619 static bool
621 {
622  SlabContext *slab = castNode(SlabContext, context);
623 
624  Assert(slab);
625 
626  return (slab->nblocks == 0);
627 }
628 
629 /*
630  * SlabStats
631  * Compute stats about memory consumption of a Slab context.
632  *
633  * printfunc: if not NULL, pass a human-readable stats string to this.
634  * passthru: pass this pointer through to printfunc.
635  * totals: if not NULL, add stats about this context into *totals.
636  */
637 static void
639  MemoryStatsPrintFunc printfunc, void *passthru,
640  MemoryContextCounters *totals)
641 {
642  SlabContext *slab = castNode(SlabContext, context);
643  Size nblocks = 0;
644  Size freechunks = 0;
645  Size totalspace;
646  Size freespace = 0;
647  int i;
648 
649  /* Include context header in totalspace */
650  totalspace = slab->headerSize;
651 
652  for (i = 0; i <= slab->chunksPerBlock; i++)
653  {
654  dlist_iter iter;
655 
656  dlist_foreach(iter, &slab->freelist[i])
657  {
658  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
659 
660  nblocks++;
661  totalspace += slab->blockSize;
662  freespace += slab->fullChunkSize * block->nfree;
663  freechunks += block->nfree;
664  }
665  }
666 
667  if (printfunc)
668  {
669  char stats_string[200];
670 
671  snprintf(stats_string, sizeof(stats_string),
672  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
673  totalspace, nblocks, freespace, freechunks,
674  totalspace - freespace);
675  printfunc(context, passthru, stats_string);
676  }
677 
678  if (totals)
679  {
680  totals->nblocks += nblocks;
681  totals->freechunks += freechunks;
682  totals->totalspace += totalspace;
683  totals->freespace += freespace;
684  }
685 }
686 
687 
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * SlabCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
SlabCheck(MemoryContext context)
{
	int			i;
	SlabContext *slab = castNode(SlabContext, context);
	const char *name = slab->header.name;
	char	   *freechunks;

	Assert(slab);
	Assert(slab->chunksPerBlock > 0);

	/* bitmap of free chunks on a block */
	freechunks = palloc(slab->chunksPerBlock * sizeof(bool));

	/* walk all the freelists */
	for (i = 0; i <= slab->chunksPerBlock; i++)
	{
		int			j,
					nfree;
		dlist_iter	iter;

		/* walk all blocks on this freelist */
		dlist_foreach(iter, &slab->freelist[i])
		{
			int			idx;
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

			/*
			 * Make sure the number of free chunks (in the block header)
			 * matches position in the freelist.
			 */
			if (block->nfree != i)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
					 name, block->nfree, block, i);

			/* reset the bitmap of free chunks for this block */
			memset(freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
			idx = block->firstFreeChunk;

			/*
			 * Now walk through the chunks, count the free ones and also
			 * perform some additional checks for the used ones. As the chunk
			 * freelist is stored within the chunks themselves, we have to
			 * walk through the chunks and construct our own bitmap.
			 */

			nfree = 0;
			while (idx < slab->chunksPerBlock)
			{
				SlabChunk  *chunk;

				/* count the chunk as free, add it to the bitmap */
				nfree++;
				freechunks[idx] = true;

				/* read index of the next free chunk */
				chunk = SlabBlockGetChunk(slab, block, idx);
				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
				idx = *(int32 *) SlabChunkGetPointer(chunk);
			}

			for (j = 0; j < slab->chunksPerBlock; j++)
			{
				/* non-zero bit in the bitmap means the chunk is used */
				if (!freechunks[j])
				{
					SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);

					/* chunks have both block and slab pointers, so check both */
					if (chunk->block != block)
						elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
							 name, block, chunk);

					if (chunk->slab != slab)
						elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
							 name, block, chunk);

					/*
					 * There might be a sentinel (thanks to alignment).  The
					 * sentinel is written at payload + chunkSize (see
					 * set_sentinel in SlabAlloc), so check it relative to the
					 * payload pointer -- not the chunk header -- matching the
					 * check in SlabFree.
					 */
					if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
						if (!sentinel_ok(SlabChunkGetPointer(chunk), slab->chunkSize))
							elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
								 name, block, chunk);
				}
			}

			/*
			 * Make sure we got the expected number of free chunks (as tracked
			 * in the block header).
			 */
			if (nfree != block->nfree)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
					 name, block->nfree, block, nfree);
		}
	}

	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */
static Size SlabGetChunkSpace(MemoryContext context, void *pointer)
Definition: slab.c:606
static void SlabReset(MemoryContext context)
Definition: slab.c:279
struct SlabContext SlabContext
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
dlist_node * cur
Definition: ilist.h:180
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
MemoryContextData header
Definition: slab.c:64
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:524
int nblocks
Definition: slab.c:72
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define castNode(_type_, nodeptr)
Definition: nodes.h:594
Size blockSize
Definition: slab.c:68
struct dlist_head dlist_head
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
SlabBlock * block
Definition: slab.c:105
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static void * SlabRealloc(MemoryContext context, void *pointer, Size size)
Definition: slab.c:586
int errcode(int sqlerrcode)
Definition: elog.c:608
dlist_head freelist[FLEXIBLE_ARRAY_MEMBER]
Definition: slab.c:74
MemoryContext SlabContextCreate(MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
Definition: slab.c:187
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:263
dlist_node node
Definition: slab.c:87
#define SlabFreeInfo(_cxt, _chunk)
Definition: slab.c:169
signed int int32
Definition: c.h:347
static void * SlabAlloc(MemoryContext context, Size size)
Definition: slab.c:336
Size chunkSize
Definition: slab.c:66
#define malloc(a)
Definition: header.h:50
static const MemoryContextMethods SlabMethods
Definition: slab.c:143
Size headerSize
Definition: slab.c:69
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:849
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
int firstFreeChunk
Definition: slab.c:89
#define ERROR
Definition: elog.h:43
SlabContext * slab
Definition: slab.c:106
int chunksPerBlock
Definition: slab.c:70
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:498
Size fullChunkSize
Definition: slab.c:67
#define SlabChunkIndex(slab, block, chunk)
Definition: slab.c:120
int errdetail(const char *fmt,...)
Definition: elog.c:955
static bool SlabIsEmpty(MemoryContext context)
Definition: slab.c:620
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:748
#define ereport(elevel, rest)
Definition: elog.h:141
int nfree
Definition: slab.c:88
MemoryContext TopMemoryContext
Definition: mcxt.c:44
#define WARNING
Definition: elog.h:40
static void SlabFree(MemoryContext context, void *pointer)
Definition: slab.c:496
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:487
dlist_node * cur
Definition: ilist.h:161
#define SlabAllocInfo(_cxt, _chunk)
Definition: slab.c:170
static void dlist_init(dlist_head *head)
Definition: ilist.h:278
#define free(a)
Definition: header.h:65
#define Assert(condition)
Definition: c.h:739
struct SlabBlock SlabBlock
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
size_t Size
Definition: c.h:467
#define MAXALIGN(LEN)
Definition: c.h:692
const char * name
Definition: encode.c:521
Size mem_allocated
Definition: memnodes.h:82
Definition: slab.c:85
#define SlabBlockGetChunk(slab, block, idx)
Definition: slab.c:115
void * palloc(Size size)
Definition: mcxt.c:949
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define elog(elevel,...)
Definition: elog.h:228
int i
int minFreeChunks
Definition: slab.c:71
struct SlabChunk SlabChunk
#define SlabChunkGetPointer(chk)
Definition: slab.c:113
#define SlabPointerGetChunk(ptr)
Definition: slab.c:111
#define snprintf
Definition: port.h:192
const char * name
Definition: memnodes.h:88
#define offsetof(type, field)
Definition: c.h:662
static void SlabDelete(MemoryContext context)
Definition: slab.c:322
static void SlabStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: slab.c:638