PostgreSQL Source Code  git master
slab.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slab.c
4  * SLAB allocator definitions.
5  *
6  * SLAB is a MemoryContext implementation designed for cases where large
7  * numbers of equally-sized objects are allocated (and freed).
8  *
9  *
10  * Portions Copyright (c) 2017-2020, PostgreSQL Global Development Group
11  *
12  * IDENTIFICATION
13  * src/backend/utils/mmgr/slab.c
14  *
15  *
16  * NOTE:
17  * The constant allocation size allows significant simplification and various
18  * optimizations over more general purpose allocators. The blocks are carved
19  * into chunks of exactly the right size (plus alignment), not wasting any
20  * memory.
21  *
22  * The information about free chunks is maintained both at the block level and
23  * global (context) level. This is possible as the chunk size (and thus also
24  * the number of chunks per block) is fixed.
25  *
26  * On each block, free chunks are tracked in a simple linked list. Contents
27  * of free chunks is replaced with an index of the next free chunk, forming
28  * a very simple linked list. Each block also contains a counter of free
29  * chunks. Combined with the local block-level freelist, it makes it trivial
30  * to eventually free the whole block.
31  *
32  * At the context level, we use 'freelist' to track blocks ordered by number
33  * of free chunks, starting with blocks having a single allocated chunk, and
34  * with completely full blocks on the tail.
35  *
36  * This also allows various optimizations - for example when searching for
37  * free chunk, the allocator reuses space from the fullest blocks first, in
38  * the hope that some of the less full blocks will get completely empty (and
39  * returned back to the OS).
40  *
41  * For each block, we maintain pointer to the first free chunk - this is quite
42  * cheap and allows us to skip all the preceding used chunks, eliminating
43  * a significant number of lookups in many common usage patterns. In the worst
44  * case this performs as if the pointer was not maintained.
45  *
46  * We cache the freelist index for the blocks with the fewest free chunks
47  * (minFreeChunks), so that we don't have to search the freelist on every
48  * SlabAlloc() call, which is quite expensive.
49  *
50  *-------------------------------------------------------------------------
51  */
52 
53 #include "postgres.h"
54 
55 #include "lib/ilist.h"
56 #include "utils/memdebug.h"
57 #include "utils/memutils.h"
58 
59 /*
60  * SlabContext is a specialized implementation of MemoryContext.
61  */
62 typedef struct SlabContext
63 {
64  MemoryContextData header; /* Standard memory-context fields */
65  /* Allocation parameters for this context: */
66  Size chunkSize; /* chunk size */
67  Size fullChunkSize; /* chunk size including header and alignment */
68  Size blockSize; /* block size */
69  Size headerSize; /* allocated size of context header */
70  int chunksPerBlock; /* number of chunks per block */
71  int minFreeChunks; /* min number of free chunks in any block */
72  int nblocks; /* number of blocks allocated */
73 #ifdef MEMORY_CONTEXT_CHECKING
74  bool *freechunks; /* bitmap of free chunks in a block */
75 #endif
76  /* blocks with free space, grouped by number of free chunks: */
78 } SlabContext;
79 
/*
 * SlabBlock
 *		Structure of a single block in SLAB allocator.
 *
 * node: doubly-linked list node; links the block into the context-level
 *		freelist bucket matching its nfree count
 * nfree: number of free chunks in this block
 * firstFreeChunk: index of the first free chunk in the block; equal to
 *		chunksPerBlock when the block is completely full (see the asserts
 *		in SlabAlloc)
 */
typedef struct SlabBlock
{
	dlist_node	node;			/* doubly-linked list */
	int			nfree;			/* number of free chunks */
	int			firstFreeChunk; /* index of the first free chunk in the block */
} SlabBlock;
94 
/*
 * SlabChunk
 *		The prefix of each piece of memory in a SlabBlock
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "slab" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
 * which MAXALIGN is more than twice sizeof(void *), this happens without any
 * special hacking in this struct declaration.  But there is a static
 * assertion below that the alignment is done correctly.
 */
typedef struct SlabChunk
{
	SlabBlock  *block;			/* block owning this chunk */
	SlabContext *slab;			/* owning context */
	/* there must not be any padding to reach a MAXALIGN boundary here! */
} SlabChunk;
112 
113 
/*
 * Address arithmetic between a chunk header, its payload, and its position
 * within a block.  All macro arguments are fully parenthesized so that
 * expression arguments (e.g. "i + 1" as idx) expand correctly.
 */
#define SlabPointerGetChunk(ptr) \
	((SlabChunk *) (((char *) (ptr)) - sizeof(SlabChunk)))
#define SlabChunkGetPointer(chk) \
	((void *) (((char *) (chk)) + sizeof(SlabChunk)))
#define SlabBlockGetChunk(slab, block, idx) \
	((SlabChunk *) ((char *) (block) + sizeof(SlabBlock) \
					+ ((idx) * (slab)->fullChunkSize)))
#define SlabBlockStart(block) \
	((char *) (block) + sizeof(SlabBlock))
#define SlabChunkIndex(slab, block, chunk) \
	(((char *) (chunk) - SlabBlockStart(block)) / (slab)->fullChunkSize)
125 
/*
 * These functions implement the MemoryContext API for Slab contexts.
 * (Forward declarations; the SlabMethods function table below points at
 * these.)
 */
static void *SlabAlloc(MemoryContext context, Size size);
static void SlabFree(MemoryContext context, void *pointer);
static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
static void SlabReset(MemoryContext context);
static void SlabDelete(MemoryContext context);
static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
static bool SlabIsEmpty(MemoryContext context);
static void SlabStats(MemoryContext context,
					  MemoryStatsPrintFunc printfunc, void *passthru,
					  MemoryContextCounters *totals);
#ifdef MEMORY_CONTEXT_CHECKING
static void SlabCheck(MemoryContext context);
#endif
142 
143 /*
144  * This is the virtual function table for Slab contexts.
145  */
147  SlabAlloc,
148  SlabFree,
149  SlabRealloc,
150  SlabReset,
151  SlabDelete,
153  SlabIsEmpty,
154  SlabStats
155 #ifdef MEMORY_CONTEXT_CHECKING
156  ,SlabCheck
157 #endif
158 };
159 
160 
161 /*
162  * SlabContextCreate
163  * Create a new Slab context.
164  *
165  * parent: parent context, or NULL if top-level context
166  * name: name of context (must be statically allocated)
167  * blockSize: allocation block size
168  * chunkSize: allocation chunk size
169  *
170  * The chunkSize may not exceed:
171  * MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
172  */
175  const char *name,
176  Size blockSize,
177  Size chunkSize)
178 {
179  int chunksPerBlock;
181  Size freelistSize;
183  SlabContext *slab;
184  int i;
185 
186  /* Assert we padded SlabChunk properly */
187  StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
188  "sizeof(SlabChunk) is not maxaligned");
190  sizeof(SlabChunk),
191  "padding calculation in SlabChunk is wrong");
192 
193  /* Make sure the linked list node fits inside a freed chunk */
194  if (chunkSize < sizeof(int))
195  chunkSize = sizeof(int);
196 
197  /* chunk, including SLAB header (both addresses nicely aligned) */
198  fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
199 
200  /* Make sure the block can store at least one chunk. */
201  if (blockSize < fullChunkSize + sizeof(SlabBlock))
202  elog(ERROR, "block size %zu for slab is too small for %zu chunks",
203  blockSize, chunkSize);
204 
205  /* Compute maximum number of chunks per block */
206  chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
207 
208  /* The freelist starts with 0, ends with chunksPerBlock. */
209  freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
210 
211  /*
212  * Allocate the context header. Unlike aset.c, we never try to combine
213  * this with the first regular block; not worth the extra complication.
214  */
215 
216  /* Size of the memory context header */
217  headerSize = offsetof(SlabContext, freelist) + freelistSize;
218 
219 #ifdef MEMORY_CONTEXT_CHECKING
220 
221  /*
222  * With memory checking, we need to allocate extra space for the bitmap of
223  * free chunks. The bitmap is an array of bools, so we don't need to worry
224  * about alignment.
225  */
226  headerSize += chunksPerBlock * sizeof(bool);
227 #endif
228 
229  slab = (SlabContext *) malloc(headerSize);
230  if (slab == NULL)
231  {
233  ereport(ERROR,
234  (errcode(ERRCODE_OUT_OF_MEMORY),
235  errmsg("out of memory"),
236  errdetail("Failed while creating memory context \"%s\".",
237  name)));
238  }
239 
240  /*
241  * Avoid writing code that can fail between here and MemoryContextCreate;
242  * we'd leak the header if we ereport in this stretch.
243  */
244 
245  /* Fill in SlabContext-specific header fields */
246  slab->chunkSize = chunkSize;
247  slab->fullChunkSize = fullChunkSize;
248  slab->blockSize = blockSize;
249  slab->headerSize = headerSize;
250  slab->chunksPerBlock = chunksPerBlock;
251  slab->minFreeChunks = 0;
252  slab->nblocks = 0;
253 
254  /* initialize the freelist slots */
255  for (i = 0; i < (slab->chunksPerBlock + 1); i++)
256  dlist_init(&slab->freelist[i]);
257 
258 #ifdef MEMORY_CONTEXT_CHECKING
259  /* set the freechunks pointer right after the freelists array */
260  slab->freechunks
261  = (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
262 #endif
263 
264  /* Finally, do the type-independent part of context creation */
267  &SlabMethods,
268  parent,
269  name);
270 
271  return (MemoryContext) slab;
272 }
273 
274 /*
275  * SlabReset
276  * Frees all memory which is allocated in the given set.
277  *
278  * The code simply frees all the blocks in the context - we don't keep any
279  * keeper blocks or anything like that.
280  */
281 static void
283 {
284  int i;
285  SlabContext *slab = castNode(SlabContext, context);
286 
287  Assert(slab);
288 
289 #ifdef MEMORY_CONTEXT_CHECKING
290  /* Check for corruption and leaks before freeing */
291  SlabCheck(context);
292 #endif
293 
294  /* walk over freelists and free the blocks */
295  for (i = 0; i <= slab->chunksPerBlock; i++)
296  {
297  dlist_mutable_iter miter;
298 
299  dlist_foreach_modify(miter, &slab->freelist[i])
300  {
301  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
302 
303  dlist_delete(miter.cur);
304 
305 #ifdef CLOBBER_FREED_MEMORY
306  wipe_mem(block, slab->blockSize);
307 #endif
308  free(block);
309  slab->nblocks--;
310  context->mem_allocated -= slab->blockSize;
311  }
312  }
313 
314  slab->minFreeChunks = 0;
315 
316  Assert(slab->nblocks == 0);
317  Assert(context->mem_allocated == 0);
318 }
319 
320 /*
321  * SlabDelete
322  * Free all memory which is allocated in the given context.
323  */
324 static void
326 {
327  /* Reset to release all the SlabBlocks */
328  SlabReset(context);
329  /* And free the context header */
330  free(context);
331 }
332 
333 /*
334  * SlabAlloc
335  * Returns pointer to allocated memory of given size or NULL if
336  * request could not be completed; memory is added to the slab.
337  */
338 static void *
340 {
341  SlabContext *slab = castNode(SlabContext, context);
342  SlabBlock *block;
343  SlabChunk *chunk;
344  int idx;
345 
346  Assert(slab);
347 
348  Assert((slab->minFreeChunks >= 0) &&
349  (slab->minFreeChunks < slab->chunksPerBlock));
350 
351  /* make sure we only allow correct request size */
352  if (size != slab->chunkSize)
353  elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
354  size, slab->chunkSize);
355 
356  /*
357  * If there are no free chunks in any existing block, create a new block
358  * and put it to the last freelist bucket.
359  *
360  * slab->minFreeChunks == 0 means there are no blocks with free chunks,
361  * thanks to how minFreeChunks is updated at the end of SlabAlloc().
362  */
363  if (slab->minFreeChunks == 0)
364  {
365  block = (SlabBlock *) malloc(slab->blockSize);
366 
367  if (block == NULL)
368  return NULL;
369 
370  block->nfree = slab->chunksPerBlock;
371  block->firstFreeChunk = 0;
372 
373  /*
374  * Put all the chunks on a freelist. Walk the chunks and point each
375  * one to the next one.
376  */
377  for (idx = 0; idx < slab->chunksPerBlock; idx++)
378  {
379  chunk = SlabBlockGetChunk(slab, block, idx);
380  *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
381  }
382 
383  /*
384  * And add it to the last freelist with all chunks empty.
385  *
386  * We know there are no blocks in the freelist, otherwise we wouldn't
387  * need a new block.
388  */
390 
391  dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
392 
393  slab->minFreeChunks = slab->chunksPerBlock;
394  slab->nblocks += 1;
395  context->mem_allocated += slab->blockSize;
396  }
397 
398  /* grab the block from the freelist (even the new block is there) */
399  block = dlist_head_element(SlabBlock, node,
400  &slab->freelist[slab->minFreeChunks]);
401 
402  /* make sure we actually got a valid block, with matching nfree */
403  Assert(block != NULL);
404  Assert(slab->minFreeChunks == block->nfree);
405  Assert(block->nfree > 0);
406 
407  /* we know index of the first free chunk in the block */
408  idx = block->firstFreeChunk;
409 
410  /* make sure the chunk index is valid, and that it's marked as empty */
411  Assert((idx >= 0) && (idx < slab->chunksPerBlock));
412 
413  /* compute the chunk location block start (after the block header) */
414  chunk = SlabBlockGetChunk(slab, block, idx);
415 
416  /*
417  * Update the block nfree count, and also the minFreeChunks as we've
418  * decreased nfree for a block with the minimum number of free chunks
419  * (because that's how we chose the block).
420  */
421  block->nfree--;
422  slab->minFreeChunks = block->nfree;
423 
424  /*
425  * Remove the chunk from the freelist head. The index of the next free
426  * chunk is stored in the chunk itself.
427  */
429  block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
430 
431  Assert(block->firstFreeChunk >= 0);
432  Assert(block->firstFreeChunk <= slab->chunksPerBlock);
433 
434  Assert((block->nfree != 0 &&
435  block->firstFreeChunk < slab->chunksPerBlock) ||
436  (block->nfree == 0 &&
437  block->firstFreeChunk == slab->chunksPerBlock));
438 
439  /* move the whole block to the right place in the freelist */
440  dlist_delete(&block->node);
441  dlist_push_head(&slab->freelist[block->nfree], &block->node);
442 
443  /*
444  * And finally update minFreeChunks, i.e. the index to the block with the
445  * lowest number of free chunks. We only need to do that when the block
446  * got full (otherwise we know the current block is the right one). We'll
447  * simply walk the freelist until we find a non-empty entry.
448  */
449  if (slab->minFreeChunks == 0)
450  {
451  for (idx = 1; idx <= slab->chunksPerBlock; idx++)
452  {
453  if (dlist_is_empty(&slab->freelist[idx]))
454  continue;
455 
456  /* found a non-empty freelist */
457  slab->minFreeChunks = idx;
458  break;
459  }
460  }
461 
462  if (slab->minFreeChunks == slab->chunksPerBlock)
463  slab->minFreeChunks = 0;
464 
465  /* Prepare to initialize the chunk header. */
466  VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
467 
468  chunk->block = block;
469  chunk->slab = slab;
470 
471 #ifdef MEMORY_CONTEXT_CHECKING
472  /* slab mark to catch clobber of "unused" space */
473  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
474  {
475  set_sentinel(SlabChunkGetPointer(chunk), size);
476  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
477  sizeof(SlabChunk) + slab->chunkSize,
478  slab->fullChunkSize -
479  (slab->chunkSize + sizeof(SlabChunk)));
480  }
481 #endif
482 #ifdef RANDOMIZE_ALLOCATED_MEMORY
483  /* fill the allocated space with junk */
484  randomize_mem((char *) SlabChunkGetPointer(chunk), size);
485 #endif
486 
487  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
488 
489  return SlabChunkGetPointer(chunk);
490 }
491 
/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 */
static void
SlabFree(MemoryContext context, void *pointer)
{
	int			idx;
	SlabContext *slab = castNode(SlabContext, context);
	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
	SlabBlock  *block = chunk->block;

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
		if (!sentinel_ok(pointer, slab->chunkSize))
			elog(WARNING, "detected write past chunk end in %s %p",
				 slab->header.name, chunk);
#endif

	/* compute index of the chunk with respect to block start */
	idx = SlabChunkIndex(slab, block, chunk);

	/* add chunk to freelist, and update block nfree count */
	*(int32 *) pointer = block->firstFreeChunk;
	block->firstFreeChunk = idx;
	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* XXX don't wipe the int32 index, used for block-level freelist */
	wipe_mem((char *) pointer + sizeof(int32),
			 slab->chunkSize - sizeof(int32));
#endif

	/* remove the block from a freelist */
	dlist_delete(&block->node);

	/*
	 * See if we need to update the minFreeChunks field for the slab - we
	 * only need to do that if the block had that number of free chunks
	 * before we freed one.  In that case, we check if there still are blocks
	 * in the original freelist and we either keep the current value (if
	 * there still are blocks) or increment it by one (the new block is still
	 * the one with minimum free chunks).
	 *
	 * The one exception is when the block will get completely free - in that
	 * case we will free it, so we can't use it for minFreeChunks.  It however
	 * means there are no more blocks with free chunks.
	 */
	if (slab->minFreeChunks == (block->nfree - 1))
	{
		/* Have we removed the last chunk from the freelist? */
		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
		{
			/* but if we made the block entirely free, we'll free it */
			if (block->nfree == slab->chunksPerBlock)
				slab->minFreeChunks = 0;
			else
				slab->minFreeChunks++;
		}
	}

	/* If the block is now completely empty, free it. */
	if (block->nfree == slab->chunksPerBlock)
	{
		free(block);
		slab->nblocks--;
		context->mem_allocated -= slab->blockSize;
	}
	else
		dlist_push_head(&slab->freelist[block->nfree], &block->node);

	Assert(slab->nblocks >= 0);
	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}
570 
571 /*
572  * SlabRealloc
573  * Change the allocated size of a chunk.
574  *
575  * As Slab is designed for allocating equally-sized chunks of memory, it can't
576  * do an actual chunk size change. We try to be gentle and allow calls with
577  * exactly the same size, as in that case we can simply return the same
578  * chunk. When the size differs, we throw an error.
579  *
580  * We could also allow requests with size < chunkSize. That however seems
581  * rather pointless - Slab is meant for chunks of constant size, and moreover
582  * realloc is usually used to enlarge the chunk.
583  */
584 static void *
585 SlabRealloc(MemoryContext context, void *pointer, Size size)
586 {
587  SlabContext *slab = castNode(SlabContext, context);
588 
589  Assert(slab);
590 
591  /* can't do actual realloc with slab, but let's try to be gentle */
592  if (size == slab->chunkSize)
593  return pointer;
594 
595  elog(ERROR, "slab allocator does not support realloc()");
596  return NULL; /* keep compiler quiet */
597 }
598 
599 /*
600  * SlabGetChunkSpace
601  * Given a currently-allocated chunk, determine the total space
602  * it occupies (including all memory-allocation overhead).
603  */
604 static Size
605 SlabGetChunkSpace(MemoryContext context, void *pointer)
606 {
607  SlabContext *slab = castNode(SlabContext, context);
608 
609  Assert(slab);
610 
611  return slab->fullChunkSize;
612 }
613 
614 /*
615  * SlabIsEmpty
616  * Is an Slab empty of any allocated space?
617  */
618 static bool
620 {
621  SlabContext *slab = castNode(SlabContext, context);
622 
623  Assert(slab);
624 
625  return (slab->nblocks == 0);
626 }
627 
628 /*
629  * SlabStats
630  * Compute stats about memory consumption of a Slab context.
631  *
632  * printfunc: if not NULL, pass a human-readable stats string to this.
633  * passthru: pass this pointer through to printfunc.
634  * totals: if not NULL, add stats about this context into *totals.
635  */
636 static void
638  MemoryStatsPrintFunc printfunc, void *passthru,
639  MemoryContextCounters *totals)
640 {
641  SlabContext *slab = castNode(SlabContext, context);
642  Size nblocks = 0;
643  Size freechunks = 0;
644  Size totalspace;
645  Size freespace = 0;
646  int i;
647 
648  /* Include context header in totalspace */
649  totalspace = slab->headerSize;
650 
651  for (i = 0; i <= slab->chunksPerBlock; i++)
652  {
653  dlist_iter iter;
654 
655  dlist_foreach(iter, &slab->freelist[i])
656  {
657  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
658 
659  nblocks++;
660  totalspace += slab->blockSize;
661  freespace += slab->fullChunkSize * block->nfree;
662  freechunks += block->nfree;
663  }
664  }
665 
666  if (printfunc)
667  {
668  char stats_string[200];
669 
670  snprintf(stats_string, sizeof(stats_string),
671  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
672  totalspace, nblocks, freespace, freechunks,
673  totalspace - freespace);
674  printfunc(context, passthru, stats_string);
675  }
676 
677  if (totals)
678  {
679  totals->nblocks += nblocks;
680  totals->freechunks += freechunks;
681  totals->totalspace += totalspace;
682  totals->freespace += freespace;
683  }
684 }
685 
686 
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * SlabCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
SlabCheck(MemoryContext context)
{
	int			i;
	SlabContext *slab = castNode(SlabContext, context);
	const char *name = slab->header.name;

	Assert(slab);
	Assert(slab->chunksPerBlock > 0);

	/* walk all the freelists */
	for (i = 0; i <= slab->chunksPerBlock; i++)
	{
		int			j,
					nfree;
		dlist_iter	iter;

		/* walk all blocks on this freelist */
		dlist_foreach(iter, &slab->freelist[i])
		{
			int			idx;
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

			/*
			 * Make sure the number of free chunks (in the block header)
			 * matches position in the freelist.
			 */
			if (block->nfree != i)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
					 name, block->nfree, block, i);

			/* reset the bitmap of free chunks for this block */
			memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
			idx = block->firstFreeChunk;

			/*
			 * Now walk through the chunks, count the free ones and also
			 * perform some additional checks for the used ones.  As the
			 * chunk freelist is stored within the chunks themselves, we have
			 * to walk through the chunks and construct our own bitmap.
			 */

			nfree = 0;
			while (idx < slab->chunksPerBlock)
			{
				SlabChunk  *chunk;

				/* count the chunk as free, add it to the bitmap */
				nfree++;
				slab->freechunks[idx] = true;

				/*
				 * Read index of the next free chunk; it may be NOACCESS to
				 * valgrind, so mark it readable first.
				 */
				chunk = SlabBlockGetChunk(slab, block, idx);
				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
				idx = *(int32 *) SlabChunkGetPointer(chunk);
			}

			for (j = 0; j < slab->chunksPerBlock; j++)
			{
				/* a false bit in the bitmap means the chunk is in use */
				if (!slab->freechunks[j])
				{
					SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);

					/* chunks have both block and slab pointers, so check both */
					if (chunk->block != block)
						elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
							 name, block, chunk);

					if (chunk->slab != slab)
						elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
							 name, block, chunk);

					/*
					 * There might be a sentinel (thanks to alignment).  The
					 * sentinel sits right after the payload, so check from
					 * the payload start, same as SlabFree does.
					 */
					if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
						if (!sentinel_ok(SlabChunkGetPointer(chunk), slab->chunkSize))
							elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
								 name, block, chunk);
				}
			}

			/*
			 * Make sure we got the expected number of free chunks (as
			 * tracked in the block header).
			 */
			if (nfree != block->nfree)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
					 name, block->nfree, block, nfree);
		}
	}

	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */
static Size SlabGetChunkSpace(MemoryContext context, void *pointer)
Definition: slab.c:605
static void SlabReset(MemoryContext context)
Definition: slab.c:282
struct SlabContext SlabContext
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
dlist_node * cur
Definition: ilist.h:180
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
MemoryContextData header
Definition: slab.c:64
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:524
int nblocks
Definition: slab.c:72
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define castNode(_type_, nodeptr)
Definition: nodes.h:598
Size blockSize
Definition: slab.c:68
struct dlist_head dlist_head
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
SlabBlock * block
Definition: slab.c:108
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:283
static void * SlabRealloc(MemoryContext context, void *pointer, Size size)
Definition: slab.c:585
int errcode(int sqlerrcode)
Definition: elog.c:610
dlist_head freelist[FLEXIBLE_ARRAY_MEMBER]
Definition: slab.c:77
MemoryContext SlabContextCreate(MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
Definition: slab.c:174
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
dlist_node node
Definition: slab.c:90
signed int int32
Definition: c.h:362
static void * SlabAlloc(MemoryContext context, Size size)
Definition: slab.c:339
Size chunkSize
Definition: slab.c:66
#define malloc(a)
Definition: header.h:50
static const MemoryContextMethods SlabMethods
Definition: slab.c:146
Size headerSize
Definition: slab.c:69
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:859
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
int firstFreeChunk
Definition: slab.c:92
#define ERROR
Definition: elog.h:43
SlabContext * slab
Definition: slab.c:109
int chunksPerBlock
Definition: slab.c:70
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:499
Size fullChunkSize
Definition: slab.c:67
#define SlabChunkIndex(slab, block, chunk)
Definition: slab.c:123
int errdetail(const char *fmt,...)
Definition: elog.c:957
static bool SlabIsEmpty(MemoryContext context)
Definition: slab.c:619
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:749
int nfree
Definition: slab.c:91
MemoryContext TopMemoryContext
Definition: mcxt.c:44
#define WARNING
Definition: elog.h:40
static void SlabFree(MemoryContext context, void *pointer)
Definition: slab.c:497
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:487
dlist_node * cur
Definition: ilist.h:161
static void dlist_init(dlist_head *head)
Definition: ilist.h:278
#define ereport(elevel,...)
Definition: elog.h:144
#define free(a)
Definition: header.h:65
#define Assert(condition)
Definition: c.h:745
struct SlabBlock SlabBlock
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
size_t Size
Definition: c.h:473
#define MAXALIGN(LEN)
Definition: c.h:698
const char * name
Definition: encode.c:561
Size mem_allocated
Definition: memnodes.h:82
Definition: slab.c:88
#define SlabBlockGetChunk(slab, block, idx)
Definition: slab.c:118
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define elog(elevel,...)
Definition: elog.h:214
int i
int minFreeChunks
Definition: slab.c:71
struct SlabChunk SlabChunk
#define SlabChunkGetPointer(chk)
Definition: slab.c:116
#define SlabPointerGetChunk(ptr)
Definition: slab.c:114
#define snprintf
Definition: port.h:193
const char * name
Definition: memnodes.h:88
#define offsetof(type, field)
Definition: c.h:668
unsigned char bool
Definition: c.h:324
static void SlabDelete(MemoryContext context)
Definition: slab.c:325
static void SlabStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: slab.c:637