PostgreSQL Source Code  git master
slab.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slab.c
4  * SLAB allocator definitions.
5  *
6  * SLAB is a MemoryContext implementation designed for cases where large
7  * numbers of equally-sized objects are allocated (and freed).
8  *
9  *
10  * Portions Copyright (c) 2017-2020, PostgreSQL Global Development Group
11  *
12  * IDENTIFICATION
13  * src/backend/utils/mmgr/slab.c
14  *
15  *
16  * NOTE:
17  * The constant allocation size allows significant simplification and various
18  * optimizations over more general purpose allocators. The blocks are carved
19  * into chunks of exactly the right size (plus alignment), not wasting any
20  * memory.
21  *
22  * The information about free chunks is maintained both at the block level and
23  * global (context) level. This is possible as the chunk size (and thus also
24  * the number of chunks per block) is fixed.
25  *
26  * On each block, free chunks are tracked in a simple linked list. Contents
27  * of free chunks is replaced with an index of the next free chunk, forming
28  * a very simple linked list. Each block also contains a counter of free
29  * chunks. Combined with the local block-level freelist, it makes it trivial
30  * to eventually free the whole block.
31  *
32  * At the context level, we use 'freelist' to track blocks ordered by number
33  * of free chunks, starting with blocks having a single allocated chunk, and
34  * with completely full blocks on the tail.
35  *
36  * This also allows various optimizations - for example when searching for
37  * free chunk, the allocator reuses space from the fullest blocks first, in
38  * the hope that some of the less full blocks will get completely empty (and
39  * returned back to the OS).
40  *
41  * For each block, we maintain pointer to the first free chunk - this is quite
42  * cheap and allows us to skip all the preceding used chunks, eliminating
43  * a significant number of lookups in many common usage patterns. In the worst
44  * case this performs as if the pointer was not maintained.
45  *
46  * We cache the freelist index for the blocks with the fewest free chunks
47  * (minFreeChunks), so that we don't have to search the freelist on every
48  * SlabAlloc() call, which is quite expensive.
49  *
50  *-------------------------------------------------------------------------
51  */
52 
53 #include "postgres.h"
54 
55 #include "lib/ilist.h"
56 #include "utils/memdebug.h"
57 #include "utils/memutils.h"
58 
59 /*
60  * SlabContext is a specialized implementation of MemoryContext.
61  */
62 typedef struct SlabContext
63 {
64  MemoryContextData header; /* Standard memory-context fields */
65  /* Allocation parameters for this context: */
66  Size chunkSize; /* chunk size */
67  Size fullChunkSize; /* chunk size including header and alignment */
68  Size blockSize; /* block size */
69  Size headerSize; /* allocated size of context header */
70  int chunksPerBlock; /* number of chunks per block */
71  int minFreeChunks; /* min number of free chunks in any block */
72  int nblocks; /* number of blocks allocated */
73 #ifdef MEMORY_CONTEXT_CHECKING
74  bool *freechunks; /* bitmap of free chunks in a block */
75 #endif
76  /* blocks with free space, grouped by number of free chunks: */
78 } SlabContext;
79 
/*
 * SlabBlock
 *		Structure of a single block in SLAB allocator.
 *
 * node: doubly-linked list of blocks in global freelist (the block lives on
 *		 the context's freelist entry corresponding to its nfree count)
 * nfree: number of free chunks in this block
 * firstFreeChunk: index of the first free chunk (head of the per-block
 *		 freelist threaded through the free chunks themselves)
 */
typedef struct SlabBlock
{
	dlist_node	node;			/* doubly-linked list */
	int			nfree;			/* number of free chunks */
	int			firstFreeChunk; /* index of the first free chunk in the block */
} SlabBlock;
94 
/*
 * SlabChunk
 *		The prefix of each piece of memory in a SlabBlock
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "slab" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
 * which MAXALIGN is more than twice sizeof(void *), this happens without any
 * special hacking in this struct declaration.  But there is a static
 * assertion below that the alignment is done correctly.
 */
typedef struct SlabChunk
{
	SlabBlock  *block;			/* block owning this chunk */
	SlabContext *slab;			/* owning context */
	/* there must not be any padding to reach a MAXALIGN boundary here! */
} SlabChunk;
112 
113 
/*
 * Conversions between a chunk's payload pointer and its SlabChunk header
 * (which sits immediately before the payload), and helpers to locate/index
 * chunks within a block.  All macro arguments are parenthesized to avoid
 * operator-precedence surprises when callers pass expressions.
 */
#define SlabPointerGetChunk(ptr) \
	((SlabChunk *) (((char *) (ptr)) - sizeof(SlabChunk)))
#define SlabChunkGetPointer(chk) \
	((void *) (((char *) (chk)) + sizeof(SlabChunk)))
#define SlabBlockGetChunk(slab, block, idx) \
	((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)	\
					+ ((idx) * (slab)->fullChunkSize)))
#define SlabBlockStart(block) \
	((char *) (block) + sizeof(SlabBlock))
#define SlabChunkIndex(slab, block, chunk) \
	(((char *) (chunk) - SlabBlockStart(block)) / (slab)->fullChunkSize)
125 
/*
 * These functions implement the MemoryContext API for Slab contexts.
 * They are installed via the SlabMethods virtual function table below.
 */
static void *SlabAlloc(MemoryContext context, Size size);
static void SlabFree(MemoryContext context, void *pointer);
static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
static void SlabReset(MemoryContext context);
static void SlabDelete(MemoryContext context);
static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
static bool SlabIsEmpty(MemoryContext context);
static void SlabStats(MemoryContext context,
					  MemoryStatsPrintFunc printfunc, void *passthru,
					  MemoryContextCounters *totals);
#ifdef MEMORY_CONTEXT_CHECKING
static void SlabCheck(MemoryContext context);
#endif
142 
143 /*
144  * This is the virtual function table for Slab contexts.
145  */
147  SlabAlloc,
148  SlabFree,
149  SlabRealloc,
150  SlabReset,
151  SlabDelete,
153  SlabIsEmpty,
154  SlabStats
155 #ifdef MEMORY_CONTEXT_CHECKING
156  ,SlabCheck
157 #endif
158 };
159 
/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
/*
 * NOTE: these previously printed (_chunk)->header.size, but SlabChunk has no
 * "header" field (see its declaration above), so the HAVE_ALLOCINFO build
 * could not compile.  All chunks in a slab have the same size, so report the
 * context's chunkSize instead.
 */
#define SlabFreeInfo(_cxt, _chunk) \
	fprintf(stderr, "SlabFree: %s: %p, %zu\n", \
			(_cxt)->header.name, (_chunk), (_cxt)->chunkSize)
#define SlabAllocInfo(_cxt, _chunk) \
	fprintf(stderr, "SlabAlloc: %s: %p, %zu\n", \
			(_cxt)->header.name, (_chunk), (_cxt)->chunkSize)
#else
#define SlabFreeInfo(_cxt, _chunk)
#define SlabAllocInfo(_cxt, _chunk)
#endif
175 
176 
177 /*
178  * SlabContextCreate
179  * Create a new Slab context.
180  *
181  * parent: parent context, or NULL if top-level context
182  * name: name of context (must be statically allocated)
183  * blockSize: allocation block size
184  * chunkSize: allocation chunk size
185  *
186  * The chunkSize may not exceed:
187  * MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
188  */
191  const char *name,
192  Size blockSize,
193  Size chunkSize)
194 {
195  int chunksPerBlock;
197  Size freelistSize;
199  SlabContext *slab;
200  int i;
201 
202  /* Assert we padded SlabChunk properly */
203  StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
204  "sizeof(SlabChunk) is not maxaligned");
206  sizeof(SlabChunk),
207  "padding calculation in SlabChunk is wrong");
208 
209  /* Make sure the linked list node fits inside a freed chunk */
210  if (chunkSize < sizeof(int))
211  chunkSize = sizeof(int);
212 
213  /* chunk, including SLAB header (both addresses nicely aligned) */
214  fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
215 
216  /* Make sure the block can store at least one chunk. */
217  if (blockSize < fullChunkSize + sizeof(SlabBlock))
218  elog(ERROR, "block size %zu for slab is too small for %zu chunks",
219  blockSize, chunkSize);
220 
221  /* Compute maximum number of chunks per block */
222  chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
223 
224  /* The freelist starts with 0, ends with chunksPerBlock. */
225  freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
226 
227  /*
228  * Allocate the context header. Unlike aset.c, we never try to combine
229  * this with the first regular block; not worth the extra complication.
230  */
231 
232  /* Size of the memory context header */
233  headerSize = offsetof(SlabContext, freelist) + freelistSize;
234 
235 #ifdef MEMORY_CONTEXT_CHECKING
236  /*
237  * With memory checking, we need to allocate extra space for the bitmap
238  * of free chunks. The bitmap is an array of bools, so we don't need to
239  * worry about alignment.
240  */
241  headerSize += chunksPerBlock * sizeof(bool);
242 #endif
243 
244  slab = (SlabContext *) malloc(headerSize);
245  if (slab == NULL)
246  {
248  ereport(ERROR,
249  (errcode(ERRCODE_OUT_OF_MEMORY),
250  errmsg("out of memory"),
251  errdetail("Failed while creating memory context \"%s\".",
252  name)));
253  }
254 
255  /*
256  * Avoid writing code that can fail between here and MemoryContextCreate;
257  * we'd leak the header if we ereport in this stretch.
258  */
259 
260  /* Fill in SlabContext-specific header fields */
261  slab->chunkSize = chunkSize;
262  slab->fullChunkSize = fullChunkSize;
263  slab->blockSize = blockSize;
264  slab->headerSize = headerSize;
265  slab->chunksPerBlock = chunksPerBlock;
266  slab->minFreeChunks = 0;
267  slab->nblocks = 0;
268 
269  /* initialize the freelist slots */
270  for (i = 0; i < (slab->chunksPerBlock + 1); i++)
271  dlist_init(&slab->freelist[i]);
272 
273 #ifdef MEMORY_CONTEXT_CHECKING
274  /* set the freechunks pointer right after the freelists array */
275  slab->freechunks
276  = (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
277 #endif
278 
279  /* Finally, do the type-independent part of context creation */
282  &SlabMethods,
283  parent,
284  name);
285 
286  return (MemoryContext) slab;
287 }
288 
289 /*
290  * SlabReset
291  * Frees all memory which is allocated in the given set.
292  *
293  * The code simply frees all the blocks in the context - we don't keep any
294  * keeper blocks or anything like that.
295  */
296 static void
298 {
299  int i;
300  SlabContext *slab = castNode(SlabContext, context);
301 
302  Assert(slab);
303 
304 #ifdef MEMORY_CONTEXT_CHECKING
305  /* Check for corruption and leaks before freeing */
306  SlabCheck(context);
307 #endif
308 
309  /* walk over freelists and free the blocks */
310  for (i = 0; i <= slab->chunksPerBlock; i++)
311  {
312  dlist_mutable_iter miter;
313 
314  dlist_foreach_modify(miter, &slab->freelist[i])
315  {
316  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
317 
318  dlist_delete(miter.cur);
319 
320 #ifdef CLOBBER_FREED_MEMORY
321  wipe_mem(block, slab->blockSize);
322 #endif
323  free(block);
324  slab->nblocks--;
325  context->mem_allocated -= slab->blockSize;
326  }
327  }
328 
329  slab->minFreeChunks = 0;
330 
331  Assert(slab->nblocks == 0);
332  Assert(context->mem_allocated == 0);
333 }
334 
335 /*
336  * SlabDelete
337  * Free all memory which is allocated in the given context.
338  */
339 static void
341 {
342  /* Reset to release all the SlabBlocks */
343  SlabReset(context);
344  /* And free the context header */
345  free(context);
346 }
347 
348 /*
349  * SlabAlloc
350  * Returns pointer to allocated memory of given size or NULL if
351  * request could not be completed; memory is added to the slab.
352  */
353 static void *
355 {
356  SlabContext *slab = castNode(SlabContext, context);
357  SlabBlock *block;
358  SlabChunk *chunk;
359  int idx;
360 
361  Assert(slab);
362 
363  Assert((slab->minFreeChunks >= 0) &&
364  (slab->minFreeChunks < slab->chunksPerBlock));
365 
366  /* make sure we only allow correct request size */
367  if (size != slab->chunkSize)
368  elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
369  size, slab->chunkSize);
370 
371  /*
372  * If there are no free chunks in any existing block, create a new block
373  * and put it to the last freelist bucket.
374  *
375  * slab->minFreeChunks == 0 means there are no blocks with free chunks,
376  * thanks to how minFreeChunks is updated at the end of SlabAlloc().
377  */
378  if (slab->minFreeChunks == 0)
379  {
380  block = (SlabBlock *) malloc(slab->blockSize);
381 
382  if (block == NULL)
383  return NULL;
384 
385  block->nfree = slab->chunksPerBlock;
386  block->firstFreeChunk = 0;
387 
388  /*
389  * Put all the chunks on a freelist. Walk the chunks and point each
390  * one to the next one.
391  */
392  for (idx = 0; idx < slab->chunksPerBlock; idx++)
393  {
394  chunk = SlabBlockGetChunk(slab, block, idx);
395  *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
396  }
397 
398  /*
399  * And add it to the last freelist with all chunks empty.
400  *
401  * We know there are no blocks in the freelist, otherwise we wouldn't
402  * need a new block.
403  */
405 
406  dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
407 
408  slab->minFreeChunks = slab->chunksPerBlock;
409  slab->nblocks += 1;
410  context->mem_allocated += slab->blockSize;
411  }
412 
413  /* grab the block from the freelist (even the new block is there) */
414  block = dlist_head_element(SlabBlock, node,
415  &slab->freelist[slab->minFreeChunks]);
416 
417  /* make sure we actually got a valid block, with matching nfree */
418  Assert(block != NULL);
419  Assert(slab->minFreeChunks == block->nfree);
420  Assert(block->nfree > 0);
421 
422  /* we know index of the first free chunk in the block */
423  idx = block->firstFreeChunk;
424 
425  /* make sure the chunk index is valid, and that it's marked as empty */
426  Assert((idx >= 0) && (idx < slab->chunksPerBlock));
427 
428  /* compute the chunk location block start (after the block header) */
429  chunk = SlabBlockGetChunk(slab, block, idx);
430 
431  /*
432  * Update the block nfree count, and also the minFreeChunks as we've
433  * decreased nfree for a block with the minimum number of free chunks
434  * (because that's how we chose the block).
435  */
436  block->nfree--;
437  slab->minFreeChunks = block->nfree;
438 
439  /*
440  * Remove the chunk from the freelist head. The index of the next free
441  * chunk is stored in the chunk itself.
442  */
444  block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
445 
446  Assert(block->firstFreeChunk >= 0);
447  Assert(block->firstFreeChunk <= slab->chunksPerBlock);
448 
449  Assert((block->nfree != 0 &&
450  block->firstFreeChunk < slab->chunksPerBlock) ||
451  (block->nfree == 0 &&
452  block->firstFreeChunk == slab->chunksPerBlock));
453 
454  /* move the whole block to the right place in the freelist */
455  dlist_delete(&block->node);
456  dlist_push_head(&slab->freelist[block->nfree], &block->node);
457 
458  /*
459  * And finally update minFreeChunks, i.e. the index to the block with the
460  * lowest number of free chunks. We only need to do that when the block
461  * got full (otherwise we know the current block is the right one). We'll
462  * simply walk the freelist until we find a non-empty entry.
463  */
464  if (slab->minFreeChunks == 0)
465  {
466  for (idx = 1; idx <= slab->chunksPerBlock; idx++)
467  {
468  if (dlist_is_empty(&slab->freelist[idx]))
469  continue;
470 
471  /* found a non-empty freelist */
472  slab->minFreeChunks = idx;
473  break;
474  }
475  }
476 
477  if (slab->minFreeChunks == slab->chunksPerBlock)
478  slab->minFreeChunks = 0;
479 
480  /* Prepare to initialize the chunk header. */
481  VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
482 
483  chunk->block = block;
484  chunk->slab = slab;
485 
486 #ifdef MEMORY_CONTEXT_CHECKING
487  /* slab mark to catch clobber of "unused" space */
488  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
489  {
490  set_sentinel(SlabChunkGetPointer(chunk), size);
491  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
492  sizeof(SlabChunk) + slab->chunkSize,
493  slab->fullChunkSize -
494  (slab->chunkSize + sizeof(SlabChunk)));
495  }
496 #endif
497 #ifdef RANDOMIZE_ALLOCATED_MEMORY
498  /* fill the allocated space with junk */
499  randomize_mem((char *) SlabChunkGetPointer(chunk), size);
500 #endif
501 
502  SlabAllocInfo(slab, chunk);
503 
504  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
505 
506  return SlabChunkGetPointer(chunk);
507 }
508 
/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 *
 * The chunk is pushed onto its block's internal freelist, the block is moved
 * to the freelist bucket matching its new free count, and a block that
 * becomes entirely free is returned to the OS.
 */
static void
SlabFree(MemoryContext context, void *pointer)
{
	int			idx;
	SlabContext *slab = castNode(SlabContext, context);
	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
	SlabBlock  *block = chunk->block;

	SlabFreeInfo(slab, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
		if (!sentinel_ok(pointer, slab->chunkSize))
			elog(WARNING, "detected write past chunk end in %s %p",
				 slab->header.name, chunk);
#endif

	/* compute index of the chunk with respect to block start */
	idx = SlabChunkIndex(slab, block, chunk);

	/* add chunk to freelist, and update block nfree count */
	*(int32 *) pointer = block->firstFreeChunk;
	block->firstFreeChunk = idx;
	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* XXX don't wipe the int32 index, used for block-level freelist */
	wipe_mem((char *) pointer + sizeof(int32),
			 slab->chunkSize - sizeof(int32));
#endif

	/* remove the block from a freelist */
	dlist_delete(&block->node);

	/*
	 * See if we need to update the minFreeChunks field for the slab - we
	 * only need to do that if the block had that number of free chunks
	 * before we freed one. In that case, we check if there still are blocks
	 * in the original freelist and we either keep the current value (if there
	 * still are blocks) or increment it by one (the new block is still the
	 * one with minimum free chunks).
	 *
	 * The one exception is when the block will get completely free - in that
	 * case we will free it, so we can't use it for minFreeChunks. It however
	 * means there are no more blocks with free chunks.
	 */
	if (slab->minFreeChunks == (block->nfree - 1))
	{
		/* Have we removed the last chunk from the freelist? */
		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
		{
			/* but if we made the block entirely free, we'll free it */
			if (block->nfree == slab->chunksPerBlock)
				slab->minFreeChunks = 0;
			else
				slab->minFreeChunks++;
		}
	}

	/* If the block is now completely empty, free it. */
	if (block->nfree == slab->chunksPerBlock)
	{
		free(block);
		slab->nblocks--;
		context->mem_allocated -= slab->blockSize;
	}
	else
		dlist_push_head(&slab->freelist[block->nfree], &block->node);

	Assert(slab->nblocks >= 0);
	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}
589 
590 /*
591  * SlabRealloc
592  * Change the allocated size of a chunk.
593  *
594  * As Slab is designed for allocating equally-sized chunks of memory, it can't
595  * do an actual chunk size change. We try to be gentle and allow calls with
596  * exactly the same size, as in that case we can simply return the same
597  * chunk. When the size differs, we throw an error.
598  *
599  * We could also allow requests with size < chunkSize. That however seems
600  * rather pointless - Slab is meant for chunks of constant size, and moreover
601  * realloc is usually used to enlarge the chunk.
602  */
603 static void *
604 SlabRealloc(MemoryContext context, void *pointer, Size size)
605 {
606  SlabContext *slab = castNode(SlabContext, context);
607 
608  Assert(slab);
609 
610  /* can't do actual realloc with slab, but let's try to be gentle */
611  if (size == slab->chunkSize)
612  return pointer;
613 
614  elog(ERROR, "slab allocator does not support realloc()");
615  return NULL; /* keep compiler quiet */
616 }
617 
618 /*
619  * SlabGetChunkSpace
620  * Given a currently-allocated chunk, determine the total space
621  * it occupies (including all memory-allocation overhead).
622  */
623 static Size
624 SlabGetChunkSpace(MemoryContext context, void *pointer)
625 {
626  SlabContext *slab = castNode(SlabContext, context);
627 
628  Assert(slab);
629 
630  return slab->fullChunkSize;
631 }
632 
633 /*
634  * SlabIsEmpty
635  * Is an Slab empty of any allocated space?
636  */
637 static bool
639 {
640  SlabContext *slab = castNode(SlabContext, context);
641 
642  Assert(slab);
643 
644  return (slab->nblocks == 0);
645 }
646 
647 /*
648  * SlabStats
649  * Compute stats about memory consumption of a Slab context.
650  *
651  * printfunc: if not NULL, pass a human-readable stats string to this.
652  * passthru: pass this pointer through to printfunc.
653  * totals: if not NULL, add stats about this context into *totals.
654  */
655 static void
657  MemoryStatsPrintFunc printfunc, void *passthru,
658  MemoryContextCounters *totals)
659 {
660  SlabContext *slab = castNode(SlabContext, context);
661  Size nblocks = 0;
662  Size freechunks = 0;
663  Size totalspace;
664  Size freespace = 0;
665  int i;
666 
667  /* Include context header in totalspace */
668  totalspace = slab->headerSize;
669 
670  for (i = 0; i <= slab->chunksPerBlock; i++)
671  {
672  dlist_iter iter;
673 
674  dlist_foreach(iter, &slab->freelist[i])
675  {
676  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
677 
678  nblocks++;
679  totalspace += slab->blockSize;
680  freespace += slab->fullChunkSize * block->nfree;
681  freechunks += block->nfree;
682  }
683  }
684 
685  if (printfunc)
686  {
687  char stats_string[200];
688 
689  snprintf(stats_string, sizeof(stats_string),
690  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
691  totalspace, nblocks, freespace, freechunks,
692  totalspace - freespace);
693  printfunc(context, passthru, stats_string);
694  }
695 
696  if (totals)
697  {
698  totals->nblocks += nblocks;
699  totals->freechunks += freechunks;
700  totals->totalspace += totalspace;
701  totals->freespace += freespace;
702  }
703 }
704 
705 
706 #ifdef MEMORY_CONTEXT_CHECKING
707 
708 /*
709  * SlabCheck
710  * Walk through chunks and check consistency of memory.
711  *
712  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
713  * find yourself in an infinite loop when trouble occurs, because this
714  * routine will be entered again when elog cleanup tries to release memory!
715  */
716 static void
717 SlabCheck(MemoryContext context)
718 {
719  int i;
720  SlabContext *slab = castNode(SlabContext, context);
721  const char *name = slab->header.name;
722 
723  Assert(slab);
724  Assert(slab->chunksPerBlock > 0);
725 
726  /* walk all the freelists */
727  for (i = 0; i <= slab->chunksPerBlock; i++)
728  {
729  int j,
730  nfree;
731  dlist_iter iter;
732 
733  /* walk all blocks on this freelist */
734  dlist_foreach(iter, &slab->freelist[i])
735  {
736  int idx;
737  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
738 
739  /*
740  * Make sure the number of free chunks (in the block header)
741  * matches position in the freelist.
742  */
743  if (block->nfree != i)
744  elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
745  name, block->nfree, block, i);
746 
747  /* reset the bitmap of free chunks for this block */
748  memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
749  idx = block->firstFreeChunk;
750 
751  /*
752  * Now walk through the chunks, count the free ones and also
753  * perform some additional checks for the used ones. As the chunk
754  * freelist is stored within the chunks themselves, we have to
755  * walk through the chunks and construct our own bitmap.
756  */
757 
758  nfree = 0;
759  while (idx < slab->chunksPerBlock)
760  {
761  SlabChunk *chunk;
762 
763  /* count the chunk as free, add it to the bitmap */
764  nfree++;
765  slab->freechunks[idx] = true;
766 
767  /* read index of the next free chunk */
768  chunk = SlabBlockGetChunk(slab, block, idx);
770  idx = *(int32 *) SlabChunkGetPointer(chunk);
771  }
772 
773  for (j = 0; j < slab->chunksPerBlock; j++)
774  {
775  /* non-zero bit in the bitmap means chunk the chunk is used */
776  if (!slab->freechunks[j])
777  {
778  SlabChunk *chunk = SlabBlockGetChunk(slab, block, j);
779 
780  /* chunks have both block and slab pointers, so check both */
781  if (chunk->block != block)
782  elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
783  name, block, chunk);
784 
785  if (chunk->slab != slab)
786  elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
787  name, block, chunk);
788 
789  /* there might be sentinel (thanks to alignment) */
790  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
791  if (!sentinel_ok(chunk, slab->chunkSize))
792  elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
793  name, block, chunk);
794  }
795  }
796 
797  /*
798  * Make sure we got the expected number of free chunks (as tracked
799  * in the block header).
800  */
801  if (nfree != block->nfree)
802  elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
803  name, block->nfree, block, nfree);
804  }
805  }
806 
807  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
808 }
809 
810 #endif /* MEMORY_CONTEXT_CHECKING */
static Size SlabGetChunkSpace(MemoryContext context, void *pointer)
Definition: slab.c:624
static void SlabReset(MemoryContext context)
Definition: slab.c:297
struct SlabContext SlabContext
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
dlist_node * cur
Definition: ilist.h:180
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
MemoryContextData header
Definition: slab.c:64
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:524
int nblocks
Definition: slab.c:72
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define castNode(_type_, nodeptr)
Definition: nodes.h:594
Size blockSize
Definition: slab.c:68
struct dlist_head dlist_head
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
SlabBlock * block
Definition: slab.c:108
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:276
static void * SlabRealloc(MemoryContext context, void *pointer, Size size)
Definition: slab.c:604
int errcode(int sqlerrcode)
Definition: elog.c:608
dlist_head freelist[FLEXIBLE_ARRAY_MEMBER]
Definition: slab.c:77
MemoryContext SlabContextCreate(MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
Definition: slab.c:190
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
dlist_node node
Definition: slab.c:90
#define SlabFreeInfo(_cxt, _chunk)
Definition: slab.c:172
signed int int32
Definition: c.h:355
static void * SlabAlloc(MemoryContext context, Size size)
Definition: slab.c:354
Size chunkSize
Definition: slab.c:66
#define malloc(a)
Definition: header.h:50
static const MemoryContextMethods SlabMethods
Definition: slab.c:146
Size headerSize
Definition: slab.c:69
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:852
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
int firstFreeChunk
Definition: slab.c:92
#define ERROR
Definition: elog.h:43
SlabContext * slab
Definition: slab.c:109
int chunksPerBlock
Definition: slab.c:70
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:498
Size fullChunkSize
Definition: slab.c:67
#define SlabChunkIndex(slab, block, chunk)
Definition: slab.c:123
int errdetail(const char *fmt,...)
Definition: elog.c:955
static bool SlabIsEmpty(MemoryContext context)
Definition: slab.c:638
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:748
#define ereport(elevel, rest)
Definition: elog.h:141
int nfree
Definition: slab.c:91
MemoryContext TopMemoryContext
Definition: mcxt.c:44
#define WARNING
Definition: elog.h:40
static void SlabFree(MemoryContext context, void *pointer)
Definition: slab.c:514
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:487
dlist_node * cur
Definition: ilist.h:161
#define SlabAllocInfo(_cxt, _chunk)
Definition: slab.c:173
static void dlist_init(dlist_head *head)
Definition: ilist.h:278
#define free(a)
Definition: header.h:65
#define Assert(condition)
Definition: c.h:738
struct SlabBlock SlabBlock
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
size_t Size
Definition: c.h:466
#define MAXALIGN(LEN)
Definition: c.h:691
const char * name
Definition: encode.c:521
Size mem_allocated
Definition: memnodes.h:82
Definition: slab.c:88
#define SlabBlockGetChunk(slab, block, idx)
Definition: slab.c:118
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define elog(elevel,...)
Definition: elog.h:228
int i
int minFreeChunks
Definition: slab.c:71
struct SlabChunk SlabChunk
#define SlabChunkGetPointer(chk)
Definition: slab.c:116
#define SlabPointerGetChunk(ptr)
Definition: slab.c:114
#define snprintf
Definition: port.h:192
const char * name
Definition: memnodes.h:88
#define offsetof(type, field)
Definition: c.h:661
unsigned char bool
Definition: c.h:317
static void SlabDelete(MemoryContext context)
Definition: slab.c:340
static void SlabStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: slab.c:656