PostgreSQL Source Code  git master
slab.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slab.c
4  * SLAB allocator definitions.
5  *
6  * SLAB is a MemoryContext implementation designed for cases where large
7  * numbers of equally-sized objects are allocated (and freed).
8  *
9  *
10  * Portions Copyright (c) 2017-2019, PostgreSQL Global Development Group
11  *
12  * IDENTIFICATION
13  * src/backend/utils/mmgr/slab.c
14  *
15  *
16  * NOTE:
17  * The constant allocation size allows significant simplification and various
18  * optimizations over more general purpose allocators. The blocks are carved
19  * into chunks of exactly the right size (plus alignment), not wasting any
20  * memory.
21  *
22  * The information about free chunks is maintained both at the block level and
23  * global (context) level. This is possible as the chunk size (and thus also
24  * the number of chunks per block) is fixed.
25  *
26  * On each block, free chunks are tracked in a simple linked list. Contents
27  * of free chunks is replaced with an index of the next free chunk, forming
28  * a very simple linked list. Each block also contains a counter of free
29  * chunks. Combined with the local block-level freelist, it makes it trivial
30  * to eventually free the whole block.
31  *
32  * At the context level, we use 'freelist' to track blocks ordered by number
33  * of free chunks, starting with blocks having a single allocated chunk, and
34  * with completely full blocks on the tail.
35  *
36  * This also allows various optimizations - for example when searching for
37  * free chunk, the allocator reuses space from the fullest blocks first, in
38  * the hope that some of the less full blocks will get completely empty (and
39  * returned back to the OS).
40  *
41  * For each block, we maintain pointer to the first free chunk - this is quite
42  * cheap and allows us to skip all the preceding used chunks, eliminating
43  * a significant number of lookups in many common usage patterns. In the worst
44  * case this performs as if the pointer was not maintained.
45  *
46  * We cache the freelist index for the blocks with the fewest free chunks
47  * (minFreeChunks), so that we don't have to search the freelist on every
48  * SlabAlloc() call, which is quite expensive.
49  *
50  *-------------------------------------------------------------------------
51  */
52 
53 #include "postgres.h"
54 
55 #include "utils/memdebug.h"
56 #include "utils/memutils.h"
57 #include "lib/ilist.h"
58 
59 
60 /*
61  * SlabContext is a specialized implementation of MemoryContext.
62  */
/*
 * SlabContext is a specialized implementation of MemoryContext.
 */
typedef struct SlabContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Allocation parameters for this context: */
	Size		chunkSize;		/* chunk size (as requested by caller) */
	Size		fullChunkSize;	/* chunk size including header and alignment */
	Size		blockSize;		/* block size */
	Size		headerSize;		/* allocated size of context header */
	int			chunksPerBlock; /* number of chunks per block */
	int			minFreeChunks;	/* min number of free chunks in any block */
	int			nblocks;		/* number of blocks allocated */

	/*
	 * Blocks with free space, grouped by number of free chunks.  There are
	 * (chunksPerBlock + 1) list heads: slot 0 holds completely full blocks,
	 * slot chunksPerBlock holds completely empty ones.
	 */
	dlist_head	freelist[FLEXIBLE_ARRAY_MEMBER];
} SlabContext;
77 
/*
 * SlabBlock
 *		Structure of a single block in SLAB allocator.
 *
 * node: doubly-linked list of blocks in global freelist
 * nfree: number of free chunks in this block
 * firstFreeChunk: index of the first free chunk
 */
typedef struct SlabBlock
{
	dlist_node	node;			/* doubly-linked list link */
	int			nfree;			/* number of free chunks */

	/*
	 * Index of the first free chunk in the block; equals chunksPerBlock when
	 * the block is completely full (i.e. there is no free chunk).
	 */
	int			firstFreeChunk;
} SlabBlock;
92 
/*
 * SlabChunk
 *		The prefix of each piece of memory in a SlabBlock
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "slab" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext).  Since we support no machines on
 * which MAXALIGN is more than twice sizeof(void *), this happens without any
 * special hacking in this struct declaration.  But there is a static
 * assertion below that the alignment is done correctly.
 */
typedef struct SlabChunk
{
	SlabBlock  *block;			/* block owning this chunk */
	SlabContext *slab;			/* owning context */
	/* there must not be any padding to reach a MAXALIGN boundary here! */
} SlabChunk;
110 
111 
/*
 * Conversion macros between user pointers, chunk headers, and chunk indexes.
 * All macro arguments are parenthesized so callers may pass expressions
 * (the original left "idx", "block", "chunk" and "slab" bare, which would
 * mis-bind for arguments like "i + 1").
 */
#define SlabPointerGetChunk(ptr) \
	((SlabChunk *) (((char *) (ptr)) - sizeof(SlabChunk)))
#define SlabChunkGetPointer(chk) \
	((void *) (((char *) (chk)) + sizeof(SlabChunk)))
#define SlabBlockGetChunk(slab, block, idx) \
	((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)	\
					+ ((idx) * (slab)->fullChunkSize)))
#define SlabBlockStart(block) \
	((char *) (block) + sizeof(SlabBlock))
#define SlabChunkIndex(slab, block, chunk) \
	(((char *) (chunk) - SlabBlockStart(block)) / (slab)->fullChunkSize)
123 
/*
 * These functions implement the MemoryContext API for Slab contexts.
 */
static void *SlabAlloc(MemoryContext context, Size size);
static void SlabFree(MemoryContext context, void *pointer);
static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
static void SlabReset(MemoryContext context);
static void SlabDelete(MemoryContext context);
static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
static bool SlabIsEmpty(MemoryContext context);
static void SlabStats(MemoryContext context,
					  MemoryStatsPrintFunc printfunc, void *passthru,
					  MemoryContextCounters *totals);
#ifdef MEMORY_CONTEXT_CHECKING
static void SlabCheck(MemoryContext context);
#endif
140 
141 /*
142  * This is the virtual function table for Slab contexts.
143  */
145  SlabAlloc,
146  SlabFree,
147  SlabRealloc,
148  SlabReset,
149  SlabDelete,
151  SlabIsEmpty,
152  SlabStats
153 #ifdef MEMORY_CONTEXT_CHECKING
154  ,SlabCheck
155 #endif
156 };
157 
/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
/*
 * NOTE(review): SlabChunk has no "header" field, so "(_chunk)->header.size"
 * would not compile if HAVE_ALLOCINFO were defined -- this tracing code
 * looks stale (copied from aset.c); confirm before enabling.
 */
#define SlabFreeInfo(_cxt, _chunk) \
	fprintf(stderr, "SlabFree: %s: %p, %zu\n", \
			(_cxt)->header.name, (_chunk), (_chunk)->header.size)
#define SlabAllocInfo(_cxt, _chunk) \
	fprintf(stderr, "SlabAlloc: %s: %p, %zu\n", \
			(_cxt)->header.name, (_chunk), (_chunk)->header.size)
#else
#define SlabFreeInfo(_cxt, _chunk)
#define SlabAllocInfo(_cxt, _chunk)
#endif
173 
174 
175 /*
176  * SlabContextCreate
177  * Create a new Slab context.
178  *
179  * parent: parent context, or NULL if top-level context
180  * name: name of context (must be statically allocated)
181  * blockSize: allocation block size
182  * chunkSize: allocation chunk size
183  *
184  * The chunkSize may not exceed:
185  * MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
186  */
189  const char *name,
190  Size blockSize,
191  Size chunkSize)
192 {
193  int chunksPerBlock;
195  Size freelistSize;
197  SlabContext *slab;
198  int i;
199 
200  /* Assert we padded SlabChunk properly */
201  StaticAssertStmt(sizeof(SlabChunk) == MAXALIGN(sizeof(SlabChunk)),
202  "sizeof(SlabChunk) is not maxaligned");
204  sizeof(SlabChunk),
205  "padding calculation in SlabChunk is wrong");
206 
207  /* Make sure the linked list node fits inside a freed chunk */
208  if (chunkSize < sizeof(int))
209  chunkSize = sizeof(int);
210 
211  /* chunk, including SLAB header (both addresses nicely aligned) */
212  fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);
213 
214  /* Make sure the block can store at least one chunk. */
215  if (blockSize < fullChunkSize + sizeof(SlabBlock))
216  elog(ERROR, "block size %zu for slab is too small for %zu chunks",
217  blockSize, chunkSize);
218 
219  /* Compute maximum number of chunks per block */
220  chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
221 
222  /* The freelist starts with 0, ends with chunksPerBlock. */
223  freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
224 
225  /*
226  * Allocate the context header. Unlike aset.c, we never try to combine
227  * this with the first regular block; not worth the extra complication.
228  */
229 
230  /* Size of the memory context header */
231  headerSize = offsetof(SlabContext, freelist) + freelistSize;
232 
233  slab = (SlabContext *) malloc(headerSize);
234  if (slab == NULL)
235  {
237  ereport(ERROR,
238  (errcode(ERRCODE_OUT_OF_MEMORY),
239  errmsg("out of memory"),
240  errdetail("Failed while creating memory context \"%s\".",
241  name)));
242  }
243 
244  /*
245  * Avoid writing code that can fail between here and MemoryContextCreate;
246  * we'd leak the header if we ereport in this stretch.
247  */
248 
249  /* Fill in SlabContext-specific header fields */
250  slab->chunkSize = chunkSize;
252  slab->blockSize = blockSize;
253  slab->headerSize = headerSize;
255  slab->minFreeChunks = 0;
256  slab->nblocks = 0;
257 
258  /* initialize the freelist slots */
259  for (i = 0; i < (slab->chunksPerBlock + 1); i++)
260  dlist_init(&slab->freelist[i]);
261 
262  /* Finally, do the type-independent part of context creation */
265  &SlabMethods,
266  parent,
267  name);
268 
269  return (MemoryContext) slab;
270 }
271 
272 /*
273  * SlabReset
274  * Frees all memory which is allocated in the given set.
275  *
276  * The code simply frees all the blocks in the context - we don't keep any
277  * keeper blocks or anything like that.
278  */
279 static void
281 {
282  int i;
283  SlabContext *slab = castNode(SlabContext, context);
284 
285  Assert(slab);
286 
287 #ifdef MEMORY_CONTEXT_CHECKING
288  /* Check for corruption and leaks before freeing */
289  SlabCheck(context);
290 #endif
291 
292  /* walk over freelists and free the blocks */
293  for (i = 0; i <= slab->chunksPerBlock; i++)
294  {
295  dlist_mutable_iter miter;
296 
297  dlist_foreach_modify(miter, &slab->freelist[i])
298  {
299  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
300 
301  dlist_delete(miter.cur);
302 
303 #ifdef CLOBBER_FREED_MEMORY
304  wipe_mem(block, slab->blockSize);
305 #endif
306  free(block);
307  slab->nblocks--;
308  context->mem_allocated -= slab->blockSize;
309  }
310  }
311 
312  slab->minFreeChunks = 0;
313 
314  Assert(slab->nblocks == 0);
315  Assert(context->mem_allocated == 0);
316 }
317 
318 /*
319  * SlabDelete
320  * Free all memory which is allocated in the given context.
321  */
322 static void
324 {
325  /* Reset to release all the SlabBlocks */
326  SlabReset(context);
327  /* And free the context header */
328  free(context);
329 }
330 
331 /*
332  * SlabAlloc
333  * Returns pointer to allocated memory of given size or NULL if
334  * request could not be completed; memory is added to the slab.
335  */
336 static void *
338 {
339  SlabContext *slab = castNode(SlabContext, context);
340  SlabBlock *block;
341  SlabChunk *chunk;
342  int idx;
343 
344  Assert(slab);
345 
346  Assert((slab->minFreeChunks >= 0) &&
347  (slab->minFreeChunks < slab->chunksPerBlock));
348 
349  /* make sure we only allow correct request size */
350  if (size != slab->chunkSize)
351  elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
352  size, slab->chunkSize);
353 
354  /*
355  * If there are no free chunks in any existing block, create a new block
356  * and put it to the last freelist bucket.
357  *
358  * slab->minFreeChunks == 0 means there are no blocks with free chunks,
359  * thanks to how minFreeChunks is updated at the end of SlabAlloc().
360  */
361  if (slab->minFreeChunks == 0)
362  {
363  block = (SlabBlock *) malloc(slab->blockSize);
364 
365  if (block == NULL)
366  return NULL;
367 
368  block->nfree = slab->chunksPerBlock;
369  block->firstFreeChunk = 0;
370 
371  /*
372  * Put all the chunks on a freelist. Walk the chunks and point each
373  * one to the next one.
374  */
375  for (idx = 0; idx < slab->chunksPerBlock; idx++)
376  {
377  chunk = SlabBlockGetChunk(slab, block, idx);
378  *(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
379  }
380 
381  /*
382  * And add it to the last freelist with all chunks empty.
383  *
384  * We know there are no blocks in the freelist, otherwise we wouldn't
385  * need a new block.
386  */
388 
389  dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
390 
391  slab->minFreeChunks = slab->chunksPerBlock;
392  slab->nblocks += 1;
393  context->mem_allocated += slab->blockSize;
394  }
395 
396  /* grab the block from the freelist (even the new block is there) */
397  block = dlist_head_element(SlabBlock, node,
398  &slab->freelist[slab->minFreeChunks]);
399 
400  /* make sure we actually got a valid block, with matching nfree */
401  Assert(block != NULL);
402  Assert(slab->minFreeChunks == block->nfree);
403  Assert(block->nfree > 0);
404 
405  /* we know index of the first free chunk in the block */
406  idx = block->firstFreeChunk;
407 
408  /* make sure the chunk index is valid, and that it's marked as empty */
409  Assert((idx >= 0) && (idx < slab->chunksPerBlock));
410 
411  /* compute the chunk location block start (after the block header) */
412  chunk = SlabBlockGetChunk(slab, block, idx);
413 
414  /*
415  * Update the block nfree count, and also the minFreeChunks as we've
416  * decreased nfree for a block with the minimum number of free chunks
417  * (because that's how we chose the block).
418  */
419  block->nfree--;
420  slab->minFreeChunks = block->nfree;
421 
422  /*
423  * Remove the chunk from the freelist head. The index of the next free
424  * chunk is stored in the chunk itself.
425  */
427  block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);
428 
429  Assert(block->firstFreeChunk >= 0);
430  Assert(block->firstFreeChunk <= slab->chunksPerBlock);
431 
432  Assert((block->nfree != 0 &&
433  block->firstFreeChunk < slab->chunksPerBlock) ||
434  (block->nfree == 0 &&
435  block->firstFreeChunk == slab->chunksPerBlock));
436 
437  /* move the whole block to the right place in the freelist */
438  dlist_delete(&block->node);
439  dlist_push_head(&slab->freelist[block->nfree], &block->node);
440 
441  /*
442  * And finally update minFreeChunks, i.e. the index to the block with the
443  * lowest number of free chunks. We only need to do that when the block
444  * got full (otherwise we know the current block is the right one). We'll
445  * simply walk the freelist until we find a non-empty entry.
446  */
447  if (slab->minFreeChunks == 0)
448  {
449  for (idx = 1; idx <= slab->chunksPerBlock; idx++)
450  {
451  if (dlist_is_empty(&slab->freelist[idx]))
452  continue;
453 
454  /* found a non-empty freelist */
455  slab->minFreeChunks = idx;
456  break;
457  }
458  }
459 
460  if (slab->minFreeChunks == slab->chunksPerBlock)
461  slab->minFreeChunks = 0;
462 
463  /* Prepare to initialize the chunk header. */
464  VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));
465 
466  chunk->block = block;
467  chunk->slab = slab;
468 
469 #ifdef MEMORY_CONTEXT_CHECKING
470  /* slab mark to catch clobber of "unused" space */
471  if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
472  {
473  set_sentinel(SlabChunkGetPointer(chunk), size);
474  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
475  sizeof(SlabChunk) + slab->chunkSize,
476  slab->fullChunkSize -
477  (slab->chunkSize + sizeof(SlabChunk)));
478  }
479 #endif
480 #ifdef RANDOMIZE_ALLOCATED_MEMORY
481  /* fill the allocated space with junk */
482  randomize_mem((char *) SlabChunkGetPointer(chunk), size);
483 #endif
484 
485  SlabAllocInfo(slab, chunk);
486 
487  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
488 
489  return SlabChunkGetPointer(chunk);
490 }
491 
/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 */
static void
SlabFree(MemoryContext context, void *pointer)
{
	int			idx;
	SlabContext *slab = castNode(SlabContext, context);
	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
	SlabBlock  *block = chunk->block;

	SlabFreeInfo(slab, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
		if (!sentinel_ok(pointer, slab->chunkSize))
			elog(WARNING, "detected write past chunk end in %s %p",
				 slab->header.name, chunk);
#endif

	/* compute index of the chunk with respect to block start */
	idx = SlabChunkIndex(slab, block, chunk);

	/* add chunk to freelist, and update block nfree count */
	*(int32 *) pointer = block->firstFreeChunk;
	block->firstFreeChunk = idx;
	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* XXX don't wipe the int32 index, used for block-level freelist */
	wipe_mem((char *) pointer + sizeof(int32),
			 slab->chunkSize - sizeof(int32));
#endif

	/* remove the block from a freelist */
	dlist_delete(&block->node);

	/*
	 * See if we need to update the minFreeChunks field for the slab - we only
	 * need to do that if the block had that number of free chunks
	 * before we freed one. In that case, we check if there still are blocks
	 * in the original freelist and we either keep the current value (if there
	 * still are blocks) or increment it by one (the new block is still the
	 * one with minimum free chunks).
	 *
	 * The one exception is when the block will get completely free - in that
	 * case we will free it, so we can't use it for minFreeChunks. It however
	 * means there are no more blocks with free chunks.
	 */
	if (slab->minFreeChunks == (block->nfree - 1))
	{
		/* Have we removed the last chunk from the freelist? */
		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
		{
			/* but if we made the block entirely free, we'll free it */
			if (block->nfree == slab->chunksPerBlock)
				slab->minFreeChunks = 0;
			else
				slab->minFreeChunks++;
		}
	}

	/* If the block is now completely empty, free it. */
	if (block->nfree == slab->chunksPerBlock)
	{
		free(block);
		slab->nblocks--;
		context->mem_allocated -= slab->blockSize;
	}
	else
		dlist_push_head(&slab->freelist[block->nfree], &block->node);

	Assert(slab->nblocks >= 0);
	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}
572 
573 /*
574  * SlabRealloc
575  * Change the allocated size of a chunk.
576  *
577  * As Slab is designed for allocating equally-sized chunks of memory, it can't
578  * do an actual chunk size change. We try to be gentle and allow calls with
579  * exactly the same size, as in that case we can simply return the same
580  * chunk. When the size differs, we throw an error.
581  *
582  * We could also allow requests with size < chunkSize. That however seems
583  * rather pointless - Slab is meant for chunks of constant size, and moreover
584  * realloc is usually used to enlarge the chunk.
585  */
586 static void *
587 SlabRealloc(MemoryContext context, void *pointer, Size size)
588 {
589  SlabContext *slab = castNode(SlabContext, context);
590 
591  Assert(slab);
592 
593  /* can't do actual realloc with slab, but let's try to be gentle */
594  if (size == slab->chunkSize)
595  return pointer;
596 
597  elog(ERROR, "slab allocator does not support realloc()");
598  return NULL; /* keep compiler quiet */
599 }
600 
/*
 * SlabGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 */
static Size
SlabGetChunkSpace(MemoryContext context, void *pointer)
{
	SlabContext *slab = castNode(SlabContext, context);

	Assert(slab);

	/* every chunk in a slab context is the same size, so ignore "pointer" */
	return slab->fullChunkSize;
}
615 
616 /*
617  * SlabIsEmpty
618  * Is an Slab empty of any allocated space?
619  */
620 static bool
622 {
623  SlabContext *slab = castNode(SlabContext, context);
624 
625  Assert(slab);
626 
627  return (slab->nblocks == 0);
628 }
629 
630 /*
631  * SlabStats
632  * Compute stats about memory consumption of a Slab context.
633  *
634  * printfunc: if not NULL, pass a human-readable stats string to this.
635  * passthru: pass this pointer through to printfunc.
636  * totals: if not NULL, add stats about this context into *totals.
637  */
638 static void
640  MemoryStatsPrintFunc printfunc, void *passthru,
641  MemoryContextCounters *totals)
642 {
643  SlabContext *slab = castNode(SlabContext, context);
644  Size nblocks = 0;
645  Size freechunks = 0;
646  Size totalspace;
647  Size freespace = 0;
648  int i;
649 
650  /* Include context header in totalspace */
651  totalspace = slab->headerSize;
652 
653  for (i = 0; i <= slab->chunksPerBlock; i++)
654  {
655  dlist_iter iter;
656 
657  dlist_foreach(iter, &slab->freelist[i])
658  {
659  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
660 
661  nblocks++;
662  totalspace += slab->blockSize;
663  freespace += slab->fullChunkSize * block->nfree;
664  freechunks += block->nfree;
665  }
666  }
667 
668  if (printfunc)
669  {
670  char stats_string[200];
671 
672  snprintf(stats_string, sizeof(stats_string),
673  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
674  totalspace, nblocks, freespace, freechunks,
675  totalspace - freespace);
676  printfunc(context, passthru, stats_string);
677  }
678 
679  if (totals)
680  {
681  totals->nblocks += nblocks;
682  totals->freechunks += freechunks;
683  totals->totalspace += totalspace;
684  totals->freespace += freespace;
685  }
686 }
687 
688 
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * SlabCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
SlabCheck(MemoryContext context)
{
	int			i;
	SlabContext *slab = castNode(SlabContext, context);
	const char *name = slab->header.name;
	bool	   *freechunks;

	Assert(slab);
	Assert(slab->chunksPerBlock > 0);

	/* bitmap of free chunks on a block (allocated in current context) */
	freechunks = palloc(slab->chunksPerBlock * sizeof(bool));

	/* walk all the freelists */
	for (i = 0; i <= slab->chunksPerBlock; i++)
	{
		int			j,
					nfree;
		dlist_iter	iter;

		/* walk all blocks on this freelist */
		dlist_foreach(iter, &slab->freelist[i])
		{
			int			idx;
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

			/*
			 * Make sure the number of free chunks (in the block header)
			 * matches position in the freelist.
			 */
			if (block->nfree != i)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
					 name, block->nfree, block, i);

			/* reset the bitmap of free chunks for this block */
			memset(freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
			idx = block->firstFreeChunk;

			/*
			 * Now walk through the chunks, count the free ones and also
			 * perform some additional checks for the used ones. As the chunk
			 * freelist is stored within the chunks themselves, we have to
			 * walk through the chunks and construct our own bitmap.
			 */

			nfree = 0;
			while (idx < slab->chunksPerBlock)
			{
				SlabChunk  *chunk;

				/* count the chunk as free, add it to the bitmap */
				nfree++;
				freechunks[idx] = true;

				/* read index of the next free chunk */
				chunk = SlabBlockGetChunk(slab, block, idx);
				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
				idx = *(int32 *) SlabChunkGetPointer(chunk);
			}

			for (j = 0; j < slab->chunksPerBlock; j++)
			{
				/* non-zero bit in the bitmap means the chunk is used */
				if (!freechunks[j])
				{
					SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);

					/* chunks have both block and slab pointers, so check both */
					if (chunk->block != block)
						elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
							 name, block, chunk);

					if (chunk->slab != slab)
						elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
							 name, block, chunk);

					/*
					 * There might be a sentinel (thanks to alignment).  It
					 * lives right past the payload, i.e. at
					 * SlabChunkGetPointer(chunk) + chunkSize -- matching
					 * set_sentinel() in SlabAlloc() and the check in
					 * SlabFree().  (Checking from "chunk" itself would read
					 * inside the payload instead.)
					 */
					if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
						if (!sentinel_ok(SlabChunkGetPointer(chunk), slab->chunkSize))
							elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
								 name, block, chunk);
				}
			}

			/*
			 * Make sure we got the expected number of free chunks (as tracked
			 * in the block header).
			 */
			if (nfree != block->nfree)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
					 name, block->nfree, block, nfree);
		}
	}

	/* don't leak the scratch bitmap into the current context */
	pfree(freechunks);

	Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */
static Size SlabGetChunkSpace(MemoryContext context, void *pointer)
Definition: slab.c:607
static void SlabReset(MemoryContext context)
Definition: slab.c:280
struct SlabContext SlabContext
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
dlist_node * cur
Definition: ilist.h:180
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
MemoryContextData header
Definition: slab.c:65
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:524
int nblocks
Definition: slab.c:73
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define castNode(_type_, nodeptr)
Definition: nodes.h:594
Size blockSize
Definition: slab.c:69
struct dlist_head dlist_head
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
SlabBlock * block
Definition: slab.c:106
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static void * SlabRealloc(MemoryContext context, void *pointer, Size size)
Definition: slab.c:587
int errcode(int sqlerrcode)
Definition: elog.c:570
dlist_head freelist[FLEXIBLE_ARRAY_MEMBER]
Definition: slab.c:75
MemoryContext SlabContextCreate(MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
Definition: slab.c:188
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:264
dlist_node node
Definition: slab.c:88
#define SlabFreeInfo(_cxt, _chunk)
Definition: slab.c:170
signed int int32
Definition: c.h:346
static void * SlabAlloc(MemoryContext context, Size size)
Definition: slab.c:337
Size chunkSize
Definition: slab.c:67
#define malloc(a)
Definition: header.h:50
static const MemoryContextMethods SlabMethods
Definition: slab.c:144
Size headerSize
Definition: slab.c:70
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:842
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
int firstFreeChunk
Definition: slab.c:90
#define ERROR
Definition: elog.h:43
SlabContext * slab
Definition: slab.c:107
int chunksPerBlock
Definition: slab.c:71
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:498
Size fullChunkSize
Definition: slab.c:68
#define SlabChunkIndex(slab, block, chunk)
Definition: slab.c:121
int errdetail(const char *fmt,...)
Definition: elog.c:860
static bool SlabIsEmpty(MemoryContext context)
Definition: slab.c:621
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:748
#define ereport(elevel, rest)
Definition: elog.h:141
int nfree
Definition: slab.c:89
MemoryContext TopMemoryContext
Definition: mcxt.c:44
#define WARNING
Definition: elog.h:40
static void SlabFree(MemoryContext context, void *pointer)
Definition: slab.c:497
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:487
dlist_node * cur
Definition: ilist.h:161
#define SlabAllocInfo(_cxt, _chunk)
Definition: slab.c:171
static void dlist_init(dlist_head *head)
Definition: ilist.h:278
#define free(a)
Definition: header.h:65
#define Assert(condition)
Definition: c.h:732
struct SlabBlock SlabBlock
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
size_t Size
Definition: c.h:466
#define MAXALIGN(LEN)
Definition: c.h:685
const char * name
Definition: encode.c:521
Size mem_allocated
Definition: memnodes.h:82
Definition: slab.c:86
#define SlabBlockGetChunk(slab, block, idx)
Definition: slab.c:116
void * palloc(Size size)
Definition: mcxt.c:949
int errmsg(const char *fmt,...)
Definition: elog.c:784
#define elog(elevel,...)
Definition: elog.h:226
int i
int minFreeChunks
Definition: slab.c:72
struct SlabChunk SlabChunk
#define SlabChunkGetPointer(chk)
Definition: slab.c:114
#define SlabPointerGetChunk(ptr)
Definition: slab.c:112
#define snprintf
Definition: port.h:192
const char * name
Definition: memnodes.h:88
#define offsetof(type, field)
Definition: c.h:655
static void SlabDelete(MemoryContext context)
Definition: slab.c:323
static void SlabStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: slab.c:639