PostgreSQL Source Code  git master
slab.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slab.c
4  * SLAB allocator definitions.
5  *
6  * SLAB is a MemoryContext implementation designed for cases where large
7  * numbers of equally-sized objects are allocated (and freed).
8  *
9  *
10  * Portions Copyright (c) 2017-2022, PostgreSQL Global Development Group
11  *
12  * IDENTIFICATION
13  * src/backend/utils/mmgr/slab.c
14  *
15  *
16  * NOTE:
17  * The constant allocation size allows significant simplification and various
18  * optimizations over more general purpose allocators. The blocks are carved
19  * into chunks of exactly the right size (plus alignment), not wasting any
20  * memory.
21  *
22  * The information about free chunks is maintained both at the block level and
23  * global (context) level. This is possible as the chunk size (and thus also
24  * the number of chunks per block) is fixed.
25  *
26  * On each block, free chunks are tracked in a simple linked list. Contents
27  * of free chunks is replaced with an index of the next free chunk, forming
28  * a very simple linked list. Each block also contains a counter of free
29  * chunks. Combined with the local block-level freelist, it makes it trivial
30  * to eventually free the whole block.
31  *
32  * At the context level, we use 'freelist' to track blocks ordered by number
33  * of free chunks, starting with blocks having a single allocated chunk, and
34  * with completely full blocks on the tail.
35  *
36  * This also allows various optimizations - for example when searching for
37  * free chunk, the allocator reuses space from the fullest blocks first, in
38  * the hope that some of the less full blocks will get completely empty (and
39  * returned back to the OS).
40  *
41  * For each block, we maintain pointer to the first free chunk - this is quite
42  * cheap and allows us to skip all the preceding used chunks, eliminating
43  * a significant number of lookups in many common usage patterns. In the worst
44  * case this performs as if the pointer was not maintained.
45  *
46  * We cache the freelist index for the blocks with the fewest free chunks
47  * (minFreeChunks), so that we don't have to search the freelist on every
48  * SlabAlloc() call, which is quite expensive.
49  *
50  *-------------------------------------------------------------------------
51  */
52 
53 #include "postgres.h"
54 
55 #include "lib/ilist.h"
56 #include "utils/memdebug.h"
57 #include "utils/memutils.h"
60 
/* Size of the per-block header, rounded up to a MAXALIGN boundary */
#define Slab_BLOCKHDRSZ	MAXALIGN(sizeof(SlabBlock))
62 
63 /*
64  * SlabContext is a specialized implementation of MemoryContext.
65  */
66 typedef struct SlabContext
67 {
68  MemoryContextData header; /* Standard memory-context fields */
69  /* Allocation parameters for this context: */
70  Size chunkSize; /* chunk size */
71  Size fullChunkSize; /* chunk size including header and alignment */
72  Size blockSize; /* block size */
73  Size headerSize; /* allocated size of context header */
74  int chunksPerBlock; /* number of chunks per block */
75  int minFreeChunks; /* min number of free chunks in any block */
76  int nblocks; /* number of blocks allocated */
77 #ifdef MEMORY_CONTEXT_CHECKING
78  bool *freechunks; /* bitmap of free chunks in a block */
79 #endif
80  /* blocks with free space, grouped by number of free chunks: */
83 
84 /*
85  * SlabBlock
86  * Structure of a single block in SLAB allocator.
87  *
88  * node: doubly-linked list of blocks in global freelist
89  * nfree: number of free chunks in this block
90  * firstFreeChunk: index of the first free chunk
91  */
92 typedef struct SlabBlock
93 {
94  dlist_node node; /* doubly-linked list */
95  int nfree; /* number of free chunks */
96  int firstFreeChunk; /* index of the first free chunk in the block */
97  SlabContext *slab; /* owning context */
99 
100 
#define Slab_CHUNKHDRSZ sizeof(MemoryChunk)

/* Translate between a chunk header and the user-visible data pointer. */
#define SlabPointerGetChunk(ptr) \
	((MemoryChunk *) (((char *) (ptr)) - sizeof(MemoryChunk)))
#define SlabChunkGetPointer(chk) \
	((void *) (((char *) (chk)) + sizeof(MemoryChunk)))

/*
 * Address of the idx'th chunk in the block: chunks are laid out back to back
 * after the (MAXALIGN'ed) block header, each fullChunkSize bytes apart.
 *
 * Note: all macro arguments are parenthesized so that expression arguments
 * (e.g. "i + 1" as idx) expand correctly.
 */
#define SlabBlockGetChunk(slab, block, idx) \
	((MemoryChunk *) ((char *) (block) + Slab_BLOCKHDRSZ \
					  + ((idx) * (slab)->fullChunkSize)))
/* First chunk-bearing byte of a block (just past the block header). */
#define SlabBlockStart(block) \
	((char *) (block) + Slab_BLOCKHDRSZ)
/* Inverse of SlabBlockGetChunk: index of a chunk within its block. */
#define SlabChunkIndex(slab, block, chunk) \
	(((char *) (chunk) - SlabBlockStart(block)) / (slab)->fullChunkSize)
113 
/*
 * SlabIsValid
 *		True iff set is a valid slab allocation set.
 */
#define SlabIsValid(set) \
	(PointerIsValid(set) && IsA(set, SlabContext))

/*
 * SlabBlockIsValid
 *		True iff block is a valid block of a slab allocation set,
 *		i.e. its back-link points at a valid SlabContext.
 */
#define SlabBlockIsValid(block) \
	(PointerIsValid(block) && SlabIsValid((block)->slab))
127 
128 
129 /*
130  * SlabContextCreate
131  * Create a new Slab context.
132  *
133  * parent: parent context, or NULL if top-level context
134  * name: name of context (must be statically allocated)
135  * blockSize: allocation block size
136  * chunkSize: allocation chunk size
137  *
138  * The MAXALIGN(chunkSize) may not exceed MEMORYCHUNK_MAX_VALUE
139  */
142  const char *name,
143  Size blockSize,
144  Size chunkSize)
145 {
146  int chunksPerBlock;
147  Size fullChunkSize;
148  Size freelistSize;
149  Size headerSize;
150  SlabContext *slab;
151  int i;
152 
153  /* ensure MemoryChunk's size is properly maxaligned */
155  "sizeof(MemoryChunk) is not maxaligned");
156  Assert(MAXALIGN(chunkSize) <= MEMORYCHUNK_MAX_VALUE);
157 
158  /* Make sure the linked list node fits inside a freed chunk */
159  if (chunkSize < sizeof(int))
160  chunkSize = sizeof(int);
161 
162  /* chunk, including SLAB header (both addresses nicely aligned) */
163 #ifdef MEMORY_CONTEXT_CHECKING
164  /* ensure there's always space for the sentinel byte */
165  fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1);
166 #else
167  fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize);
168 #endif
169 
170  /* Make sure the block can store at least one chunk. */
171  if (blockSize < fullChunkSize + Slab_BLOCKHDRSZ)
172  elog(ERROR, "block size %zu for slab is too small for %zu chunks",
173  blockSize, chunkSize);
174 
175  /* Compute maximum number of chunks per block */
176  chunksPerBlock = (blockSize - Slab_BLOCKHDRSZ) / fullChunkSize;
177 
178  /* The freelist starts with 0, ends with chunksPerBlock. */
179  freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
180 
181  /*
182  * Allocate the context header. Unlike aset.c, we never try to combine
183  * this with the first regular block; not worth the extra complication.
184  */
185 
186  /* Size of the memory context header */
187  headerSize = offsetof(SlabContext, freelist) + freelistSize;
188 
189 #ifdef MEMORY_CONTEXT_CHECKING
190 
191  /*
192  * With memory checking, we need to allocate extra space for the bitmap of
193  * free chunks. The bitmap is an array of bools, so we don't need to worry
194  * about alignment.
195  */
196  headerSize += chunksPerBlock * sizeof(bool);
197 #endif
198 
199  slab = (SlabContext *) malloc(headerSize);
200  if (slab == NULL)
201  {
203  ereport(ERROR,
204  (errcode(ERRCODE_OUT_OF_MEMORY),
205  errmsg("out of memory"),
206  errdetail("Failed while creating memory context \"%s\".",
207  name)));
208  }
209 
210  /*
211  * Avoid writing code that can fail between here and MemoryContextCreate;
212  * we'd leak the header if we ereport in this stretch.
213  */
214 
215  /* Fill in SlabContext-specific header fields */
216  slab->chunkSize = chunkSize;
217  slab->fullChunkSize = fullChunkSize;
218  slab->blockSize = blockSize;
219  slab->headerSize = headerSize;
220  slab->chunksPerBlock = chunksPerBlock;
221  slab->minFreeChunks = 0;
222  slab->nblocks = 0;
223 
224  /* initialize the freelist slots */
225  for (i = 0; i < (slab->chunksPerBlock + 1); i++)
226  dlist_init(&slab->freelist[i]);
227 
228 #ifdef MEMORY_CONTEXT_CHECKING
229  /* set the freechunks pointer right after the freelists array */
230  slab->freechunks
231  = (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
232 #endif
233 
234  /* Finally, do the type-independent part of context creation */
236  T_SlabContext,
237  MCTX_SLAB_ID,
238  parent,
239  name);
240 
241  return (MemoryContext) slab;
242 }
243 
244 /*
245  * SlabReset
246  * Frees all memory which is allocated in the given set.
247  *
248  * The code simply frees all the blocks in the context - we don't keep any
249  * keeper blocks or anything like that.
250  */
251 void
253 {
254  SlabContext *slab = (SlabContext *) context;
255  int i;
256 
257  Assert(SlabIsValid(slab));
258 
259 #ifdef MEMORY_CONTEXT_CHECKING
260  /* Check for corruption and leaks before freeing */
261  SlabCheck(context);
262 #endif
263 
264  /* walk over freelists and free the blocks */
265  for (i = 0; i <= slab->chunksPerBlock; i++)
266  {
267  dlist_mutable_iter miter;
268 
269  dlist_foreach_modify(miter, &slab->freelist[i])
270  {
271  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
272 
273  dlist_delete(miter.cur);
274 
275 #ifdef CLOBBER_FREED_MEMORY
276  wipe_mem(block, slab->blockSize);
277 #endif
278  free(block);
279  slab->nblocks--;
280  context->mem_allocated -= slab->blockSize;
281  }
282  }
283 
284  slab->minFreeChunks = 0;
285 
286  Assert(slab->nblocks == 0);
287  Assert(context->mem_allocated == 0);
288 }
289 
290 /*
291  * SlabDelete
292  * Free all memory which is allocated in the given context.
293  */
294 void
296 {
297  /* Reset to release all the SlabBlocks */
298  SlabReset(context);
299  /* And free the context header */
300  free(context);
301 }
302 
303 /*
304  * SlabAlloc
305  * Returns pointer to allocated memory of given size or NULL if
306  * request could not be completed; memory is added to the slab.
307  */
308 void *
310 {
311  SlabContext *slab = (SlabContext *) context;
312  SlabBlock *block;
313  MemoryChunk *chunk;
314  int idx;
315 
316  Assert(SlabIsValid(slab));
317 
318  Assert((slab->minFreeChunks >= 0) &&
319  (slab->minFreeChunks < slab->chunksPerBlock));
320 
321  /* make sure we only allow correct request size */
322  if (size != slab->chunkSize)
323  elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
324  size, slab->chunkSize);
325 
326  /*
327  * If there are no free chunks in any existing block, create a new block
328  * and put it to the last freelist bucket.
329  *
330  * slab->minFreeChunks == 0 means there are no blocks with free chunks,
331  * thanks to how minFreeChunks is updated at the end of SlabAlloc().
332  */
333  if (slab->minFreeChunks == 0)
334  {
335  block = (SlabBlock *) malloc(slab->blockSize);
336 
337  if (block == NULL)
338  return NULL;
339 
340  block->nfree = slab->chunksPerBlock;
341  block->firstFreeChunk = 0;
342  block->slab = slab;
343 
344  /*
345  * Put all the chunks on a freelist. Walk the chunks and point each
346  * one to the next one.
347  */
348  for (idx = 0; idx < slab->chunksPerBlock; idx++)
349  {
350  chunk = SlabBlockGetChunk(slab, block, idx);
351  *(int32 *) MemoryChunkGetPointer(chunk) = (idx + 1);
352  }
353 
354  /*
355  * And add it to the last freelist with all chunks empty.
356  *
357  * We know there are no blocks in the freelist, otherwise we wouldn't
358  * need a new block.
359  */
361 
362  dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);
363 
364  slab->minFreeChunks = slab->chunksPerBlock;
365  slab->nblocks += 1;
366  context->mem_allocated += slab->blockSize;
367  }
368 
369  /* grab the block from the freelist (even the new block is there) */
370  block = dlist_head_element(SlabBlock, node,
371  &slab->freelist[slab->minFreeChunks]);
372 
373  /* make sure we actually got a valid block, with matching nfree */
374  Assert(block != NULL);
375  Assert(slab->minFreeChunks == block->nfree);
376  Assert(block->nfree > 0);
377 
378  /* we know index of the first free chunk in the block */
379  idx = block->firstFreeChunk;
380 
381  /* make sure the chunk index is valid, and that it's marked as empty */
382  Assert((idx >= 0) && (idx < slab->chunksPerBlock));
383 
384  /* compute the chunk location block start (after the block header) */
385  chunk = SlabBlockGetChunk(slab, block, idx);
386 
387  /*
388  * Update the block nfree count, and also the minFreeChunks as we've
389  * decreased nfree for a block with the minimum number of free chunks
390  * (because that's how we chose the block).
391  */
392  block->nfree--;
393  slab->minFreeChunks = block->nfree;
394 
395  /*
396  * Remove the chunk from the freelist head. The index of the next free
397  * chunk is stored in the chunk itself.
398  */
400  block->firstFreeChunk = *(int32 *) MemoryChunkGetPointer(chunk);
401 
402  Assert(block->firstFreeChunk >= 0);
403  Assert(block->firstFreeChunk <= slab->chunksPerBlock);
404 
405  Assert((block->nfree != 0 &&
406  block->firstFreeChunk < slab->chunksPerBlock) ||
407  (block->nfree == 0 &&
408  block->firstFreeChunk == slab->chunksPerBlock));
409 
410  /* move the whole block to the right place in the freelist */
411  dlist_delete(&block->node);
412  dlist_push_head(&slab->freelist[block->nfree], &block->node);
413 
414  /*
415  * And finally update minFreeChunks, i.e. the index to the block with the
416  * lowest number of free chunks. We only need to do that when the block
417  * got full (otherwise we know the current block is the right one). We'll
418  * simply walk the freelist until we find a non-empty entry.
419  */
420  if (slab->minFreeChunks == 0)
421  {
422  for (idx = 1; idx <= slab->chunksPerBlock; idx++)
423  {
424  if (dlist_is_empty(&slab->freelist[idx]))
425  continue;
426 
427  /* found a non-empty freelist */
428  slab->minFreeChunks = idx;
429  break;
430  }
431  }
432 
433  if (slab->minFreeChunks == slab->chunksPerBlock)
434  slab->minFreeChunks = 0;
435 
436  /* Prepare to initialize the chunk header. */
438 
439  MemoryChunkSetHdrMask(chunk, block, MAXALIGN(slab->chunkSize),
440  MCTX_SLAB_ID);
441 #ifdef MEMORY_CONTEXT_CHECKING
442  /* slab mark to catch clobber of "unused" space */
443  Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
444  set_sentinel(MemoryChunkGetPointer(chunk), size);
445  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
446  Slab_CHUNKHDRSZ + slab->chunkSize,
447  slab->fullChunkSize -
448  (slab->chunkSize + Slab_CHUNKHDRSZ));
449 #endif
450 
451 #ifdef RANDOMIZE_ALLOCATED_MEMORY
452  /* fill the allocated space with junk */
453  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
454 #endif
455 
456  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
457 
458  return MemoryChunkGetPointer(chunk);
459 }
460 
461 /*
462  * SlabFree
463  * Frees allocated memory; memory is removed from the slab.
464  */
/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 *
 * The chunk is pushed onto its block's internal freelist, and the block is
 * moved to the freelist bucket matching its new free-chunk count.  A block
 * that becomes entirely free is returned to the OS rather than kept.
 */
void
SlabFree(void *pointer)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	SlabBlock  *block = MemoryChunkGetBlock(chunk);
	SlabContext *slab;
	int			idx;

	/*
	 * For speed reasons we just Assert that the referenced block is good.
	 * Future field experience may show that this Assert had better become a
	 * regular runtime test-and-elog check.
	 */
	Assert(SlabBlockIsValid(block));
	slab = block->slab;

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
	if (!sentinel_ok(pointer, slab->chunkSize))
		elog(WARNING, "detected write past chunk end in %s %p",
			 slab->header.name, chunk);
#endif

	/* compute index of the chunk with respect to block start */
	idx = SlabChunkIndex(slab, block, chunk);

	/*
	 * Add chunk to the head of the block's freelist (the freed chunk's first
	 * int32 stores the index of the previous freelist head), and update the
	 * block's nfree count.
	 */
	*(int32 *) pointer = block->firstFreeChunk;
	block->firstFreeChunk = idx;
	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* XXX don't wipe the int32 index, used for block-level freelist */
	wipe_mem((char *) pointer + sizeof(int32),
			 slab->chunkSize - sizeof(int32));
#endif

	/* remove the block from a freelist */
	dlist_delete(&block->node);

	/*
	 * See if we need to update the minFreeChunks field for the slab - we only
	 * need to do that if the block had that number of free chunks
	 * before we freed one. In that case, we check if there still are blocks
	 * in the original freelist and we either keep the current value (if there
	 * still are blocks) or increment it by one (the new block is still the
	 * one with minimum free chunks).
	 *
	 * The one exception is when the block will get completely free - in that
	 * case we will free it, so we can't use it for minFreeChunks. It however
	 * means there are no more blocks with free chunks.
	 */
	if (slab->minFreeChunks == (block->nfree - 1))
	{
		/* Have we removed the last chunk from the freelist? */
		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
		{
			/* but if we made the block entirely free, we'll free it */
			if (block->nfree == slab->chunksPerBlock)
				slab->minFreeChunks = 0;
			else
				slab->minFreeChunks++;
		}
	}

	/* If the block is now completely empty, free it. */
	if (block->nfree == slab->chunksPerBlock)
	{
		free(block);
		slab->nblocks--;
		slab->header.mem_allocated -= slab->blockSize;
	}
	else
		dlist_push_head(&slab->freelist[block->nfree], &block->node);

	Assert(slab->nblocks >= 0);
	Assert(slab->nblocks * slab->blockSize == slab->header.mem_allocated);
}
547 
548 /*
549  * SlabRealloc
550  * Change the allocated size of a chunk.
551  *
552  * As Slab is designed for allocating equally-sized chunks of memory, it can't
553  * do an actual chunk size change. We try to be gentle and allow calls with
554  * exactly the same size, as in that case we can simply return the same
555  * chunk. When the size differs, we throw an error.
556  *
557  * We could also allow requests with size < chunkSize. That however seems
558  * rather pointless - Slab is meant for chunks of constant size, and moreover
559  * realloc is usually used to enlarge the chunk.
560  */
561 void *
562 SlabRealloc(void *pointer, Size size)
563 {
564  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
565  SlabBlock *block = MemoryChunkGetBlock(chunk);
566  SlabContext *slab;
567 
568  /*
569  * Try to verify that we have a sane block pointer: the block header
570  * should reference a slab context. (We use a test-and-elog, not just
571  * Assert, because it seems highly likely that we're here in error in the
572  * first place.)
573  */
574  if (!SlabBlockIsValid(block))
575  elog(ERROR, "could not find block containing chunk %p", chunk);
576  slab = block->slab;
577 
578  /* can't do actual realloc with slab, but let's try to be gentle */
579  if (size == slab->chunkSize)
580  return pointer;
581 
582  elog(ERROR, "slab allocator does not support realloc()");
583  return NULL; /* keep compiler quiet */
584 }
585 
586 /*
587  * SlabGetChunkContext
588  * Return the MemoryContext that 'pointer' belongs to.
589  */
591 SlabGetChunkContext(void *pointer)
592 {
593  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
594  SlabBlock *block = MemoryChunkGetBlock(chunk);
595 
596  Assert(SlabBlockIsValid(block));
597  return &block->slab->header;
598 }
599 
600 /*
601  * SlabGetChunkSpace
602  * Given a currently-allocated chunk, determine the total space
603  * it occupies (including all memory-allocation overhead).
604  */
605 Size
606 SlabGetChunkSpace(void *pointer)
607 {
608  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
609  SlabBlock *block = MemoryChunkGetBlock(chunk);
610  SlabContext *slab;
611 
612  Assert(SlabBlockIsValid(block));
613  slab = block->slab;
614 
615  return slab->fullChunkSize;
616 }
617 
618 /*
619  * SlabIsEmpty
620  * Is an Slab empty of any allocated space?
621  */
622 bool
624 {
625  SlabContext *slab = (SlabContext *) context;
626 
627  Assert(SlabIsValid(slab));
628 
629  return (slab->nblocks == 0);
630 }
631 
632 /*
633  * SlabStats
634  * Compute stats about memory consumption of a Slab context.
635  *
636  * printfunc: if not NULL, pass a human-readable stats string to this.
637  * passthru: pass this pointer through to printfunc.
638  * totals: if not NULL, add stats about this context into *totals.
639  * print_to_stderr: print stats to stderr if true, elog otherwise.
640  */
641 void
643  MemoryStatsPrintFunc printfunc, void *passthru,
644  MemoryContextCounters *totals,
645  bool print_to_stderr)
646 {
647  SlabContext *slab = (SlabContext *) context;
648  Size nblocks = 0;
649  Size freechunks = 0;
650  Size totalspace;
651  Size freespace = 0;
652  int i;
653 
654  Assert(SlabIsValid(slab));
655 
656  /* Include context header in totalspace */
657  totalspace = slab->headerSize;
658 
659  for (i = 0; i <= slab->chunksPerBlock; i++)
660  {
661  dlist_iter iter;
662 
663  dlist_foreach(iter, &slab->freelist[i])
664  {
665  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
666 
667  nblocks++;
668  totalspace += slab->blockSize;
669  freespace += slab->fullChunkSize * block->nfree;
670  freechunks += block->nfree;
671  }
672  }
673 
674  if (printfunc)
675  {
676  char stats_string[200];
677 
678  snprintf(stats_string, sizeof(stats_string),
679  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
680  totalspace, nblocks, freespace, freechunks,
681  totalspace - freespace);
682  printfunc(context, passthru, stats_string, print_to_stderr);
683  }
684 
685  if (totals)
686  {
687  totals->nblocks += nblocks;
688  totals->freechunks += freechunks;
689  totals->totalspace += totalspace;
690  totals->freespace += freespace;
691  }
692 }
693 
694 
695 #ifdef MEMORY_CONTEXT_CHECKING
696 
697 /*
698  * SlabCheck
699  * Walk through chunks and check consistency of memory.
700  *
701  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
702  * find yourself in an infinite loop when trouble occurs, because this
703  * routine will be entered again when elog cleanup tries to release memory!
704  */
705 void
706 SlabCheck(MemoryContext context)
707 {
708  SlabContext *slab = (SlabContext *) context;
709  int i;
710  const char *name = slab->header.name;
711 
712  Assert(SlabIsValid(slab));
713  Assert(slab->chunksPerBlock > 0);
714 
715  /* walk all the freelists */
716  for (i = 0; i <= slab->chunksPerBlock; i++)
717  {
718  int j,
719  nfree;
720  dlist_iter iter;
721 
722  /* walk all blocks on this freelist */
723  dlist_foreach(iter, &slab->freelist[i])
724  {
725  int idx;
726  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
727 
728  /*
729  * Make sure the number of free chunks (in the block header)
730  * matches position in the freelist.
731  */
732  if (block->nfree != i)
733  elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
734  name, block->nfree, block, i);
735 
736  /* make sure the slab pointer correctly points to this context */
737  if (block->slab != slab)
738  elog(WARNING, "problem in slab %s: bogus slab link in block %p",
739  name, block);
740 
741  /* reset the bitmap of free chunks for this block */
742  memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
743  idx = block->firstFreeChunk;
744 
745  /*
746  * Now walk through the chunks, count the free ones and also
747  * perform some additional checks for the used ones. As the chunk
748  * freelist is stored within the chunks themselves, we have to
749  * walk through the chunks and construct our own bitmap.
750  */
751 
752  nfree = 0;
753  while (idx < slab->chunksPerBlock)
754  {
755  MemoryChunk *chunk;
756 
757  /* count the chunk as free, add it to the bitmap */
758  nfree++;
759  slab->freechunks[idx] = true;
760 
761  /* read index of the next free chunk */
762  chunk = SlabBlockGetChunk(slab, block, idx);
764  idx = *(int32 *) MemoryChunkGetPointer(chunk);
765  }
766 
767  for (j = 0; j < slab->chunksPerBlock; j++)
768  {
769  /* non-zero bit in the bitmap means chunk the chunk is used */
770  if (!slab->freechunks[j])
771  {
772  MemoryChunk *chunk = SlabBlockGetChunk(slab, block, j);
773  SlabBlock *chunkblock = (SlabBlock *) MemoryChunkGetBlock(chunk);
774 
775  /*
776  * check the chunk's blockoffset correctly points back to
777  * the block
778  */
779  if (chunkblock != block)
780  elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
781  name, block, chunk);
782 
783  /* check the sentinel byte is intact */
784  Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
785  if (!sentinel_ok(chunk, Slab_CHUNKHDRSZ + slab->chunkSize))
786  elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
787  name, block, chunk);
788  }
789  }
790 
791  /*
792  * Make sure we got the expected number of free chunks (as tracked
793  * in the block header).
794  */
795  if (nfree != block->nfree)
796  elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
797  name, block->nfree, block, nfree);
798  }
799  }
800 
801  Assert(slab->nblocks * slab->blockSize == context->mem_allocated);
802 }
803 
804 #endif /* MEMORY_CONTEXT_CHECKING */
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define MAXALIGN(LEN)
Definition: c.h:747
signed int int32
Definition: c.h:430
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:362
unsigned char bool
Definition: c.h:392
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:869
size_t Size
Definition: c.h:541
int errdetail(const char *fmt,...)
Definition: elog.c:1039
int errcode(int sqlerrcode)
Definition: elog.c:695
int errmsg(const char *fmt,...)
Definition: elog.c:906
#define WARNING
Definition: elog.h:32
#define ERROR
Definition: elog.h:35
#define ereport(elevel,...)
Definition: elog.h:145
const char * name
Definition: encode.c:561
#define free(a)
Definition: header.h:65
#define malloc(a)
Definition: header.h:50
#define dlist_foreach(iter, lhead)
Definition: ilist.h:573
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:553
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:325
static void dlist_delete(dlist_node *node)
Definition: ilist.h:394
struct dlist_head dlist_head
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:336
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:590
#define dlist_container(type, membername, ptr)
Definition: ilist.h:543
int j
Definition: isn.c:74
int i
Definition: isn.c:73
Assert(fmt[strlen(fmt) - 1] !='\n')
void MemoryContextCreate(MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
Definition: mcxt.c:946
MemoryContext TopMemoryContext
Definition: mcxt.c:130
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:672
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition: memnodes.h:54
@ MCTX_SLAB_ID
#define MEMORYCHUNK_MAX_VALUE
#define MemoryChunkGetPointer(c)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
#define snprintf
Definition: port.h:238
#define SlabChunkIndex(slab, block, chunk)
Definition: slab.c:111
#define Slab_BLOCKHDRSZ
Definition: slab.c:61
struct SlabBlock SlabBlock
#define SlabBlockGetChunk(slab, block, idx)
Definition: slab.c:106
#define SlabIsValid(set)
Definition: slab.c:118
void * SlabAlloc(MemoryContext context, Size size)
Definition: slab.c:309
void SlabFree(void *pointer)
Definition: slab.c:466
void SlabReset(MemoryContext context)
Definition: slab.c:252
#define Slab_CHUNKHDRSZ
Definition: slab.c:101
struct SlabContext SlabContext
MemoryContext SlabContextCreate(MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
Definition: slab.c:141
Size SlabGetChunkSpace(void *pointer)
Definition: slab.c:606
bool SlabIsEmpty(MemoryContext context)
Definition: slab.c:623
MemoryContext SlabGetChunkContext(void *pointer)
Definition: slab.c:591
void SlabStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition: slab.c:642
void * SlabRealloc(void *pointer, Size size)
Definition: slab.c:562
void SlabDelete(MemoryContext context)
Definition: slab.c:295
#define SlabBlockIsValid(block)
Definition: slab.c:125
Size mem_allocated
Definition: memnodes.h:87
const char * name
Definition: memnodes.h:93
Definition: slab.c:93
int firstFreeChunk
Definition: slab.c:96
int nfree
Definition: slab.c:95
SlabContext * slab
Definition: slab.c:97
dlist_node node
Definition: slab.c:94
Size blockSize
Definition: slab.c:72
Size fullChunkSize
Definition: slab.c:71
int nblocks
Definition: slab.c:76
MemoryContextData header
Definition: slab.c:68
int chunksPerBlock
Definition: slab.c:74
Size headerSize
Definition: slab.c:73
Size chunkSize
Definition: slab.c:70
int minFreeChunks
Definition: slab.c:75
dlist_head freelist[FLEXIBLE_ARRAY_MEMBER]
Definition: slab.c:81
dlist_node * cur
Definition: ilist.h:179
dlist_node * cur
Definition: ilist.h:200