PostgreSQL Source Code git master
Loading...
Searching...
No Matches
slab.c File Reference
#include "postgres.h"
#include "lib/ilist.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/memutils_internal.h"
#include "utils/memutils_memorychunk.h"
Include dependency graph for slab.c:

Go to the source code of this file.

Data Structures

struct  SlabContext
 
struct  SlabBlock
 

Macros

#define Slab_BLOCKHDRSZ   MAXALIGN(sizeof(SlabBlock))
 
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)   sizeof(SlabContext)
 
#define SLAB_BLOCKLIST_COUNT   3
 
#define SLAB_MAXIMUM_EMPTY_BLOCKS   10
 
#define Slab_CHUNKHDRSZ   sizeof(MemoryChunk)
 
#define SlabChunkGetPointer(chk)    ((void *) (((char *) (chk)) + sizeof(MemoryChunk)))
 
#define SlabBlockGetChunk(slab, block, n)
 
#define SlabIsValid(set)   ((set) && IsA(set, SlabContext))
 
#define SlabBlockIsValid(block)    ((block) && SlabIsValid((block)->slab))
 

Typedefs

typedef struct SlabContext SlabContext
 
typedef struct SlabBlock SlabBlock
 

Functions

static int32 SlabBlocklistIndex (SlabContext *slab, int nfree)
 
static int32 SlabFindNextBlockListIndex (SlabContext *slab)
 
static MemoryChunk * SlabGetNextFreeChunk (SlabContext *slab, SlabBlock *block)
 
MemoryContext SlabContextCreate (MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
 
void SlabReset (MemoryContext context)
 
void SlabDelete (MemoryContext context)
 
static void * SlabAllocSetupNewChunk (MemoryContext context, SlabBlock *block, MemoryChunk *chunk, Size size)
 
static pg_noinline void * SlabAllocFromNewBlock (MemoryContext context, Size size, int flags)
 
pg_noinline static pg_noreturn void SlabAllocInvalidSize (MemoryContext context, Size size)
 
void * SlabAlloc (MemoryContext context, Size size, int flags)
 
void SlabFree (void *pointer)
 
void * SlabRealloc (void *pointer, Size size, int flags)
 
MemoryContext SlabGetChunkContext (void *pointer)
 
Size SlabGetChunkSpace (void *pointer)
 
bool SlabIsEmpty (MemoryContext context)
 
void SlabStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 

Macro Definition Documentation

◆ Slab_BLOCKHDRSZ

#define Slab_BLOCKHDRSZ   MAXALIGN(sizeof(SlabBlock))

Definition at line 77 of file slab.c.

◆ SLAB_BLOCKLIST_COUNT

#define SLAB_BLOCKLIST_COUNT   3

Definition at line 95 of file slab.c.

◆ Slab_CHUNKHDRSZ

#define Slab_CHUNKHDRSZ   sizeof(MemoryChunk)

Definition at line 157 of file slab.c.

◆ Slab_CONTEXT_HDRSZ

#define Slab_CONTEXT_HDRSZ (   chunksPerBlock)    sizeof(SlabContext)

Definition at line 88 of file slab.c.

◆ SLAB_MAXIMUM_EMPTY_BLOCKS

#define SLAB_MAXIMUM_EMPTY_BLOCKS   10

Definition at line 98 of file slab.c.

◆ SlabBlockGetChunk

#define SlabBlockGetChunk (   slab,
  block,
  n
)
Value:
((MemoryChunk *) ((char *) (block) + Slab_BLOCKHDRSZ \
+ ((n) * (slab)->fullChunkSize)))
#define Slab_BLOCKHDRSZ
Definition slab.c:77

Definition at line 165 of file slab.c.

211{
212 int32 index;
213 int32 blocklist_shift = slab->blocklist_shift;
214
215 Assert(nfree >= 0 && nfree <= slab->chunksPerBlock);
216
217 /*
218 * Determine the blocklist index based on the number of free chunks. We
219 * must ensure that 0 free chunks is dedicated to index 0. Everything
220 * else must be >= 1 and < SLAB_BLOCKLIST_COUNT.
221 *
222 * To make this as efficient as possible, we exploit some two's complement
223 * arithmetic where we reverse the sign before bit shifting. This results
224 * in an nfree of 0 using index 0 and anything non-zero staying non-zero.
225 * This is exploiting 0 and -0 being the same in two's complement. When
226 * we're done, we just need to flip the sign back over again for a
227 * positive index.
228 */
229 index = -((-nfree) >> blocklist_shift);
230
231 if (nfree == 0)
232 Assert(index == 0);
233 else
235
236 return index;
237}
238
239/*
240 * SlabFindNextBlockListIndex
241 * Search blocklist for blocks which have free chunks and return the
242 * index of the blocklist found containing at least 1 block with free
243 * chunks. If no block can be found we return 0.
244 *
245 * Note: We give priority to fuller blocks so that these are filled before
246 * emptier blocks. This is done to increase the chances that mostly-empty
247 * blocks will eventually become completely empty so they can be free'd.
248 */
249static int32
251{
252 /* start at 1 as blocklist[0] is for full blocks. */
253 for (int i = 1; i < SLAB_BLOCKLIST_COUNT; i++)
254 {
255 /* return the first found non-empty index */
256 if (!dlist_is_empty(&slab->blocklist[i]))
257 return i;
258 }
259
260 /* no blocks with free space */
261 return 0;
262}
263
264/*
265 * SlabGetNextFreeChunk
266 * Return the next free chunk in block and update the block to account
267 * for the returned chunk now being used.
268 */
269static inline MemoryChunk *
271{
273
274 Assert(block->nfree > 0);
275
276 if (block->freehead != NULL)
277 {
278 chunk = block->freehead;
279
280 /*
281 * Pop the chunk from the linked list of free chunks. The pointer to
282 * the next free chunk is stored in the chunk itself.
283 */
286
287 /* check nothing stomped on the free chunk's memory */
288 Assert(block->freehead == NULL ||
289 (block->freehead >= SlabBlockGetChunk(slab, block, 0) &&
290 block->freehead <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1) &&
291 SlabChunkMod(slab, block, block->freehead) == 0));
292 }
293 else
294 {
295 Assert(block->nunused > 0);
296
297 chunk = block->unused;
298 block->unused = (MemoryChunk *) (((char *) block->unused) + slab->fullChunkSize);
299 block->nunused--;
300 }
301
302 block->nfree--;
303
304 return chunk;
305}
306
307/*
308 * SlabContextCreate
309 * Create a new Slab context.
310 *
311 * parent: parent context, or NULL if top-level context
312 * name: name of context (must be statically allocated)
313 * blockSize: allocation block size
314 * chunkSize: allocation chunk size
315 *
316 * The Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1) may not exceed
317 * MEMORYCHUNK_MAX_VALUE.
318 * 'blockSize' may not exceed MEMORYCHUNK_MAX_BLOCKOFFSET.
319 */
322 const char *name,
323 Size blockSize,
324 Size chunkSize)
325{
326 int chunksPerBlock;
327 Size fullChunkSize;
328 SlabContext *slab;
329 int i;
330
331 /* ensure MemoryChunk's size is properly maxaligned */
333 "sizeof(MemoryChunk) is not maxaligned");
335
336 /*
337 * Ensure there's enough space to store the pointer to the next free chunk
338 * in the memory of the (otherwise) unused allocation.
339 */
340 if (chunkSize < sizeof(MemoryChunk *))
341 chunkSize = sizeof(MemoryChunk *);
342
343 /* length of the maxaligned chunk including the chunk header */
344#ifdef MEMORY_CONTEXT_CHECKING
345 /* ensure there's always space for the sentinel byte */
346 fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1);
347#else
348 fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize);
349#endif
350
351 Assert(fullChunkSize <= MEMORYCHUNK_MAX_VALUE);
352
353 /* compute the number of chunks that will fit on each block */
354 chunksPerBlock = (blockSize - Slab_BLOCKHDRSZ) / fullChunkSize;
355
356 /* Make sure the block can store at least one chunk. */
357 if (chunksPerBlock == 0)
358 elog(ERROR, "block size %zu for slab is too small for %zu-byte chunks",
359 blockSize, chunkSize);
360
361
362
363 slab = (SlabContext *) malloc(Slab_CONTEXT_HDRSZ(chunksPerBlock));
364 if (slab == NULL)
365 {
369 errmsg("out of memory"),
370 errdetail("Failed while creating memory context \"%s\".",
371 name)));
372 }
373
374 /*
375 * Avoid writing code that can fail between here and MemoryContextCreate;
376 * we'd leak the header if we ereport in this stretch.
377 */
378
379 /* See comments about Valgrind interactions in aset.c */
380 VALGRIND_CREATE_MEMPOOL(slab, 0, false);
381 /* This vchunk covers the SlabContext only */
382 VALGRIND_MEMPOOL_ALLOC(slab, slab, sizeof(SlabContext));
383
384 /* Fill in SlabContext-specific header fields */
385 slab->chunkSize = (uint32) chunkSize;
386 slab->fullChunkSize = (uint32) fullChunkSize;
387 slab->blockSize = (uint32) blockSize;
388 slab->chunksPerBlock = chunksPerBlock;
389 slab->curBlocklistIndex = 0;
390
391 /*
392 * Compute a shift that guarantees that shifting chunksPerBlock with it is
393 * < SLAB_BLOCKLIST_COUNT - 1. The reason that we subtract 1 from
394 * SLAB_BLOCKLIST_COUNT in this calculation is that we reserve the 0th
395 * blocklist element for blocks which have no free chunks.
396 *
397 * We calculate the number of bits to shift by rather than a divisor to
398 * divide by as performing division each time we need to find the
399 * blocklist index would be much slower.
400 */
401 slab->blocklist_shift = 0;
402 while ((slab->chunksPerBlock >> slab->blocklist_shift) >= (SLAB_BLOCKLIST_COUNT - 1))
403 slab->blocklist_shift++;
404
405 /* initialize the list to store empty blocks to be reused */
406 dclist_init(&slab->emptyblocks);
407
408 /* initialize each blocklist slot */
409 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
410 dlist_init(&slab->blocklist[i]);
411
412#ifdef MEMORY_CONTEXT_CHECKING
413 /* set the isChunkFree pointer right after the end of the context */
414 slab->isChunkFree = (bool *) ((char *) slab + sizeof(SlabContext));
415#endif
416
417 /* Finally, do the type-independent part of context creation */
421 parent,
422 name);
423
424 return (MemoryContext) slab;
425}
426
427/*
428 * SlabReset
429 * Frees all memory which is allocated in the given set.
430 *
431 * The code simply frees all the blocks in the context - we don't keep any
432 * keeper blocks or anything like that.
433 */
434void
436{
437 SlabContext *slab = (SlabContext *) context;
439 int i;
440
441 Assert(SlabIsValid(slab));
442
443#ifdef MEMORY_CONTEXT_CHECKING
444 /* Check for corruption and leaks before freeing */
445 SlabCheck(context);
446#endif
447
448 /* release any retained empty blocks */
450 {
451 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
452
454
455#ifdef CLOBBER_FREED_MEMORY
456 wipe_mem(block, slab->blockSize);
457#endif
458
459 /* As in aset.c, free block-header vchunks explicitly */
460 VALGRIND_MEMPOOL_FREE(slab, block);
461
462 free(block);
463 context->mem_allocated -= slab->blockSize;
464 }
465
466 /* walk over blocklist and free the blocks */
467 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
468 {
470 {
471 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
472
473 dlist_delete(miter.cur);
474
475#ifdef CLOBBER_FREED_MEMORY
476 wipe_mem(block, slab->blockSize);
477#endif
478
479 /* As in aset.c, free block-header vchunks explicitly */
480 VALGRIND_MEMPOOL_FREE(slab, block);
481
482 free(block);
483 context->mem_allocated -= slab->blockSize;
484 }
485 }
486
487 /*
488 * Instruct Valgrind to throw away all the vchunks associated with this
489 * context, except for the one covering the SlabContext. This gets rid of
490 * the vchunks for whatever user data is getting discarded by the context
491 * reset.
492 */
493 VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext));
494
495 slab->curBlocklistIndex = 0;
496
497 Assert(context->mem_allocated == 0);
498}
499
500/*
501 * SlabDelete
502 * Free all memory which is allocated in the given context.
503 */
504void
506{
507 /* Reset to release all the SlabBlocks */
508 SlabReset(context);
509
510 /* Destroy the vpool -- see notes in aset.c */
512
513 /* And free the context header */
514 free(context);
515}
516
517/*
518 * Small helper for allocating a new chunk from a chunk, to avoid duplicating
519 * the code between SlabAlloc() and SlabAllocFromNewBlock().
520 */
521static inline void *
523 MemoryChunk *chunk, Size size)
524{
525 SlabContext *slab = (SlabContext *) context;
526
527 /*
528 * Check that the chunk pointer is actually somewhere on the block and is
529 * aligned as expected.
530 */
531 Assert(chunk >= SlabBlockGetChunk(slab, block, 0));
532 Assert(chunk <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1));
533 Assert(SlabChunkMod(slab, block, chunk) == 0);
534
535 /* Prepare to initialize the chunk header. */
537
539
540#ifdef MEMORY_CONTEXT_CHECKING
541 /* slab mark to catch clobber of "unused" space */
545 slab->chunkSize,
546 slab->fullChunkSize -
547 (slab->chunkSize + Slab_CHUNKHDRSZ));
548#endif
549
550#ifdef RANDOMIZE_ALLOCATED_MEMORY
551 /* fill the allocated space with junk */
553#endif
554
555 /* Disallow access to the chunk header. */
557
559}
560
562static void *
563SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
564{
565 SlabContext *slab = (SlabContext *) context;
566 SlabBlock *block;
568 dlist_head *blocklist;
569 int blocklist_idx;
570
571 /* to save allocating a new one, first check the empty blocks list */
572 if (dclist_count(&slab->emptyblocks) > 0)
573 {
575
576 block = dlist_container(SlabBlock, node, node);
577
578 /*
579 * SlabFree() should have left this block in a valid state with all
580 * chunks free. Ensure that's the case.
581 */
582 Assert(block->nfree == slab->chunksPerBlock);
583
584 /* fetch the next chunk from this block */
585 chunk = SlabGetNextFreeChunk(slab, block);
586 }
587 else
588 {
589 block = (SlabBlock *) malloc(slab->blockSize);
590
591 if (unlikely(block == NULL))
592 return MemoryContextAllocationFailure(context, size, flags);
593
594 /* Make a vchunk covering the new block's header */
596
597 block->slab = slab;
598 context->mem_allocated += slab->blockSize;
599
600 /* use the first chunk in the new block */
601 chunk = SlabBlockGetChunk(slab, block, 0);
602
603 block->nfree = slab->chunksPerBlock - 1;
604 block->unused = SlabBlockGetChunk(slab, block, 1);
605 block->freehead = NULL;
606 block->nunused = slab->chunksPerBlock - 1;
607 }
608
609 /* find the blocklist element for storing blocks with 1 used chunk */
610 blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
611 blocklist = &slab->blocklist[blocklist_idx];
612
613 /* this better be empty. We just added a block thinking it was */
614 Assert(dlist_is_empty(blocklist));
615
616 dlist_push_head(blocklist, &block->node);
617
619
620 return SlabAllocSetupNewChunk(context, block, chunk, size);
621}
622
623/*
624 * SlabAllocInvalidSize
625 * Handle raising an ERROR for an invalid size request. We don't do this
626 * in slab alloc as calling the elog functions would force the compiler
627 * to setup the stack frame in SlabAlloc. For performance reasons, we
628 * want to avoid that.
629 */
632static void
634{
635 SlabContext *slab = (SlabContext *) context;
636
637 elog(ERROR, "unexpected alloc chunk size %zu (expected %u)", size,
638 slab->chunkSize);
639}
640
641/*
642 * SlabAlloc
643 * Returns a pointer to a newly allocated memory chunk or raises an ERROR
644 * on allocation failure, or returns NULL when flags contains
645 * MCXT_ALLOC_NO_OOM. 'size' must be the same size as was specified
646 * during SlabContextCreate().
647 *
648 * This function should only contain the most common code paths. Everything
649 * else should be in pg_noinline helper functions, thus avoiding the overhead
650 * of creating a stack frame for the common cases. Allocating memory is often
651 * a bottleneck in many workloads, so avoiding stack frame setup is
652 * worthwhile. Helper functions should always directly return the newly
653 * allocated memory so that we can just return that address directly as a tail
654 * call.
655 */
656void *
657SlabAlloc(MemoryContext context, Size size, int flags)
658{
659 SlabContext *slab = (SlabContext *) context;
660 SlabBlock *block;
662
663 Assert(SlabIsValid(slab));
664
665 /* sanity check that this is pointing to a valid blocklist */
666 Assert(slab->curBlocklistIndex >= 0);
668
669 /*
670 * Make sure we only allow correct request size. This doubles as the
671 * MemoryContextCheckSize check.
672 */
673 if (unlikely(size != slab->chunkSize))
674 SlabAllocInvalidSize(context, size);
675
676 if (unlikely(slab->curBlocklistIndex == 0))
677 {
678 /*
679 * Handle the case when there are no partially filled blocks
680 * available. This happens either when the last allocation took the
681 * last chunk in the block, or when SlabFree() free'd the final block.
682 */
683 return SlabAllocFromNewBlock(context, size, flags);
684 }
685 else
686 {
687 dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
689
690 Assert(!dlist_is_empty(blocklist));
691
692 /* grab the block from the blocklist */
693 block = dlist_head_element(SlabBlock, node, blocklist);
694
695 /* make sure we actually got a valid block, with matching nfree */
696 Assert(block != NULL);
697 Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
698 Assert(block->nfree > 0);
699
700 /* fetch the next chunk from this block */
701 chunk = SlabGetNextFreeChunk(slab, block);
702
703 /* get the new blocklist index based on the new free chunk count */
705
706 /*
707 * Handle the case where the blocklist index changes. This also deals
708 * with blocks becoming full as only full blocks go at index 0.
709 */
711 {
712 dlist_delete_from(blocklist, &block->node);
714
715 if (dlist_is_empty(blocklist))
717 }
718 }
719
720 return SlabAllocSetupNewChunk(context, block, chunk, size);
721}
722
723/*
724 * SlabFree
725 * Frees allocated memory; memory is removed from the slab.
726 */
727void
728SlabFree(void *pointer)
729{
731 SlabBlock *block;
732 SlabContext *slab;
733 int curBlocklistIdx;
734 int newBlocklistIdx;
735
736 /* Allow access to the chunk header. */
738
739 block = MemoryChunkGetBlock(chunk);
740
741 /*
742 * For speed reasons we just Assert that the referenced block is good.
743 * Future field experience may show that this Assert had better become a
744 * regular runtime test-and-elog check.
745 */
746 Assert(SlabBlockIsValid(block));
747 slab = block->slab;
748
749#ifdef MEMORY_CONTEXT_CHECKING
750 /* Test for someone scribbling on unused space in chunk */
752 if (!sentinel_ok(pointer, slab->chunkSize))
753 elog(WARNING, "detected write past chunk end in %s %p",
754 slab->header.name, chunk);
755#endif
756
757 /* push this chunk onto the head of the block's free list */
758 *(MemoryChunk **) pointer = block->freehead;
759 block->freehead = chunk;
760
761 block->nfree++;
762
763 Assert(block->nfree > 0);
764 Assert(block->nfree <= slab->chunksPerBlock);
765
766#ifdef CLOBBER_FREED_MEMORY
767 /* don't wipe the free list MemoryChunk pointer stored in the chunk */
768 wipe_mem((char *) pointer + sizeof(MemoryChunk *),
769 slab->chunkSize - sizeof(MemoryChunk *));
770#endif
771
772 curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
774
775 /*
776 * Check if the block needs to be moved to another element on the
777 * blocklist based on it now having 1 more free chunk.
778 */
780 {
781 /* do the move */
784
785 /*
786 * The blocklist[curBlocklistIdx] may now be empty or we may now be
787 * able to use a lower-element blocklist. We'll need to redetermine
788 * what the slab->curBlocklistIndex is if the current blocklist was
789 * changed or if a lower element one was changed. We must ensure we
790 * use the list with the fullest block(s).
791 */
793 {
795
796 /*
797 * We know there must be a block with at least 1 unused chunk as
798 * we just pfree'd one. Ensure curBlocklistIndex reflects this.
799 */
800 Assert(slab->curBlocklistIndex > 0);
801 }
802 }
803
804 /* Handle when a block becomes completely empty */
805 if (unlikely(block->nfree == slab->chunksPerBlock))
806 {
807 /* remove the block */
809
810 /*
811 * To avoid thrashing malloc/free, we keep a list of empty blocks that
812 * we can reuse again instead of having to malloc a new one.
813 */
815 dclist_push_head(&slab->emptyblocks, &block->node);
816 else
817 {
818 /*
819 * When we have enough empty blocks stored already, we actually
820 * free the block.
821 */
822#ifdef CLOBBER_FREED_MEMORY
823 wipe_mem(block, slab->blockSize);
824#endif
825
826 /* As in aset.c, free block-header vchunks explicitly */
827 VALGRIND_MEMPOOL_FREE(slab, block);
828
829 free(block);
830 slab->header.mem_allocated -= slab->blockSize;
831 }
832
833 /*
834 * Check if we need to reset the blocklist index. This is required
835 * when the blocklist this block is on has become completely empty.
836 */
837 if (slab->curBlocklistIndex == newBlocklistIdx &&
840 }
841}
842
843/*
844 * SlabRealloc
845 * Change the allocated size of a chunk.
846 *
847 * As Slab is designed for allocating equally-sized chunks of memory, it can't
848 * do an actual chunk size change. We try to be gentle and allow calls with
849 * exactly the same size, as in that case we can simply return the same
850 * chunk. When the size differs, we throw an error.
851 *
852 * We could also allow requests with size < chunkSize. That however seems
853 * rather pointless - Slab is meant for chunks of constant size, and moreover
854 * realloc is usually used to enlarge the chunk.
855 */
856void *
857SlabRealloc(void *pointer, Size size, int flags)
858{
860 SlabBlock *block;
861 SlabContext *slab;
862
863 /* Allow access to the chunk header. */
865
866 block = MemoryChunkGetBlock(chunk);
867
868 /* Disallow access to the chunk header. */
870
871 /*
872 * Try to verify that we have a sane block pointer: the block header
873 * should reference a slab context. (We use a test-and-elog, not just
874 * Assert, because it seems highly likely that we're here in error in the
875 * first place.)
876 */
877 if (!SlabBlockIsValid(block))
878 elog(ERROR, "could not find block containing chunk %p", chunk);
879 slab = block->slab;
880
881 /* can't do actual realloc with slab, but let's try to be gentle */
882 if (size == slab->chunkSize)
883 return pointer;
884
885 elog(ERROR, "slab allocator does not support realloc()");
886 return NULL; /* keep compiler quiet */
887}
888
889/*
890 * SlabGetChunkContext
891 * Return the MemoryContext that 'pointer' belongs to.
892 */
894SlabGetChunkContext(void *pointer)
895{
897 SlabBlock *block;
898
899 /* Allow access to the chunk header. */
901
902 block = MemoryChunkGetBlock(chunk);
903
904 /* Disallow access to the chunk header. */
906
907 Assert(SlabBlockIsValid(block));
908
909 return &block->slab->header;
910}
911
912/*
913 * SlabGetChunkSpace
914 * Given a currently-allocated chunk, determine the total space
915 * it occupies (including all memory-allocation overhead).
916 */
917Size
918SlabGetChunkSpace(void *pointer)
919{
921 SlabBlock *block;
922 SlabContext *slab;
923
924 /* Allow access to the chunk header. */
926
927 block = MemoryChunkGetBlock(chunk);
928
929 /* Disallow access to the chunk header. */
931
932 Assert(SlabBlockIsValid(block));
933 slab = block->slab;
934
935 return slab->fullChunkSize;
936}
937
938/*
939 * SlabIsEmpty
940 * Is the slab empty of any allocated space?
941 */
942bool
944{
945 Assert(SlabIsValid((SlabContext *) context));
946
947 return (context->mem_allocated == 0);
948}
949
950/*
951 * SlabStats
952 * Compute stats about memory consumption of a Slab context.
953 *
954 * printfunc: if not NULL, pass a human-readable stats string to this.
955 * passthru: pass this pointer through to printfunc.
956 * totals: if not NULL, add stats about this context into *totals.
957 * print_to_stderr: print stats to stderr if true, elog otherwise.
958 */
959void
963 bool print_to_stderr)
964{
965 SlabContext *slab = (SlabContext *) context;
966 Size nblocks = 0;
967 Size freechunks = 0;
968 Size totalspace;
969 Size freespace = 0;
970 int i;
971
972 Assert(SlabIsValid(slab));
973
974 /* Include context header in totalspace */
975 totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
976
977 /* Add the space consumed by blocks in the emptyblocks list */
978 totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
979
980 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
981 {
982 dlist_iter iter;
983
984 dlist_foreach(iter, &slab->blocklist[i])
985 {
986 SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
987
988 nblocks++;
989 totalspace += slab->blockSize;
990 freespace += slab->fullChunkSize * block->nfree;
991 freechunks += block->nfree;
992 }
993 }
994
995 if (printfunc)
996 {
997 char stats_string[200];
998
999 /* XXX should we include free chunks on empty blocks? */
1001 "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
1002 totalspace, nblocks, dclist_count(&slab->emptyblocks),
1003 freespace, freechunks, totalspace - freespace);
1005 }
1006
1007 if (totals)
1008 {
1009 totals->nblocks += nblocks;
1010 totals->freechunks += freechunks;
1011 totals->totalspace += totalspace;
1012 totals->freespace += freespace;
1013 }
1014}
1015
1016
1017#ifdef MEMORY_CONTEXT_CHECKING
1018
1019/*
1020 * SlabCheck
1021 * Walk through all blocks looking for inconsistencies.
1022 *
1023 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1024 * find yourself in an infinite loop when trouble occurs, because this
1025 * routine will be entered again when elog cleanup tries to release memory!
1026 */
1027void
1028SlabCheck(MemoryContext context)
1029{
1030 SlabContext *slab = (SlabContext *) context;
1031 int i;
1032 int nblocks = 0;
1033 const char *name = slab->header.name;
1034 dlist_iter iter;
1035
1036 Assert(SlabIsValid(slab));
1037 Assert(slab->chunksPerBlock > 0);
1038
1039 /*
1040 * Have a look at the empty blocks. These should have all their chunks
1041 * marked as free. Ensure that's the case.
1042 */
1043 dclist_foreach(iter, &slab->emptyblocks)
1044 {
1045 SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
1046
1047 if (block->nfree != slab->chunksPerBlock)
1048 elog(WARNING, "problem in slab %s: empty block %p should have %d free chunks but has %d chunks free",
1049 name, block, slab->chunksPerBlock, block->nfree);
1050 }
1051
1052 /* walk the non-empty block lists */
1053 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
1054 {
1055 int j,
1056 nfree;
1057
1058 /* walk all blocks on this blocklist */
1059 dlist_foreach(iter, &slab->blocklist[i])
1060 {
1061 SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
1063
1064 /*
1065 * Make sure the number of free chunks (in the block header)
1066 * matches the position in the blocklist.
1067 */
1068 if (SlabBlocklistIndex(slab, block->nfree) != i)
1069 elog(WARNING, "problem in slab %s: block %p is on blocklist %d but should be on blocklist %d",
1070 name, block, i, SlabBlocklistIndex(slab, block->nfree));
1071
1072 /* make sure the block is not empty */
1073 if (block->nfree >= slab->chunksPerBlock)
1074 elog(WARNING, "problem in slab %s: empty block %p incorrectly stored on blocklist element %d",
1075 name, block, i);
1076
1077 /* make sure the slab pointer correctly points to this context */
1078 if (block->slab != slab)
1079 elog(WARNING, "problem in slab %s: bogus slab link in block %p",
1080 name, block);
1081
1082 /* reset the array of free chunks for this block */
1083 memset(slab->isChunkFree, 0, (slab->chunksPerBlock * sizeof(bool)));
1084 nfree = 0;
1085
1086 /* walk through the block's free list chunks */
1087 cur_chunk = block->freehead;
1088 while (cur_chunk != NULL)
1089 {
1090 int chunkidx = SlabChunkIndex(slab, block, cur_chunk);
1091
1092 /*
1093 * Ensure the free list link points to something on the block
1094 * at an address aligned according to the full chunk size.
1095 */
1096 if (cur_chunk < SlabBlockGetChunk(slab, block, 0) ||
1097 cur_chunk > SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1) ||
1098 SlabChunkMod(slab, block, cur_chunk) != 0)
1099 elog(WARNING, "problem in slab %s: bogus free list link %p in block %p",
1100 name, cur_chunk, block);
1101
1102 /* count the chunk and mark it free on the free chunk array */
1103 nfree++;
1104 slab->isChunkFree[chunkidx] = true;
1105
1106 /* read pointer of the next free chunk */
1109 }
1110
1111 /* check that the unused pointer matches what nunused claims */
1112 if (SlabBlockGetChunk(slab, block, slab->chunksPerBlock - block->nunused) !=
1113 block->unused)
1114 elog(WARNING, "problem in slab %s: mismatch detected between nunused chunks and unused pointer in block %p",
1115 name, block);
1116
1117 /*
1118 * count the remaining free chunks that have yet to make it onto
1119 * the block's free list.
1120 */
1121 cur_chunk = block->unused;
1122 for (j = 0; j < block->nunused; j++)
1123 {
1124 int chunkidx = SlabChunkIndex(slab, block, cur_chunk);
1125
1126
1127 /* count the chunk as free and mark it as so in the array */
1128 nfree++;
1129 if (chunkidx < slab->chunksPerBlock)
1130 slab->isChunkFree[chunkidx] = true;
1131
1132 /* move forward 1 chunk */
1133 cur_chunk = (MemoryChunk *) (((char *) cur_chunk) + slab->fullChunkSize);
1134 }
1135
1136 for (j = 0; j < slab->chunksPerBlock; j++)
1137 {
1138 if (!slab->isChunkFree[j])
1139 {
1140 MemoryChunk *chunk = SlabBlockGetChunk(slab, block, j);
1142
1143 /* Allow access to the chunk header. */
1145
1147
1148 /* Disallow access to the chunk header. */
1150
1151 /*
1152 * check the chunk's blockoffset correctly points back to
1153 * the block
1154 */
1155 if (chunkblock != block)
1156 elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
1157 name, block, chunk);
1158
1159 /* check the sentinel byte is intact */
1160 Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
1162 elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
1163 name, block, chunk);
1164 }
1165 }
1166
1167 /*
1168 * Make sure we got the expected number of free chunks (as tracked
1169 * in the block header).
1170 */
1171 if (nfree != block->nfree)
1172 elog(WARNING, "problem in slab %s: nfree in block %p is %d but %d chunk were found as free",
1173 name, block, block->nfree, nfree);
1174
1175 nblocks++;
1176 }
1177 }
1178
1179 /* the stored empty blocks are tracked in mem_allocated too */
1180 nblocks += dclist_count(&slab->emptyblocks);
1181
1182 Assert(nblocks * slab->blockSize == context->mem_allocated);
1183}
1184
1185#endif /* MEMORY_CONTEXT_CHECKING */
#define pg_noinline
Definition c.h:295
#define MAXALIGN(LEN)
Definition c.h:826
#define pg_noreturn
Definition c.h:164
#define Assert(condition)
Definition c.h:873
int32_t int32
Definition c.h:542
#define unlikely(x)
Definition c.h:412
uint32_t uint32
Definition c.h:546
#define StaticAssertDecl(condition, errmessage)
Definition c.h:942
size_t Size
Definition c.h:619
int errdetail(const char *fmt,...)
Definition elog.c:1216
int errcode(int sqlerrcode)
Definition elog.c:863
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define WARNING
Definition elog.h:36
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
static void dlist_init(dlist_head *head)
Definition ilist.h:314
static void dlist_delete_from(dlist_head *head, dlist_node *node)
Definition ilist.h:429
#define dlist_head_element(type, membername, lhead)
Definition ilist.h:603
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static uint32 dclist_count(const dclist_head *head)
Definition ilist.h:932
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition ilist.h:347
#define dlist_foreach_modify(iter, lhead)
Definition ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
static void dclist_delete_from(dclist_head *head, dlist_node *node)
Definition ilist.h:763
static dlist_node * dclist_pop_head_node(dclist_head *head)
Definition ilist.h:789
static void dclist_push_head(dclist_head *head, dlist_node *node)
Definition ilist.h:693
static void dclist_init(dclist_head *head)
Definition ilist.h:671
#define dclist_foreach_modify(iter, lhead)
Definition ilist.h:973
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
#define dclist_foreach(iter, lhead)
Definition ilist.h:970
int j
Definition isn.c:78
int i
Definition isn.c:77
void MemoryContextCreate(MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
Definition mcxt.c:1149
MemoryContext TopMemoryContext
Definition mcxt.c:166
void MemoryContextStats(MemoryContext context)
Definition mcxt.c:863
void * MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
Definition mcxt.c:1198
#define VALGRIND_DESTROY_MEMPOOL(context)
Definition memdebug.h:25
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition memdebug.h:26
#define VALGRIND_CREATE_MEMPOOL(context, redzones, zeroed)
Definition memdebug.h:24
#define VALGRIND_MEMPOOL_ALLOC(context, addr, size)
Definition memdebug.h:29
#define VALGRIND_MEMPOOL_TRIM(context, addr, size)
Definition memdebug.h:32
#define VALGRIND_MEMPOOL_FREE(context, addr)
Definition memdebug.h:30
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition memdebug.h:27
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition memdebug.h:28
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition memnodes.h:54
@ MCTX_SLAB_ID
#define MEMORYCHUNK_MAX_BLOCKOFFSET
#define MEMORYCHUNK_MAX_VALUE
#define MemoryChunkGetPointer(c)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
#define snprintf
Definition port.h:260
static int fb(int x)
void * SlabAlloc(MemoryContext context, Size size, int flags)
Definition slab.c:658
static pg_noinline void * SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
Definition slab.c:564
#define SlabIsValid(set)
Definition slab.c:196
void SlabFree(void *pointer)
Definition slab.c:729
void SlabReset(MemoryContext context)
Definition slab.c:436
#define Slab_CHUNKHDRSZ
Definition slab.c:157
static MemoryChunk * SlabGetNextFreeChunk(SlabContext *slab, SlabBlock *block)
Definition slab.c:271
#define SlabChunkGetPointer(chk)
Definition slab.c:158
MemoryContext SlabContextCreate(MemoryContext parent, const char *name, Size blockSize, Size chunkSize)
Definition slab.c:322
static int32 SlabBlocklistIndex(SlabContext *slab, int nfree)
Definition slab.c:211
static void * SlabAllocSetupNewChunk(MemoryContext context, SlabBlock *block, MemoryChunk *chunk, Size size)
Definition slab.c:523
Size SlabGetChunkSpace(void *pointer)
Definition slab.c:919
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)
Definition slab.c:88
bool SlabIsEmpty(MemoryContext context)
Definition slab.c:944
MemoryContext SlabGetChunkContext(void *pointer)
Definition slab.c:895
pg_noinline static pg_noreturn void SlabAllocInvalidSize(MemoryContext context, Size size)
Definition slab.c:634
static int32 SlabFindNextBlockListIndex(SlabContext *slab)
Definition slab.c:251
#define SlabBlockGetChunk(slab, block, n)
Definition slab.c:165
void * SlabRealloc(void *pointer, Size size, int flags)
Definition slab.c:858
void SlabStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition slab.c:961
void SlabDelete(MemoryContext context)
Definition slab.c:506
#define SLAB_BLOCKLIST_COUNT
Definition slab.c:95
#define SlabBlockIsValid(block)
Definition slab.c:202
#define SLAB_MAXIMUM_EMPTY_BLOCKS
Definition slab.c:98
#define free(a)
#define malloc(a)
const char * name
Definition memnodes.h:131
int32 nfree
Definition slab.c:149
MemoryChunk * freehead
Definition slab.c:151
MemoryChunk * unused
Definition slab.c:152
SlabContext * slab
Definition slab.c:148
dlist_node node
Definition slab.c:153
int32 nunused
Definition slab.c:150
dlist_head blocklist[SLAB_BLOCKLIST_COUNT]
Definition slab.c:129
int32 chunksPerBlock
Definition slab.c:110
uint32 fullChunkSize
Definition slab.c:108
MemoryContextData header
Definition slab.c:105
uint32 blockSize
Definition slab.c:109
int32 curBlocklistIndex
Definition slab.c:111
int32 blocklist_shift
Definition slab.c:118
uint32 chunkSize
Definition slab.c:107
dclist_head emptyblocks
Definition slab.c:120
dlist_node * cur
Definition ilist.h:179
Definition type.h:96
const char * name

◆ SlabBlockIsValid

#define SlabBlockIsValid (   block)     ((block) && SlabIsValid((block)->slab))

Definition at line 202 of file slab.c.

◆ SlabChunkGetPointer

#define SlabChunkGetPointer (   chk)     ((void *) (((char *) (chk)) + sizeof(MemoryChunk)))

Definition at line 158 of file slab.c.

◆ SlabIsValid

#define SlabIsValid (   set)    ((set) && IsA(set, SlabContext))

Definition at line 196 of file slab.c.

Typedef Documentation

◆ SlabBlock

◆ SlabContext

Function Documentation

◆ SlabAlloc()

void * SlabAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 658 of file slab.c.

659{
660 SlabContext *slab = (SlabContext *) context;
661 SlabBlock *block;
663
664 Assert(SlabIsValid(slab));
665
666 /* sanity check that this is pointing to a valid blocklist */
667 Assert(slab->curBlocklistIndex >= 0);
669
670 /*
671 * Make sure we only allow correct request size. This doubles as the
672 * MemoryContextCheckSize check.
673 */
674 if (unlikely(size != slab->chunkSize))
675 SlabAllocInvalidSize(context, size);
676
677 if (unlikely(slab->curBlocklistIndex == 0))
678 {
679 /*
680 * Handle the case when there are no partially filled blocks
681 * available. This happens either when the last allocation took the
682 * last chunk in the block, or when SlabFree() free'd the final block.
683 */
684 return SlabAllocFromNewBlock(context, size, flags);
685 }
686 else
687 {
688 dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
690
691 Assert(!dlist_is_empty(blocklist));
692
693 /* grab the block from the blocklist */
694 block = dlist_head_element(SlabBlock, node, blocklist);
695
696 /* make sure we actually got a valid block, with matching nfree */
697 Assert(block != NULL);
698 Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
699 Assert(block->nfree > 0);
700
701 /* fetch the next chunk from this block */
702 chunk = SlabGetNextFreeChunk(slab, block);
703
704 /* get the new blocklist index based on the new free chunk count */
706
707 /*
708 * Handle the case where the blocklist index changes. This also deals
709 * with blocks becoming full as only full blocks go at index 0.
710 */
712 {
713 dlist_delete_from(blocklist, &block->node);
715
716 if (dlist_is_empty(blocklist))
718 }
719 }
720
721 return SlabAllocSetupNewChunk(context, block, chunk, size);
722}

References Assert, SlabContext::blocklist, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dlist_delete_from(), dlist_head_element, dlist_is_empty(), dlist_push_head(), SlabBlock::nfree, SlabBlock::node, SlabAllocFromNewBlock(), SlabAllocInvalidSize(), SlabAllocSetupNewChunk(), SlabBlocklistIndex(), SlabFindNextBlockListIndex(), SlabGetNextFreeChunk(), SlabIsValid, and unlikely.

◆ SlabAllocFromNewBlock()

static pg_noinline void * SlabAllocFromNewBlock ( MemoryContext  context,
Size  size,
int  flags 
)
static

Definition at line 564 of file slab.c.

565{
566 SlabContext *slab = (SlabContext *) context;
567 SlabBlock *block;
569 dlist_head *blocklist;
570 int blocklist_idx;
571
572 /* to save allocating a new one, first check the empty blocks list */
573 if (dclist_count(&slab->emptyblocks) > 0)
574 {
576
577 block = dlist_container(SlabBlock, node, node);
578
579 /*
580 * SlabFree() should have left this block in a valid state with all
581 * chunks free. Ensure that's the case.
582 */
583 Assert(block->nfree == slab->chunksPerBlock);
584
585 /* fetch the next chunk from this block */
586 chunk = SlabGetNextFreeChunk(slab, block);
587 }
588 else
589 {
590 block = (SlabBlock *) malloc(slab->blockSize);
591
592 if (unlikely(block == NULL))
593 return MemoryContextAllocationFailure(context, size, flags);
594
595 /* Make a vchunk covering the new block's header */
597
598 block->slab = slab;
599 context->mem_allocated += slab->blockSize;
600
601 /* use the first chunk in the new block */
602 chunk = SlabBlockGetChunk(slab, block, 0);
603
604 block->nfree = slab->chunksPerBlock - 1;
605 block->unused = SlabBlockGetChunk(slab, block, 1);
606 block->freehead = NULL;
607 block->nunused = slab->chunksPerBlock - 1;
608 }
609
610 /* find the blocklist element for storing blocks with 1 used chunk */
611 blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
612 blocklist = &slab->blocklist[blocklist_idx];
613
614 /* this better be empty. We just added a block thinking it was */
615 Assert(dlist_is_empty(blocklist));
616
617 dlist_push_head(blocklist, &block->node);
618
620
621 return SlabAllocSetupNewChunk(context, block, chunk, size);
622}

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_pop_head_node(), dlist_container, dlist_is_empty(), dlist_push_head(), SlabContext::emptyblocks, SlabBlock::freehead, malloc, MemoryContextData::mem_allocated, MemoryContextAllocationFailure(), SlabBlock::nfree, SlabBlock::node, SlabBlock::nunused, SlabBlock::slab, Slab_BLOCKHDRSZ, SlabAllocSetupNewChunk(), SlabBlockGetChunk, SlabBlocklistIndex(), SlabGetNextFreeChunk(), unlikely, SlabBlock::unused, and VALGRIND_MEMPOOL_ALLOC.

Referenced by SlabAlloc().

◆ SlabAllocInvalidSize()

pg_noinline static pg_noreturn void SlabAllocInvalidSize ( MemoryContext  context,
Size  size 
)
static

Definition at line 634 of file slab.c.

635{
636 SlabContext *slab = (SlabContext *) context;
637
638 elog(ERROR, "unexpected alloc chunk size %zu (expected %u)", size,
639 slab->chunkSize);
640}

References SlabContext::chunkSize, elog, and ERROR.

Referenced by SlabAlloc().

◆ SlabAllocSetupNewChunk()

static void * SlabAllocSetupNewChunk ( MemoryContext  context,
SlabBlock block,
MemoryChunk chunk,
Size  size 
)
inlinestatic

Definition at line 523 of file slab.c.

525{
526 SlabContext *slab = (SlabContext *) context;
527
528 /*
529 * Check that the chunk pointer is actually somewhere on the block and is
530 * aligned as expected.
531 */
532 Assert(chunk >= SlabBlockGetChunk(slab, block, 0));
533 Assert(chunk <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1));
534 Assert(SlabChunkMod(slab, block, chunk) == 0);
535
536 /* Prepare to initialize the chunk header. */
538
540
541#ifdef MEMORY_CONTEXT_CHECKING
542 /* slab mark to catch clobber of "unused" space */
546 slab->chunkSize,
547 slab->fullChunkSize -
548 (slab->chunkSize + Slab_CHUNKHDRSZ));
549#endif
550
551#ifdef RANDOMIZE_ALLOCATED_MEMORY
552 /* fill the allocated space with junk */
554#endif
555
556 /* Disallow access to the chunk header. */
558
560}

References Assert, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::fullChunkSize, MAXALIGN, MCTX_SLAB_ID, MemoryChunkGetPointer, MemoryChunkSetHdrMask(), Slab_CHUNKHDRSZ, SlabBlockGetChunk, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

Referenced by SlabAlloc(), and SlabAllocFromNewBlock().

◆ SlabBlocklistIndex()

static int32 SlabBlocklistIndex ( SlabContext slab,
int  nfree 
)
inlinestatic

Definition at line 211 of file slab.c.

212{
213 int32 index;
214 int32 blocklist_shift = slab->blocklist_shift;
215
216 Assert(nfree >= 0 && nfree <= slab->chunksPerBlock);
217
218 /*
219 * Determine the blocklist index based on the number of free chunks. We
220 * must ensure that 0 free chunks is dedicated to index 0. Everything
221 * else must be >= 1 and < SLAB_BLOCKLIST_COUNT.
222 *
223 * To make this as efficient as possible, we exploit some two's complement
224 * arithmetic where we reverse the sign before bit shifting. This results
225 * in an nfree of 0 using index 0 and anything non-zero staying non-zero.
226 * This is exploiting 0 and -0 being the same in two's complement. When
227 * we're done, we just need to flip the sign back over again for a
228 * positive index.
229 */
230 index = -((-nfree) >> blocklist_shift);
231
232 if (nfree == 0)
233 Assert(index == 0);
234 else
236
237 return index;
238}

References Assert, SlabContext::blocklist_shift, and SLAB_BLOCKLIST_COUNT.

Referenced by SlabAlloc(), SlabAllocFromNewBlock(), and SlabFree().

◆ SlabContextCreate()

MemoryContext SlabContextCreate ( MemoryContext  parent,
const char name,
Size  blockSize,
Size  chunkSize 
)

Definition at line 322 of file slab.c.

326{
327 int chunksPerBlock;
328 Size fullChunkSize;
329 SlabContext *slab;
330 int i;
331
332 /* ensure MemoryChunk's size is properly maxaligned */
334 "sizeof(MemoryChunk) is not maxaligned");
336
337 /*
338 * Ensure there's enough space to store the pointer to the next free chunk
339 * in the memory of the (otherwise) unused allocation.
340 */
341 if (chunkSize < sizeof(MemoryChunk *))
342 chunkSize = sizeof(MemoryChunk *);
343
344 /* length of the maxaligned chunk including the chunk header */
345#ifdef MEMORY_CONTEXT_CHECKING
346 /* ensure there's always space for the sentinel byte */
347 fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1);
348#else
349 fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize);
350#endif
351
352 Assert(fullChunkSize <= MEMORYCHUNK_MAX_VALUE);
353
354 /* compute the number of chunks that will fit on each block */
355 chunksPerBlock = (blockSize - Slab_BLOCKHDRSZ) / fullChunkSize;
356
357 /* Make sure the block can store at least one chunk. */
358 if (chunksPerBlock == 0)
359 elog(ERROR, "block size %zu for slab is too small for %zu-byte chunks",
360 blockSize, chunkSize);
361
362
363
364 slab = (SlabContext *) malloc(Slab_CONTEXT_HDRSZ(chunksPerBlock));
365 if (slab == NULL)
366 {
370 errmsg("out of memory"),
371 errdetail("Failed while creating memory context \"%s\".",
372 name)));
373 }
374
375 /*
376 * Avoid writing code that can fail between here and MemoryContextCreate;
377 * we'd leak the header if we ereport in this stretch.
378 */
379
380 /* See comments about Valgrind interactions in aset.c */
381 VALGRIND_CREATE_MEMPOOL(slab, 0, false);
382 /* This vchunk covers the SlabContext only */
383 VALGRIND_MEMPOOL_ALLOC(slab, slab, sizeof(SlabContext));
384
385 /* Fill in SlabContext-specific header fields */
386 slab->chunkSize = (uint32) chunkSize;
387 slab->fullChunkSize = (uint32) fullChunkSize;
388 slab->blockSize = (uint32) blockSize;
389 slab->chunksPerBlock = chunksPerBlock;
390 slab->curBlocklistIndex = 0;
391
392 /*
393 * Compute a shift that guarantees that shifting chunksPerBlock with it is
394 * < SLAB_BLOCKLIST_COUNT - 1. The reason that we subtract 1 from
395 * SLAB_BLOCKLIST_COUNT in this calculation is that we reserve the 0th
396 * blocklist element for blocks which have no free chunks.
397 *
398 * We calculate the number of bits to shift by rather than a divisor to
399 * divide by as performing division each time we need to find the
400 * blocklist index would be much slower.
401 */
402 slab->blocklist_shift = 0;
403 while ((slab->chunksPerBlock >> slab->blocklist_shift) >= (SLAB_BLOCKLIST_COUNT - 1))
404 slab->blocklist_shift++;
405
406 /* initialize the list to store empty blocks to be reused */
407 dclist_init(&slab->emptyblocks);
408
409 /* initialize each blocklist slot */
410 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
411 dlist_init(&slab->blocklist[i]);
412
413#ifdef MEMORY_CONTEXT_CHECKING
414 /* set the isChunkFree pointer right after the end of the context */
415 slab->isChunkFree = (bool *) ((char *) slab + sizeof(SlabContext));
416#endif
417
418 /* Finally, do the type-independent part of context creation */
422 parent,
423 name);
424
425 return (MemoryContext) slab;
426}

References Assert, SlabContext::blocklist, SlabContext::blocklist_shift, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_init(), dlist_init(), elog, SlabContext::emptyblocks, ereport, errcode(), errdetail(), errmsg(), ERROR, SlabContext::fullChunkSize, i, malloc, MAXALIGN, MCTX_SLAB_ID, MEMORYCHUNK_MAX_BLOCKOFFSET, MEMORYCHUNK_MAX_VALUE, MemoryContextCreate(), MemoryContextStats(), name, Slab_BLOCKHDRSZ, SLAB_BLOCKLIST_COUNT, Slab_CHUNKHDRSZ, Slab_CONTEXT_HDRSZ, StaticAssertDecl, TopMemoryContext, VALGRIND_CREATE_MEMPOOL, and VALGRIND_MEMPOOL_ALLOC.

Referenced by for(), ReorderBufferAllocate(), and test_random().

◆ SlabDelete()

void SlabDelete ( MemoryContext  context)

Definition at line 506 of file slab.c.

507{
508 /* Reset to release all the SlabBlocks */
509 SlabReset(context);
510
511 /* Destroy the vpool -- see notes in aset.c */
513
514 /* And free the context header */
515 free(context);
516}

References free, SlabReset(), and VALGRIND_DESTROY_MEMPOOL.

◆ SlabFindNextBlockListIndex()

static int32 SlabFindNextBlockListIndex ( SlabContext slab)
static

Definition at line 251 of file slab.c.

252{
253 /* start at 1 as blocklist[0] is for full blocks. */
254 for (int i = 1; i < SLAB_BLOCKLIST_COUNT; i++)
255 {
256 /* return the first found non-empty index */
257 if (!dlist_is_empty(&slab->blocklist[i]))
258 return i;
259 }
260
261 /* no blocks with free space */
262 return 0;
263}

References SlabContext::blocklist, dlist_is_empty(), i, and SLAB_BLOCKLIST_COUNT.

Referenced by SlabAlloc(), and SlabFree().

◆ SlabFree()

void SlabFree ( void pointer)

Definition at line 729 of file slab.c.

730{
732 SlabBlock *block;
733 SlabContext *slab;
734 int curBlocklistIdx;
735 int newBlocklistIdx;
736
737 /* Allow access to the chunk header. */
739
740 block = MemoryChunkGetBlock(chunk);
741
742 /*
743 * For speed reasons we just Assert that the referenced block is good.
744 * Future field experience may show that this Assert had better become a
745 * regular runtime test-and-elog check.
746 */
747 Assert(SlabBlockIsValid(block));
748 slab = block->slab;
749
750#ifdef MEMORY_CONTEXT_CHECKING
751 /* Test for someone scribbling on unused space in chunk */
753 if (!sentinel_ok(pointer, slab->chunkSize))
754 elog(WARNING, "detected write past chunk end in %s %p",
755 slab->header.name, chunk);
756#endif
757
758 /* push this chunk onto the head of the block's free list */
759 *(MemoryChunk **) pointer = block->freehead;
760 block->freehead = chunk;
761
762 block->nfree++;
763
764 Assert(block->nfree > 0);
765 Assert(block->nfree <= slab->chunksPerBlock);
766
767#ifdef CLOBBER_FREED_MEMORY
768 /* don't wipe the free list MemoryChunk pointer stored in the chunk */
769 wipe_mem((char *) pointer + sizeof(MemoryChunk *),
770 slab->chunkSize - sizeof(MemoryChunk *));
771#endif
772
773 curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
775
776 /*
777 * Check if the block needs to be moved to another element on the
778 * blocklist based on it now having 1 more free chunk.
779 */
781 {
782 /* do the move */
785
786 /*
787 * The blocklist[curBlocklistIdx] may now be empty or we may now be
788 * able to use a lower-element blocklist. We'll need to redetermine
789 * what the slab->curBlocklistIndex is if the current blocklist was
790 * changed or if a lower element one was changed. We must ensure we
791 * use the list with the fullest block(s).
792 */
794 {
796
797 /*
798 * We know there must be a block with at least 1 unused chunk as
799 * we just pfree'd one. Ensure curBlocklistIndex reflects this.
800 */
801 Assert(slab->curBlocklistIndex > 0);
802 }
803 }
804
805 /* Handle when a block becomes completely empty */
806 if (unlikely(block->nfree == slab->chunksPerBlock))
807 {
808 /* remove the block */
810
811 /*
812 * To avoid thrashing malloc/free, we keep a list of empty blocks that
813 * we can reuse again instead of having to malloc a new one.
814 */
816 dclist_push_head(&slab->emptyblocks, &block->node);
817 else
818 {
819 /*
820 * When we have enough empty blocks stored already, we actually
821 * free the block.
822 */
823#ifdef CLOBBER_FREED_MEMORY
824 wipe_mem(block, slab->blockSize);
825#endif
826
827 /* As in aset.c, free block-header vchunks explicitly */
828 VALGRIND_MEMPOOL_FREE(slab, block);
829
830 free(block);
831 slab->header.mem_allocated -= slab->blockSize;
832 }
833
834 /*
835 * Check if we need to reset the blocklist index. This is required
836 * when the blocklist this block is on has become completely empty.
837 */
838 if (slab->curBlocklistIndex == newBlocklistIdx &&
841 }
842}

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_push_head(), dlist_delete_from(), dlist_is_empty(), dlist_push_head(), elog, SlabContext::emptyblocks, free, SlabBlock::freehead, SlabContext::fullChunkSize, SlabContext::header, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryContextData::name, SlabBlock::nfree, SlabBlock::node, PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SLAB_MAXIMUM_EMPTY_BLOCKS, SlabBlockIsValid, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), unlikely, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MEMPOOL_FREE, and WARNING.

◆ SlabGetChunkContext()

MemoryContext SlabGetChunkContext ( void pointer)

Definition at line 895 of file slab.c.

896{
898 SlabBlock *block;
899
900 /* Allow access to the chunk header. */
902
903 block = MemoryChunkGetBlock(chunk);
904
905 /* Disallow access to the chunk header. */
907
908 Assert(SlabBlockIsValid(block));
909
910 return &block->slab->header;
911}

References Assert, SlabContext::header, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabGetChunkSpace()

Size SlabGetChunkSpace ( void pointer)

Definition at line 919 of file slab.c.

920{
922 SlabBlock *block;
923 SlabContext *slab;
924
925 /* Allow access to the chunk header. */
927
928 block = MemoryChunkGetBlock(chunk);
929
930 /* Disallow access to the chunk header. */
932
933 Assert(SlabBlockIsValid(block));
934 slab = block->slab;
935
936 return slab->fullChunkSize;
937}

References Assert, SlabContext::fullChunkSize, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabGetNextFreeChunk()

static MemoryChunk * SlabGetNextFreeChunk ( SlabContext slab,
SlabBlock block 
)
inlinestatic

Definition at line 271 of file slab.c.

272{
274
275 Assert(block->nfree > 0);
276
277 if (block->freehead != NULL)
278 {
279 chunk = block->freehead;
280
281 /*
282 * Pop the chunk from the linked list of free chunks. The pointer to
283 * the next free chunk is stored in the chunk itself.
284 */
287
288 /* check nothing stomped on the free chunk's memory */
289 Assert(block->freehead == NULL ||
290 (block->freehead >= SlabBlockGetChunk(slab, block, 0) &&
291 block->freehead <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1) &&
292 SlabChunkMod(slab, block, block->freehead) == 0));
293 }
294 else
295 {
296 Assert(block->nunused > 0);
297
298 chunk = block->unused;
299 block->unused = (MemoryChunk *) (((char *) block->unused) + slab->fullChunkSize);
300 block->nunused--;
301 }
302
303 block->nfree--;
304
305 return chunk;
306}

References Assert, SlabContext::chunksPerBlock, SlabBlock::freehead, SlabContext::fullChunkSize, SlabBlock::nfree, SlabBlock::nunused, SlabBlockGetChunk, SlabChunkGetPointer, SlabBlock::unused, and VALGRIND_MAKE_MEM_DEFINED.

Referenced by SlabAlloc(), and SlabAllocFromNewBlock().

◆ SlabIsEmpty()

bool SlabIsEmpty ( MemoryContext  context)

Definition at line 944 of file slab.c.

945{
946 Assert(SlabIsValid((SlabContext *) context));
947
948 return (context->mem_allocated == 0);
949}

References Assert, MemoryContextData::mem_allocated, and SlabIsValid.

◆ SlabRealloc()

void * SlabRealloc ( void pointer,
Size  size,
int  flags 
)

Definition at line 858 of file slab.c.

859{
861 SlabBlock *block;
862 SlabContext *slab;
863
864 /* Allow access to the chunk header. */
866
867 block = MemoryChunkGetBlock(chunk);
868
869 /* Disallow access to the chunk header. */
871
872 /*
873 * Try to verify that we have a sane block pointer: the block header
874 * should reference a slab context. (We use a test-and-elog, not just
875 * Assert, because it seems highly likely that we're here in error in the
876 * first place.)
877 */
878 if (!SlabBlockIsValid(block))
879 elog(ERROR, "could not find block containing chunk %p", chunk);
880 slab = block->slab;
881
882 /* can't do actual realloc with slab, but let's try to be gentle */
883 if (size == slab->chunkSize)
884 return pointer;
885
886 elog(ERROR, "slab allocator does not support realloc()");
887 return NULL; /* keep compiler quiet */
888}

References SlabContext::chunkSize, elog, ERROR, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabReset()

void SlabReset ( MemoryContext  context)

Definition at line 436 of file slab.c.

437{
438 SlabContext *slab = (SlabContext *) context;
440 int i;
441
442 Assert(SlabIsValid(slab));
443
444#ifdef MEMORY_CONTEXT_CHECKING
445 /* Check for corruption and leaks before freeing */
446 SlabCheck(context);
447#endif
448
449 /* release any retained empty blocks */
451 {
452 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
453
455
456#ifdef CLOBBER_FREED_MEMORY
457 wipe_mem(block, slab->blockSize);
458#endif
459
460 /* As in aset.c, free block-header vchunks explicitly */
461 VALGRIND_MEMPOOL_FREE(slab, block);
462
463 free(block);
464 context->mem_allocated -= slab->blockSize;
465 }
466
467 /* walk over blocklist and free the blocks */
468 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
469 {
471 {
472 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
473
474 dlist_delete(miter.cur);
475
476#ifdef CLOBBER_FREED_MEMORY
477 wipe_mem(block, slab->blockSize);
478#endif
479
480 /* As in aset.c, free block-header vchunks explicitly */
481 VALGRIND_MEMPOOL_FREE(slab, block);
482
483 free(block);
484 context->mem_allocated -= slab->blockSize;
485 }
486 }
487
488 /*
489 * Instruct Valgrind to throw away all the vchunks associated with this
490 * context, except for the one covering the SlabContext. This gets rid of
491 * the vchunks for whatever user data is getting discarded by the context
492 * reset.
493 */
494 VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext));
495
496 slab->curBlocklistIndex = 0;
497
498 Assert(context->mem_allocated == 0);
499}

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::curBlocklistIndex, dclist_delete_from(), dclist_foreach_modify, dlist_container, dlist_delete(), dlist_foreach_modify, SlabContext::emptyblocks, free, i, MemoryContextData::mem_allocated, SLAB_BLOCKLIST_COUNT, SlabIsValid, VALGRIND_MEMPOOL_FREE, and VALGRIND_MEMPOOL_TRIM.

Referenced by SlabDelete().

◆ SlabStats()

void SlabStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 961 of file slab.c.

965{
966 SlabContext *slab = (SlabContext *) context;
967 Size nblocks = 0;
968 Size freechunks = 0;
969 Size totalspace;
970 Size freespace = 0;
971 int i;
972
973 Assert(SlabIsValid(slab));
974
975 /* Include context header in totalspace */
976 totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
977
978 /* Add the space consumed by blocks in the emptyblocks list */
979 totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
980
981 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
982 {
983 dlist_iter iter;
984
985 dlist_foreach(iter, &slab->blocklist[i])
986 {
987 SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
988
989 nblocks++;
990 totalspace += slab->blockSize;
991 freespace += slab->fullChunkSize * block->nfree;
992 freechunks += block->nfree;
993 }
994 }
995
996 if (printfunc)
997 {
998 char stats_string[200];
999
1000 /* XXX should we include free chunks on empty blocks? */
1002 "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
1003 totalspace, nblocks, dclist_count(&slab->emptyblocks),
1004 freespace, freechunks, totalspace - freespace);
1006 }
1007
1008 if (totals)
1009 {
1010 totals->nblocks += nblocks;
1011 totals->freechunks += freechunks;
1012 totals->totalspace += totalspace;
1013 totals->freespace += freespace;
1014 }
1015}

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunksPerBlock, dlist_iter::cur, dclist_count(), dlist_container, dlist_foreach, SlabContext::emptyblocks, SlabContext::fullChunkSize, i, SlabBlock::nfree, SLAB_BLOCKLIST_COUNT, Slab_CONTEXT_HDRSZ, SlabIsValid, and snprintf.