PostgreSQL Source Code  git master
memutils_internal.h File Reference
#include "utils/memutils.h"
Include dependency graph for memutils_internal.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define PallocAlignedExtraBytes(alignto)    ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))
 
#define MEMORY_CONTEXT_METHODID_BITS   3
 
#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)
 

Typedefs

typedef enum MemoryContextMethodID MemoryContextMethodID
 

Enumerations

enum  MemoryContextMethodID {
  MCTX_UNUSED1_ID , MCTX_UNUSED2_ID , MCTX_UNUSED3_ID , MCTX_ASET_ID ,
  MCTX_GENERATION_ID , MCTX_SLAB_ID , MCTX_ALIGNED_REDIRECT_ID , MCTX_UNUSED4_ID
}
 

Functions

void * AllocSetAlloc (MemoryContext context, Size size)
 
void AllocSetFree (void *pointer)
 
void * AllocSetRealloc (void *pointer, Size size)
 
void AllocSetReset (MemoryContext context)
 
void AllocSetDelete (MemoryContext context)
 
MemoryContext AllocSetGetChunkContext (void *pointer)
 
Size AllocSetGetChunkSpace (void *pointer)
 
bool AllocSetIsEmpty (MemoryContext context)
 
void AllocSetStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * GenerationAlloc (MemoryContext context, Size size)
 
void GenerationFree (void *pointer)
 
void * GenerationRealloc (void *pointer, Size size)
 
void GenerationReset (MemoryContext context)
 
void GenerationDelete (MemoryContext context)
 
MemoryContext GenerationGetChunkContext (void *pointer)
 
Size GenerationGetChunkSpace (void *pointer)
 
bool GenerationIsEmpty (MemoryContext context)
 
void GenerationStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * SlabAlloc (MemoryContext context, Size size)
 
void SlabFree (void *pointer)
 
void * SlabRealloc (void *pointer, Size size)
 
void SlabReset (MemoryContext context)
 
void SlabDelete (MemoryContext context)
 
MemoryContext SlabGetChunkContext (void *pointer)
 
Size SlabGetChunkSpace (void *pointer)
 
bool SlabIsEmpty (MemoryContext context)
 
void SlabStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void AlignedAllocFree (void *pointer)
 
void * AlignedAllocRealloc (void *pointer, Size size)
 
MemoryContext AlignedAllocGetChunkContext (void *pointer)
 
Size AlignedAllocGetChunkSpace (void *pointer)
 
void MemoryContextCreate (MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
 

Macro Definition Documentation

◆ MEMORY_CONTEXT_METHODID_BITS

#define MEMORY_CONTEXT_METHODID_BITS   3

Definition at line 121 of file memutils_internal.h.

◆ MEMORY_CONTEXT_METHODID_MASK

#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)

Definition at line 122 of file memutils_internal.h.

◆ PallocAlignedExtraBytes

#define PallocAlignedExtraBytes (   alignto)     ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))

Definition at line 88 of file memutils_internal.h.

Typedef Documentation

◆ MemoryContextMethodID

Enumeration Type Documentation

◆ MemoryContextMethodID

Enumerator
MCTX_UNUSED1_ID 
MCTX_UNUSED2_ID 
MCTX_UNUSED3_ID 
MCTX_ASET_ID 
MCTX_GENERATION_ID 
MCTX_SLAB_ID 
MCTX_ALIGNED_REDIRECT_ID 
MCTX_UNUSED4_ID 

Definition at line 105 of file memutils_internal.h.

/*
 * MemoryContextMethodID
 *		A unique identifier for each MemoryContext implementation, encoded
 *		into each chunk's header (see MEMORY_CONTEXT_METHODID_BITS/_MASK).
 *		The "unused" IDs are bit patterns chosen to help detect references
 *		to bogus memory, per the per-value comments below.
 */
typedef enum MemoryContextMethodID
{
	MCTX_UNUSED1_ID,			/* 000 occurs in never-used memory */
	MCTX_UNUSED2_ID,			/* glibc malloc'd chunks usually match 001 */
	MCTX_UNUSED3_ID,			/* glibc malloc'd chunks > 128kB match 010 */
	MCTX_ASET_ID,
	MCTX_GENERATION_ID,			/* restored: elided from extraction (line 111) */
	MCTX_SLAB_ID,
	MCTX_ALIGNED_REDIRECT_ID,	/* restored: elided from extraction (line 113) */
	MCTX_UNUSED4_ID				/* 111 occurs in wipe_mem'd memory */
} MemoryContextMethodID;
MemoryContextMethodID
@ MCTX_GENERATION_ID
@ MCTX_UNUSED4_ID
@ MCTX_UNUSED3_ID
@ MCTX_UNUSED1_ID
@ MCTX_UNUSED2_ID
@ MCTX_SLAB_ID
@ MCTX_ASET_ID
@ MCTX_ALIGNED_REDIRECT_ID

Function Documentation

◆ AlignedAllocFree()

void AlignedAllocFree ( void *  pointer)

Definition at line 29 of file alignedalloc.c.

30 {
31  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
32  void *unaligned;
33 
34  VALGRIND_MAKE_MEM_DEFINED(chunk, sizeof(MemoryChunk));
35 
37 
38  /* obtain the original (unaligned) allocated pointer */
39  unaligned = MemoryChunkGetBlock(chunk);
40 
41 #ifdef MEMORY_CONTEXT_CHECKING
42  /* Test for someone scribbling on unused space in chunk */
43  if (!sentinel_ok(pointer, chunk->requested_size))
44  elog(WARNING, "detected write past chunk end in %s %p",
45  GetMemoryChunkContext(unaligned)->name, chunk);
46 #endif
47 
48  pfree(unaligned);
49 }
#define WARNING
Definition: elog.h:36
Assert(fmt[strlen(fmt) - 1] !='\n')
void pfree(void *pointer)
Definition: mcxt.c:1456
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:616
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
const char * name

References Assert(), elog(), GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), name, pfree(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

◆ AlignedAllocGetChunkContext()

MemoryContext AlignedAllocGetChunkContext ( void *  pointer)

Definition at line 118 of file alignedalloc.c.

119 {
120  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
121  MemoryContext cxt;
122 
123  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
124 
125  Assert(!MemoryChunkIsExternal(redirchunk));
126 
127  cxt = GetMemoryChunkContext(MemoryChunkGetBlock(redirchunk));
128 
129  VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
130 
131  return cxt;
132 }
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27

References Assert(), GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocGetChunkSpace()

Size AlignedAllocGetChunkSpace ( void *  pointer)

Definition at line 140 of file alignedalloc.c.

141 {
142  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
143  void *unaligned;
144  Size space;
145 
146  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
147 
148  unaligned = MemoryChunkGetBlock(redirchunk);
149  space = GetMemoryChunkSpace(unaligned);
150 
151  VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
152 
153  return space;
154 }
size_t Size
Definition: c.h:594
Size GetMemoryChunkSpace(void *pointer)
Definition: mcxt.c:630

References GetMemoryChunkSpace(), MemoryChunkGetBlock(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocRealloc()

void* AlignedAllocRealloc ( void *  pointer,
Size  size 
)

Definition at line 60 of file alignedalloc.c.

61 {
62  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
63  Size alignto;
64  void *unaligned;
65  MemoryContext ctx;
66  Size old_size;
67  void *newptr;
68 
69  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
70 
71  alignto = MemoryChunkGetValue(redirchunk);
72  unaligned = MemoryChunkGetBlock(redirchunk);
73 
74  /* sanity check this is a power of 2 value */
75  Assert((alignto & (alignto - 1)) == 0);
76 
77  /*
78  * Determine the size of the original allocation. We can't determine this
79  * exactly as GetMemoryChunkSpace() returns the total space used for the
80  * allocation, which for contexts like aset includes rounding up to the
81  * next power of 2. However, this value is just used to memcpy() the old
82  * data into the new allocation, so we only need to concern ourselves with
83  * not reading beyond the end of the original allocation's memory. The
84  * drawback here is that we may copy more bytes than we need to, which
85  * only amounts to wasted effort. We can safely subtract the extra bytes
86  * that we requested to allow us to align the pointer. We must also
87  * subtract the space for the unaligned pointer's MemoryChunk since
88  * GetMemoryChunkSpace should have included that. This does assume that
89  * all context types use MemoryChunk as a chunk header.
90  */
91  old_size = GetMemoryChunkSpace(unaligned) -
92  PallocAlignedExtraBytes(alignto) - sizeof(MemoryChunk);
93 
94 #ifdef MEMORY_CONTEXT_CHECKING
95  /* check that GetMemoryChunkSpace returned something realistic */
96  Assert(old_size >= redirchunk->requested_size);
97 #endif
98 
99  ctx = GetMemoryChunkContext(unaligned);
100  newptr = MemoryContextAllocAligned(ctx, size, alignto, 0);
101 
102  /*
103  * We may memcpy beyond the end of the original allocation request size,
104  * so we must mark the entire allocation as defined.
105  */
106  VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
107  memcpy(newptr, pointer, Min(size, old_size));
108  pfree(unaligned);
109 
110  return newptr;
111 }
#define Min(x, y)
Definition: c.h:993
void * MemoryContextAllocAligned(MemoryContext context, Size size, Size alignto, int flags)
Definition: mcxt.c:1344
#define PallocAlignedExtraBytes(alignto)
static Size MemoryChunkGetValue(MemoryChunk *chunk)
struct MemoryChunk MemoryChunk

References Assert(), GetMemoryChunkContext(), GetMemoryChunkSpace(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryContextAllocAligned(), Min, PallocAlignedExtraBytes, pfree(), PointerGetMemoryChunk, and VALGRIND_MAKE_MEM_DEFINED.

◆ AllocSetAlloc()

void* AllocSetAlloc ( MemoryContext  context,
Size  size 
)

Definition at line 703 of file aset.c.

704 {
705  AllocSet set = (AllocSet) context;
706  AllocBlock block;
707  MemoryChunk *chunk;
708  int fidx;
709  Size chunk_size;
710  Size blksize;
711 
712  Assert(AllocSetIsValid(set));
713 
714  /*
715  * If requested size exceeds maximum for chunks, allocate an entire block
716  * for this request.
717  */
718  if (size > set->allocChunkLimit)
719  {
720 #ifdef MEMORY_CONTEXT_CHECKING
721  /* ensure there's always space for the sentinel byte */
722  chunk_size = MAXALIGN(size + 1);
723 #else
724  chunk_size = MAXALIGN(size);
725 #endif
726 
727  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
728  block = (AllocBlock) malloc(blksize);
729  if (block == NULL)
730  return NULL;
731 
732  context->mem_allocated += blksize;
733 
734  block->aset = set;
735  block->freeptr = block->endptr = ((char *) block) + blksize;
736 
737  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
738 
739  /* mark the MemoryChunk as externally managed */
741 
742 #ifdef MEMORY_CONTEXT_CHECKING
743  chunk->requested_size = size;
744  /* set mark to catch clobber of "unused" space */
745  Assert(size < chunk_size);
746  set_sentinel(MemoryChunkGetPointer(chunk), size);
747 #endif
748 #ifdef RANDOMIZE_ALLOCATED_MEMORY
749  /* fill the allocated space with junk */
750  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
751 #endif
752 
753  /*
754  * Stick the new block underneath the active allocation block, if any,
755  * so that we don't lose the use of the space remaining therein.
756  */
757  if (set->blocks != NULL)
758  {
759  block->prev = set->blocks;
760  block->next = set->blocks->next;
761  if (block->next)
762  block->next->prev = block;
763  set->blocks->next = block;
764  }
765  else
766  {
767  block->prev = NULL;
768  block->next = NULL;
769  set->blocks = block;
770  }
771 
772  /* Ensure any padding bytes are marked NOACCESS. */
773  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
774  chunk_size - size);
775 
776  /* Disallow access to the chunk header. */
778 
779  return MemoryChunkGetPointer(chunk);
780  }
781 
782  /*
783  * Request is small enough to be treated as a chunk. Look in the
784  * corresponding free list to see if there is a free chunk we could reuse.
785  * If one is found, remove it from the free list, make it again a member
786  * of the alloc set and return its data address.
787  *
788  * Note that we don't attempt to ensure there's space for the sentinel
789  * byte here. We expect a large proportion of allocations to be for sizes
790  * which are already a power of 2. If we were to always make space for a
791  * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
792  * doubling the memory requirements for such allocations.
793  */
794  fidx = AllocSetFreeIndex(size);
795  chunk = set->freelist[fidx];
796  if (chunk != NULL)
797  {
799 
800  /* Allow access to the chunk header. */
802 
803  Assert(fidx == MemoryChunkGetValue(chunk));
804 
805  /* pop this chunk off the freelist */
807  set->freelist[fidx] = link->next;
809 
810 #ifdef MEMORY_CONTEXT_CHECKING
811  chunk->requested_size = size;
812  /* set mark to catch clobber of "unused" space */
813  if (size < GetChunkSizeFromFreeListIdx(fidx))
814  set_sentinel(MemoryChunkGetPointer(chunk), size);
815 #endif
816 #ifdef RANDOMIZE_ALLOCATED_MEMORY
817  /* fill the allocated space with junk */
818  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
819 #endif
820 
821  /* Ensure any padding bytes are marked NOACCESS. */
822  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
823  GetChunkSizeFromFreeListIdx(fidx) - size);
824 
825  /* Disallow access to the chunk header. */
827 
828  return MemoryChunkGetPointer(chunk);
829  }
830 
831  /*
832  * Choose the actual chunk size to allocate.
833  */
834  chunk_size = GetChunkSizeFromFreeListIdx(fidx);
835  Assert(chunk_size >= size);
836 
837  /*
838  * If there is enough room in the active allocation block, we will put the
839  * chunk into that block. Else must start a new one.
840  */
841  if ((block = set->blocks) != NULL)
842  {
843  Size availspace = block->endptr - block->freeptr;
844 
845  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
846  {
847  /*
848  * The existing active (top) block does not have enough room for
849  * the requested allocation, but it might still have a useful
850  * amount of space in it. Once we push it down in the block list,
851  * we'll never try to allocate more space from it. So, before we
852  * do that, carve up its free space into chunks that we can put on
853  * the set's freelists.
854  *
855  * Because we can only get here when there's less than
856  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
857  * more than ALLOCSET_NUM_FREELISTS-1 times.
858  */
859  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
860  {
862  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
863  int a_fidx = AllocSetFreeIndex(availchunk);
864 
865  /*
866  * In most cases, we'll get back the index of the next larger
867  * freelist than the one we need to put this chunk on. The
868  * exception is when availchunk is exactly a power of 2.
869  */
870  if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
871  {
872  a_fidx--;
873  Assert(a_fidx >= 0);
874  availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
875  }
876 
877  chunk = (MemoryChunk *) (block->freeptr);
878 
879  /* Prepare to initialize the chunk header. */
881  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
882  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
883 
884  /* store the freelist index in the value field */
885  MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
886 #ifdef MEMORY_CONTEXT_CHECKING
887  chunk->requested_size = InvalidAllocSize; /* mark it free */
888 #endif
889  /* push this chunk onto the free list */
890  link = GetFreeListLink(chunk);
891 
893  link->next = set->freelist[a_fidx];
895 
896  set->freelist[a_fidx] = chunk;
897  }
898  /* Mark that we need to create a new block */
899  block = NULL;
900  }
901  }
902 
903  /*
904  * Time to create a new regular (multi-chunk) block?
905  */
906  if (block == NULL)
907  {
908  Size required_size;
909 
910  /*
911  * The first such block has size initBlockSize, and we double the
912  * space in each succeeding block, but not more than maxBlockSize.
913  */
914  blksize = set->nextBlockSize;
915  set->nextBlockSize <<= 1;
916  if (set->nextBlockSize > set->maxBlockSize)
917  set->nextBlockSize = set->maxBlockSize;
918 
919  /*
920  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
921  * space... but try to keep it a power of 2.
922  */
923  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
924  while (blksize < required_size)
925  blksize <<= 1;
926 
927  /* Try to allocate it */
928  block = (AllocBlock) malloc(blksize);
929 
930  /*
931  * We could be asking for pretty big blocks here, so cope if malloc
932  * fails. But give up if there's less than 1 MB or so available...
933  */
934  while (block == NULL && blksize > 1024 * 1024)
935  {
936  blksize >>= 1;
937  if (blksize < required_size)
938  break;
939  block = (AllocBlock) malloc(blksize);
940  }
941 
942  if (block == NULL)
943  return NULL;
944 
945  context->mem_allocated += blksize;
946 
947  block->aset = set;
948  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
949  block->endptr = ((char *) block) + blksize;
950 
951  /* Mark unallocated space NOACCESS. */
953  blksize - ALLOC_BLOCKHDRSZ);
954 
955  block->prev = NULL;
956  block->next = set->blocks;
957  if (block->next)
958  block->next->prev = block;
959  set->blocks = block;
960  }
961 
962  /*
963  * OK, do the allocation
964  */
965  chunk = (MemoryChunk *) (block->freeptr);
966 
967  /* Prepare to initialize the chunk header. */
969 
970  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
971  Assert(block->freeptr <= block->endptr);
972 
973  /* store the free list index in the value field */
974  MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
975 
976 #ifdef MEMORY_CONTEXT_CHECKING
977  chunk->requested_size = size;
978  /* set mark to catch clobber of "unused" space */
979  if (size < chunk_size)
980  set_sentinel(MemoryChunkGetPointer(chunk), size);
981 #endif
982 #ifdef RANDOMIZE_ALLOCATED_MEMORY
983  /* fill the allocated space with junk */
984  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
985 #endif
986 
987  /* Ensure any padding bytes are marked NOACCESS. */
988  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
989  chunk_size - size);
990 
991  /* Disallow access to the chunk header. */
993 
994  return MemoryChunkGetPointer(chunk);
995 }
#define AllocSetIsValid(set)
Definition: aset.c:200
#define GetFreeListLink(chkptr)
Definition: aset.c:132
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:105
#define GetChunkSizeFromFreeListIdx(fidx)
Definition: aset.c:140
#define ALLOC_MINBITS
Definition: aset.c:83
struct AllocBlockData * AllocBlock
Definition: aset.c:107
static int AllocSetFreeIndex(Size size)
Definition: aset.c:277
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:104
AllocSetContext * AllocSet
Definition: aset.c:167
#define MAXALIGN(LEN)
Definition: c.h:800
#define malloc(a)
Definition: header.h:50
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
#define InvalidAllocSize
Definition: memutils.h:47
#define MemoryChunkGetPointer(c)
static void MemoryChunkSetHdrMaskExternal(MemoryChunk *chunk, MemoryContextMethodID methodid)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
AllocBlock prev
Definition: aset.c:184
AllocSet aset
Definition: aset.c:183
char * freeptr
Definition: aset.c:186
AllocBlock next
Definition: aset.c:185
char * endptr
Definition: aset.c:187
uint32 maxBlockSize
Definition: aset.c:160
uint32 allocChunkLimit
Definition: aset.c:162
AllocBlock blocks
Definition: aset.c:156
uint32 nextBlockSize
Definition: aset.c:161
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:157
Size mem_allocated
Definition: memnodes.h:87

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, ALLOC_MINBITS, AllocSetContext::allocChunkLimit, AllocSetFreeIndex(), AllocSetIsValid, AllocBlockData::aset, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, InvalidAllocSize, link(), malloc, MAXALIGN, AllocSetContext::maxBlockSize, MCTX_ASET_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkSetHdrMask(), MemoryChunkSetHdrMaskExternal(), AllocBlockData::next, AllocSetContext::nextBlockSize, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

Referenced by AllocSetRealloc().

◆ AllocSetDelete()

void AllocSetDelete ( MemoryContext  context)

Definition at line 607 of file aset.c.

608 {
609  AllocSet set = (AllocSet) context;
610  AllocBlock block = set->blocks;
611  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
612 
613  Assert(AllocSetIsValid(set));
614 
615 #ifdef MEMORY_CONTEXT_CHECKING
616  /* Check for corruption and leaks before freeing */
617  AllocSetCheck(context);
618 #endif
619 
620  /* Remember keeper block size for Assert below */
621  keepersize = KeeperBlock(set)->endptr - ((char *) set);
622 
623  /*
624  * If the context is a candidate for a freelist, put it into that freelist
625  * instead of destroying it.
626  */
627  if (set->freeListIndex >= 0)
628  {
630 
631  /*
632  * Reset the context, if it needs it, so that we aren't hanging on to
633  * more than the initial malloc chunk.
634  */
635  if (!context->isReset)
636  MemoryContextResetOnly(context);
637 
638  /*
639  * If the freelist is full, just discard what's already in it. See
640  * comments with context_freelists[].
641  */
642  if (freelist->num_free >= MAX_FREE_CONTEXTS)
643  {
644  while (freelist->first_free != NULL)
645  {
646  AllocSetContext *oldset = freelist->first_free;
647 
648  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
649  freelist->num_free--;
650 
651  /* All that remains is to free the header/initial block */
652  free(oldset);
653  }
654  Assert(freelist->num_free == 0);
655  }
656 
657  /* Now add the just-deleted context to the freelist. */
658  set->header.nextchild = (MemoryContext) freelist->first_free;
659  freelist->first_free = set;
660  freelist->num_free++;
661 
662  return;
663  }
664 
665  /* Free all blocks, except the keeper which is part of context header */
666  while (block != NULL)
667  {
668  AllocBlock next = block->next;
669 
670  if (!IsKeeperBlock(set, block))
671  context->mem_allocated -= block->endptr - ((char *) block);
672 
673 #ifdef CLOBBER_FREED_MEMORY
674  wipe_mem(block, block->freeptr - ((char *) block));
675 #endif
676 
677  if (!IsKeeperBlock(set, block))
678  free(block);
679 
680  block = next;
681  }
682 
683  Assert(context->mem_allocated == keepersize);
684 
685  /* Finally, free the context header, including the keeper block */
686  free(set);
687 }
#define IsKeeperBlock(set, block)
Definition: aset.c:248
#define KeeperBlock(set)
Definition: aset.c:244
#define MAX_FREE_CONTEXTS
Definition: aset.c:241
static AllocSetFreeList context_freelists[2]
Definition: aset.c:257
static int32 next
Definition: blutils.c:220
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:171
#define free(a)
Definition: header.h:65
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:349
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
MemoryContextData header
Definition: aset.c:154
int freeListIndex
Definition: aset.c:164
int num_free
Definition: aset.c:252
AllocSetContext * first_free
Definition: aset.c:253
MemoryContext nextchild
Definition: memnodes.h:92

References AllocSetIsValid, Assert(), AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, AllocSetFreeList::first_free, free, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, IsKeeperBlock, MemoryContextData::isReset, KeeperBlock, MAX_FREE_CONTEXTS, MemoryContextData::mem_allocated, MemoryContextResetOnly(), next, AllocBlockData::next, MemoryContextData::nextchild, AllocSetFreeList::num_free, and PG_USED_FOR_ASSERTS_ONLY.

◆ AllocSetFree()

void AllocSetFree ( void *  pointer)

Definition at line 1002 of file aset.c.

1003 {
1004  AllocSet set;
1005  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1006 
1007  /* Allow access to the chunk header. */
1009 
1010  if (MemoryChunkIsExternal(chunk))
1011  {
1012  /* Release single-chunk block. */
1013  AllocBlock block = ExternalChunkGetBlock(chunk);
1014 
1015  /*
1016  * Try to verify that we have a sane block pointer: the block header
1017  * should reference an aset and the freeptr should match the endptr.
1018  */
1019  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1020  elog(ERROR, "could not find block containing chunk %p", chunk);
1021 
1022  set = block->aset;
1023 
1024 #ifdef MEMORY_CONTEXT_CHECKING
1025  {
1026  /* Test for someone scribbling on unused space in chunk */
1027  Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1028  if (!sentinel_ok(pointer, chunk->requested_size))
1029  elog(WARNING, "detected write past chunk end in %s %p",
1030  set->header.name, chunk);
1031  }
1032 #endif
1033 
1034  /* OK, remove block from aset's list and free it */
1035  if (block->prev)
1036  block->prev->next = block->next;
1037  else
1038  set->blocks = block->next;
1039  if (block->next)
1040  block->next->prev = block->prev;
1041 
1042  set->header.mem_allocated -= block->endptr - ((char *) block);
1043 
1044 #ifdef CLOBBER_FREED_MEMORY
1045  wipe_mem(block, block->freeptr - ((char *) block));
1046 #endif
1047  free(block);
1048  }
1049  else
1050  {
1051  AllocBlock block = MemoryChunkGetBlock(chunk);
1052  int fidx;
1054 
1055  /*
1056  * In this path, for speed reasons we just Assert that the referenced
1057  * block is good. We can also Assert that the value field is sane.
1058  * Future field experience may show that these Asserts had better
1059  * become regular runtime test-and-elog checks.
1060  */
1061  Assert(AllocBlockIsValid(block));
1062  set = block->aset;
1063 
1064  fidx = MemoryChunkGetValue(chunk);
1065  Assert(FreeListIdxIsValid(fidx));
1066  link = GetFreeListLink(chunk);
1067 
1068 #ifdef MEMORY_CONTEXT_CHECKING
1069  /* Test for someone scribbling on unused space in chunk */
1070  if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1071  if (!sentinel_ok(pointer, chunk->requested_size))
1072  elog(WARNING, "detected write past chunk end in %s %p",
1073  set->header.name, chunk);
1074 #endif
1075 
1076 #ifdef CLOBBER_FREED_MEMORY
1077  wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1078 #endif
1079  /* push this chunk onto the top of the free list */
1081  link->next = set->freelist[fidx];
1083  set->freelist[fidx] = chunk;
1084 
1085 #ifdef MEMORY_CONTEXT_CHECKING
1086 
1087  /*
1088  * Reset requested_size to InvalidAllocSize in chunks that are on free
1089  * list.
1090  */
1091  chunk->requested_size = InvalidAllocSize;
1092 #endif
1093  }
1094 }
#define AllocBlockIsValid(block)
Definition: aset.c:207
#define FreeListIdxIsValid(fidx)
Definition: aset.c:136
#define ExternalChunkGetBlock(chunk)
Definition: aset.c:215
#define ERROR
Definition: elog.h:39
const char * name
Definition: memnodes.h:93

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert(), AllocSetContext::blocks, elog(), AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, free, AllocSetContext::freelist, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, AllocSetContext::header, InvalidAllocSize, link(), MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, and WARNING.

Referenced by AllocSetRealloc().

◆ AllocSetGetChunkContext()

MemoryContext AllocSetGetChunkContext ( void *  pointer)

Definition at line 1370 of file aset.c.

1371 {
1372  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1373  AllocBlock block;
1374  AllocSet set;
1375 
1376  /* Allow access to the chunk header. */
1378 
1379  if (MemoryChunkIsExternal(chunk))
1380  block = ExternalChunkGetBlock(chunk);
1381  else
1382  block = (AllocBlock) MemoryChunkGetBlock(chunk);
1383 
1384  /* Disallow access to the chunk header. */
1386 
1387  Assert(AllocBlockIsValid(block));
1388  set = block->aset;
1389 
1390  return &set->header;
1391 }

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert(), ExternalChunkGetBlock, AllocSetContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetGetChunkSpace()

Size AllocSetGetChunkSpace ( void *  pointer)

Definition at line 1399 of file aset.c.

1400 {
1401  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1402  int fidx;
1403 
1404  /* Allow access to the chunk header. */
1406 
1407  if (MemoryChunkIsExternal(chunk))
1408  {
1409  AllocBlock block = ExternalChunkGetBlock(chunk);
1410 
1411  /* Disallow access to the chunk header. */
1413 
1414  Assert(AllocBlockIsValid(block));
1415 
1416  return block->endptr - (char *) chunk;
1417  }
1418 
1419  fidx = MemoryChunkGetValue(chunk);
1420  Assert(FreeListIdxIsValid(fidx));
1421 
1422  /* Disallow access to the chunk header. */
1424 
1426 }

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, Assert(), AllocBlockData::endptr, ExternalChunkGetBlock, FreeListIdxIsValid, GetChunkSizeFromFreeListIdx, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetIsEmpty()

bool AllocSetIsEmpty ( MemoryContext  context)

Definition at line 1433 of file aset.c.

1434 {
1435  Assert(AllocSetIsValid(context));
1436 
1437  /*
1438  * For now, we say "empty" only if the context is new or just reset. We
1439  * could examine the freelists to determine if all space has been freed,
1440  * but it's not really worth the trouble for present uses of this
1441  * functionality.
1442  */
1443  if (context->isReset)
1444  return true;
1445  return false;
1446 }

References AllocSetIsValid, Assert(), and MemoryContextData::isReset.

◆ AllocSetRealloc()

void* AllocSetRealloc ( void *  pointer,
Size  size 
)

Definition at line 1109 of file aset.c.

1110 {
1111  AllocBlock block;
1112  AllocSet set;
1113  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1114  Size oldchksize;
1115  int fidx;
1116 
1117  /* Allow access to the chunk header. */
1119 
1120  if (MemoryChunkIsExternal(chunk))
1121  {
1122  /*
1123  * The chunk must have been allocated as a single-chunk block. Use
1124  * realloc() to make the containing block bigger, or smaller, with
1125  * minimum space wastage.
1126  */
1127  Size chksize;
1128  Size blksize;
1129  Size oldblksize;
1130 
1131  block = ExternalChunkGetBlock(chunk);
1132 
1133  /*
1134  * Try to verify that we have a sane block pointer: the block header
1135  * should reference an aset and the freeptr should match the endptr.
1136  */
1137  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1138  elog(ERROR, "could not find block containing chunk %p", chunk);
1139 
1140  set = block->aset;
1141 
1142  oldchksize = block->endptr - (char *) pointer;
1143 
1144 #ifdef MEMORY_CONTEXT_CHECKING
1145  /* Test for someone scribbling on unused space in chunk */
1146  Assert(chunk->requested_size < oldchksize);
1147  if (!sentinel_ok(pointer, chunk->requested_size))
1148  elog(WARNING, "detected write past chunk end in %s %p",
1149  set->header.name, chunk);
1150 #endif
1151 
1152 #ifdef MEMORY_CONTEXT_CHECKING
1153  /* ensure there's always space for the sentinel byte */
1154  chksize = MAXALIGN(size + 1);
1155 #else
1156  chksize = MAXALIGN(size);
1157 #endif
1158 
1159  /* Do the realloc */
1160  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1161  oldblksize = block->endptr - ((char *) block);
1162 
1163  block = (AllocBlock) realloc(block, blksize);
1164  if (block == NULL)
1165  {
1166  /* Disallow access to the chunk header. */
1168  return NULL;
1169  }
1170 
1171  /* updated separately, not to underflow when (oldblksize > blksize) */
1172  set->header.mem_allocated -= oldblksize;
1173  set->header.mem_allocated += blksize;
1174 
1175  block->freeptr = block->endptr = ((char *) block) + blksize;
1176 
1177  /* Update pointers since block has likely been moved */
1178  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1179  pointer = MemoryChunkGetPointer(chunk);
1180  if (block->prev)
1181  block->prev->next = block;
1182  else
1183  set->blocks = block;
1184  if (block->next)
1185  block->next->prev = block;
1186 
1187 #ifdef MEMORY_CONTEXT_CHECKING
1188 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1189 
1190  /*
1191  * We can only randomize the extra space if we know the prior request.
1192  * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1193  */
1194  if (size > chunk->requested_size)
1195  randomize_mem((char *) pointer + chunk->requested_size,
1196  size - chunk->requested_size);
1197 #else
1198 
1199  /*
1200  * If this is an increase, realloc() will have marked any
1201  * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1202  * also need to adjust trailing bytes from the old allocation (from
1203  * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1204  * Make sure not to mark too many bytes in case chunk->requested_size
1205  * < size < oldchksize.
1206  */
1207 #ifdef USE_VALGRIND
1208  if (Min(size, oldchksize) > chunk->requested_size)
1209  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1210  Min(size, oldchksize) - chunk->requested_size);
1211 #endif
1212 #endif
1213 
1214  chunk->requested_size = size;
1215  /* set mark to catch clobber of "unused" space */
1216  Assert(size < chksize);
1217  set_sentinel(pointer, size);
1218 #else /* !MEMORY_CONTEXT_CHECKING */
1219 
1220  /*
1221  * We may need to adjust marking of bytes from the old allocation as
1222  * some of them may be marked NOACCESS. We don't know how much of the
1223  * old chunk size was the requested size; it could have been as small
1224  * as one byte. We have to be conservative and just mark the entire
1225  * old portion DEFINED. Make sure not to mark memory beyond the new
1226  * allocation in case it's smaller than the old one.
1227  */
1228  VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1229 #endif
1230 
1231  /* Ensure any padding bytes are marked NOACCESS. */
1232  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1233 
 1234  /* Disallow access to the chunk header. */
1236 
1237  return pointer;
1238  }
1239 
1240  block = MemoryChunkGetBlock(chunk);
1241 
1242  /*
1243  * In this path, for speed reasons we just Assert that the referenced
1244  * block is good. We can also Assert that the value field is sane. Future
1245  * field experience may show that these Asserts had better become regular
1246  * runtime test-and-elog checks.
1247  */
1248  Assert(AllocBlockIsValid(block));
1249  set = block->aset;
1250 
1251  fidx = MemoryChunkGetValue(chunk);
1252  Assert(FreeListIdxIsValid(fidx));
1253  oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1254 
1255 #ifdef MEMORY_CONTEXT_CHECKING
1256  /* Test for someone scribbling on unused space in chunk */
1257  if (chunk->requested_size < oldchksize)
1258  if (!sentinel_ok(pointer, chunk->requested_size))
1259  elog(WARNING, "detected write past chunk end in %s %p",
1260  set->header.name, chunk);
1261 #endif
1262 
1263  /*
1264  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1265  * allocated area already is >= the new size. (In particular, we will
1266  * fall out here if the requested size is a decrease.)
1267  */
1268  if (oldchksize >= size)
1269  {
1270 #ifdef MEMORY_CONTEXT_CHECKING
1271  Size oldrequest = chunk->requested_size;
1272 
1273 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1274  /* We can only fill the extra space if we know the prior request */
1275  if (size > oldrequest)
1276  randomize_mem((char *) pointer + oldrequest,
1277  size - oldrequest);
1278 #endif
1279 
1280  chunk->requested_size = size;
1281 
1282  /*
1283  * If this is an increase, mark any newly-available part UNDEFINED.
1284  * Otherwise, mark the obsolete part NOACCESS.
1285  */
1286  if (size > oldrequest)
1287  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1288  size - oldrequest);
1289  else
1290  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1291  oldchksize - size);
1292 
1293  /* set mark to catch clobber of "unused" space */
1294  if (size < oldchksize)
1295  set_sentinel(pointer, size);
1296 #else /* !MEMORY_CONTEXT_CHECKING */
1297 
1298  /*
1299  * We don't have the information to determine whether we're growing
1300  * the old request or shrinking it, so we conservatively mark the
1301  * entire new allocation DEFINED.
1302  */
1303  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1304  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1305 #endif
1306 
1307  /* Disallow access to the chunk header. */
1309 
1310  return pointer;
1311  }
1312  else
1313  {
1314  /*
1315  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1316  * allocate a new chunk and copy the data. Since we know the existing
1317  * data isn't huge, this won't involve any great memcpy expense, so
1318  * it's not worth being smarter. (At one time we tried to avoid
1319  * memcpy when it was possible to enlarge the chunk in-place, but that
1320  * turns out to misbehave unpleasantly for repeated cycles of
1321  * palloc/repalloc/pfree: the eventually freed chunks go into the
1322  * wrong freelist for the next initial palloc request, and so we leak
1323  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1324  */
1325  AllocPointer newPointer;
1326  Size oldsize;
1327 
1328  /* allocate new chunk */
1329  newPointer = AllocSetAlloc((MemoryContext) set, size);
1330 
1331  /* leave immediately if request was not completed */
1332  if (newPointer == NULL)
1333  {
1334  /* Disallow access to the chunk header. */
1336  return NULL;
1337  }
1338 
1339  /*
1340  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1341  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1342  * definedness from the old allocation to the new. If we know the old
1343  * allocation, copy just that much. Otherwise, make the entire old
1344  * chunk defined to avoid errors as we copy the currently-NOACCESS
1345  * trailing bytes.
1346  */
1347  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1348 #ifdef MEMORY_CONTEXT_CHECKING
1349  oldsize = chunk->requested_size;
1350 #else
1351  oldsize = oldchksize;
1352  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1353 #endif
1354 
1355  /* transfer existing data (certain to fit) */
1356  memcpy(newPointer, pointer, oldsize);
1357 
1358  /* free old chunk */
1359  AllocSetFree(pointer);
1360 
1361  return newPointer;
1362  }
1363 }
void * AllocPointer
Definition: aset.c:113
void AllocSetFree(void *pointer)
Definition: aset.c:1002
void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:703
#define realloc(a, b)
Definition: header.h:60

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocSetAlloc(), AllocSetFree(), AllocBlockData::aset, Assert(), AllocSetContext::blocks, elog(), AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, AllocSetContext::header, MAXALIGN, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkIsExternal(), Min, MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, realloc, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.

◆ AllocSetReset()

void AllocSetReset ( MemoryContext  context)

Definition at line 537 of file aset.c.

538 {
539  AllocSet set = (AllocSet) context;
540  AllocBlock block;
541  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
542 
543  Assert(AllocSetIsValid(set));
544 
545 #ifdef MEMORY_CONTEXT_CHECKING
546  /* Check for corruption and leaks before freeing */
547  AllocSetCheck(context);
548 #endif
549 
550  /* Remember keeper block size for Assert below */
551  keepersize = KeeperBlock(set)->endptr - ((char *) set);
552 
553  /* Clear chunk freelists */
554  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
555 
556  block = set->blocks;
557 
558  /* New blocks list will be just the keeper block */
559  set->blocks = KeeperBlock(set);
560 
561  while (block != NULL)
562  {
563  AllocBlock next = block->next;
564 
565  if (IsKeeperBlock(set, block))
566  {
567  /* Reset the block, but don't return it to malloc */
568  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
569 
570 #ifdef CLOBBER_FREED_MEMORY
571  wipe_mem(datastart, block->freeptr - datastart);
572 #else
573  /* wipe_mem() would have done this */
574  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
575 #endif
576  block->freeptr = datastart;
577  block->prev = NULL;
578  block->next = NULL;
579  }
580  else
581  {
582  /* Normal case, release the block */
583  context->mem_allocated -= block->endptr - ((char *) block);
584 
585 #ifdef CLOBBER_FREED_MEMORY
586  wipe_mem(block, block->freeptr - ((char *) block));
587 #endif
588  free(block);
589  }
590  block = next;
591  }
592 
593  Assert(context->mem_allocated == keepersize);
594 
595  /* Reset block size allocation sequence, too */
596  set->nextBlockSize = set->initBlockSize;
597 }
#define MemSetAligned(start, val, len)
Definition: c.h:1039
uint32 initBlockSize
Definition: aset.c:159

References ALLOC_BLOCKHDRSZ, AllocSetIsValid, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, free, AllocSetContext::freelist, AllocBlockData::freeptr, AllocSetContext::initBlockSize, IsKeeperBlock, KeeperBlock, MemoryContextData::mem_allocated, MemSetAligned, next, AllocBlockData::next, AllocSetContext::nextBlockSize, PG_USED_FOR_ASSERTS_ONLY, AllocBlockData::prev, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetStats()

void AllocSetStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 1458 of file aset.c.

1461 {
1462  AllocSet set = (AllocSet) context;
1463  Size nblocks = 0;
1464  Size freechunks = 0;
1465  Size totalspace;
1466  Size freespace = 0;
1467  AllocBlock block;
1468  int fidx;
1469 
1470  Assert(AllocSetIsValid(set));
1471 
1472  /* Include context header in totalspace */
1473  totalspace = MAXALIGN(sizeof(AllocSetContext));
1474 
1475  for (block = set->blocks; block != NULL; block = block->next)
1476  {
1477  nblocks++;
1478  totalspace += block->endptr - ((char *) block);
1479  freespace += block->endptr - block->freeptr;
1480  }
1481  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1482  {
1483  Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1484  MemoryChunk *chunk = set->freelist[fidx];
1485 
1486  while (chunk != NULL)
1487  {
1489 
1490  /* Allow access to the chunk header. */
1492  Assert(MemoryChunkGetValue(chunk) == fidx);
1494 
1495  freechunks++;
1496  freespace += chksz + ALLOC_CHUNKHDRSZ;
1497 
1499  chunk = link->next;
1501  }
1502  }
1503 
1504  if (printfunc)
1505  {
1506  char stats_string[200];
1507 
1508  snprintf(stats_string, sizeof(stats_string),
1509  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1510  totalspace, nblocks, freespace, freechunks,
1511  totalspace - freespace);
1512  printfunc(context, passthru, stats_string, print_to_stderr);
1513  }
1514 
1515  if (totals)
1516  {
1517  totals->nblocks += nblocks;
1518  totals->freechunks += freechunks;
1519  totals->totalspace += totalspace;
1520  totals->freespace += freespace;
1521  }
1522 }
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:84
#define snprintf
Definition: port.h:238

References ALLOC_CHUNKHDRSZ, ALLOCSET_NUM_FREELISTS, AllocSetIsValid, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, MemoryContextCounters::freechunks, AllocSetContext::freelist, AllocBlockData::freeptr, MemoryContextCounters::freespace, GetChunkSizeFromFreeListIdx, GetFreeListLink, MAXALIGN, MemoryChunkGetValue(), MemoryContextCounters::nblocks, AllocBlockData::next, snprintf, MemoryContextCounters::totalspace, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationAlloc()

void* GenerationAlloc ( MemoryContext  context,
Size  size 
)

Definition at line 345 of file generation.c.

346 {
347  GenerationContext *set = (GenerationContext *) context;
348  GenerationBlock *block;
349  MemoryChunk *chunk;
350  Size chunk_size;
351  Size required_size;
352 
354 
355 #ifdef MEMORY_CONTEXT_CHECKING
356  /* ensure there's always space for the sentinel byte */
357  chunk_size = MAXALIGN(size + 1);
358 #else
359  chunk_size = MAXALIGN(size);
360 #endif
361  required_size = chunk_size + Generation_CHUNKHDRSZ;
362 
363  /* is it an over-sized chunk? if yes, allocate special block */
364  if (chunk_size > set->allocChunkLimit)
365  {
366  Size blksize = required_size + Generation_BLOCKHDRSZ;
367 
368  block = (GenerationBlock *) malloc(blksize);
369  if (block == NULL)
370  return NULL;
371 
372  context->mem_allocated += blksize;
373 
374  /* block with a single (used) chunk */
375  block->context = set;
376  block->blksize = blksize;
377  block->nchunks = 1;
378  block->nfree = 0;
379 
380  /* the block is completely full */
381  block->freeptr = block->endptr = ((char *) block) + blksize;
382 
383  chunk = (MemoryChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
384 
385  /* mark the MemoryChunk as externally managed */
387 
388 #ifdef MEMORY_CONTEXT_CHECKING
389  chunk->requested_size = size;
390  /* set mark to catch clobber of "unused" space */
391  Assert(size < chunk_size);
392  set_sentinel(MemoryChunkGetPointer(chunk), size);
393 #endif
394 #ifdef RANDOMIZE_ALLOCATED_MEMORY
395  /* fill the allocated space with junk */
396  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
397 #endif
398 
399  /* add the block to the list of allocated blocks */
400  dlist_push_head(&set->blocks, &block->node);
401 
402  /* Ensure any padding bytes are marked NOACCESS. */
403  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
404  chunk_size - size);
405 
406  /* Disallow access to the chunk header. */
408 
409  return MemoryChunkGetPointer(chunk);
410  }
411 
412  /*
413  * Not an oversized chunk. We try to first make use of the current block,
414  * but if there's not enough space in it, instead of allocating a new
415  * block, we look to see if the freeblock is empty and has enough space.
416  * If not, we'll also try the same using the keeper block. The keeper
417  * block may have become empty and we have no other way to reuse it again
418  * if we don't try to use it explicitly here.
419  *
420  * We don't want to start filling the freeblock before the current block
421  * is full, otherwise we may cause fragmentation in FIFO type workloads.
422  * We only switch to using the freeblock or keeper block if those blocks
423  * are completely empty. If we didn't do that we could end up fragmenting
424  * consecutive allocations over multiple blocks which would be a problem
425  * that would compound over time.
426  */
427  block = set->block;
428 
429  if (block == NULL ||
430  GenerationBlockFreeBytes(block) < required_size)
431  {
432  Size blksize;
433  GenerationBlock *freeblock = set->freeblock;
434 
435  if (freeblock != NULL &&
436  GenerationBlockIsEmpty(freeblock) &&
437  GenerationBlockFreeBytes(freeblock) >= required_size)
438  {
439  block = freeblock;
440 
441  /*
442  * Zero out the freeblock as we'll set this to the current block
443  * below
444  */
445  set->freeblock = NULL;
446  }
447  else if (GenerationBlockIsEmpty(KeeperBlock(set)) &&
448  GenerationBlockFreeBytes(KeeperBlock(set)) >= required_size)
449  {
450  block = KeeperBlock(set);
451  }
452  else
453  {
454  /*
455  * The first such block has size initBlockSize, and we double the
456  * space in each succeeding block, but not more than maxBlockSize.
457  */
458  blksize = set->nextBlockSize;
459  set->nextBlockSize <<= 1;
460  if (set->nextBlockSize > set->maxBlockSize)
461  set->nextBlockSize = set->maxBlockSize;
462 
463  /* we'll need a block hdr too, so add that to the required size */
464  required_size += Generation_BLOCKHDRSZ;
465 
466  /* round the size up to the next power of 2 */
467  if (blksize < required_size)
468  blksize = pg_nextpower2_size_t(required_size);
469 
470  block = (GenerationBlock *) malloc(blksize);
471 
472  if (block == NULL)
473  return NULL;
474 
475  context->mem_allocated += blksize;
476 
477  /* initialize the new block */
478  GenerationBlockInit(set, block, blksize);
479 
480  /* add it to the doubly-linked list of blocks */
481  dlist_push_head(&set->blocks, &block->node);
482 
483  /* Zero out the freeblock in case it's become full */
484  set->freeblock = NULL;
485  }
486 
487  /* and also use it as the current allocation block */
488  set->block = block;
489  }
490 
491  /* we're supposed to have a block with enough free space now */
492  Assert(block != NULL);
493  Assert((block->endptr - block->freeptr) >= Generation_CHUNKHDRSZ + chunk_size);
494 
495  chunk = (MemoryChunk *) block->freeptr;
496 
497  /* Prepare to initialize the chunk header. */
499 
500  block->nchunks += 1;
501  block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
502 
503  Assert(block->freeptr <= block->endptr);
504 
505  MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_GENERATION_ID);
506 #ifdef MEMORY_CONTEXT_CHECKING
507  chunk->requested_size = size;
508  /* set mark to catch clobber of "unused" space */
509  Assert(size < chunk_size);
510  set_sentinel(MemoryChunkGetPointer(chunk), size);
511 #endif
512 #ifdef RANDOMIZE_ALLOCATED_MEMORY
513  /* fill the allocated space with junk */
514  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
515 #endif
516 
517  /* Ensure any padding bytes are marked NOACCESS. */
518  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
519  chunk_size - size);
520 
521  /* Disallow access to the chunk header. */
523 
524  return MemoryChunkGetPointer(chunk);
525 }
static void GenerationBlockInit(GenerationContext *context, GenerationBlock *block, Size blksize)
Definition: generation.c:533
static Size GenerationBlockFreeBytes(GenerationBlock *block)
Definition: generation.c:588
#define KeeperBlock(set)
Definition: generation.c:123
#define Generation_CHUNKHDRSZ
Definition: generation.c:47
#define Generation_BLOCKHDRSZ
Definition: generation.c:46
static bool GenerationBlockIsEmpty(GenerationBlock *block)
Definition: generation.c:554
#define GenerationIsValid(set)
Definition: generation.c:104
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
#define pg_nextpower2_size_t
Definition: pg_bitutils.h:339
char * freeptr
Definition: generation.c:96
dlist_node node
Definition: generation.c:91
GenerationContext * context
Definition: generation.c:92
GenerationBlock * freeblock
Definition: generation.c:72
uint32 maxBlockSize
Definition: generation.c:65
uint32 nextBlockSize
Definition: generation.c:66
dlist_head blocks
Definition: generation.c:74
GenerationBlock * block
Definition: generation.c:69
uint32 allocChunkLimit
Definition: generation.c:67

References GenerationContext::allocChunkLimit, Assert(), GenerationBlock::blksize, GenerationContext::block, GenerationContext::blocks, GenerationBlock::context, dlist_push_head(), GenerationBlock::endptr, GenerationContext::freeblock, GenerationBlock::freeptr, Generation_BLOCKHDRSZ, Generation_CHUNKHDRSZ, GenerationBlockFreeBytes(), GenerationBlockInit(), GenerationBlockIsEmpty(), GenerationIsValid, KeeperBlock, malloc, MAXALIGN, GenerationContext::maxBlockSize, MCTX_GENERATION_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkSetHdrMask(), MemoryChunkSetHdrMaskExternal(), GenerationBlock::nchunks, GenerationContext::nextBlockSize, GenerationBlock::nfree, GenerationBlock::node, pg_nextpower2_size_t, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

Referenced by GenerationRealloc().

◆ GenerationDelete()

void GenerationDelete ( MemoryContext  context)

Definition at line 323 of file generation.c.

324 {
325  /* Reset to release all releasable GenerationBlocks */
326  GenerationReset(context);
327  /* And free the context header and keeper block */
328  free(context);
329 }
void GenerationReset(MemoryContext context)
Definition: generation.c:278

References free, and GenerationReset().

◆ GenerationFree()

void GenerationFree ( void *  pointer)

Definition at line 623 of file generation.c.

624 {
625  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
626  GenerationBlock *block;
627  GenerationContext *set;
628 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
629  || defined(CLOBBER_FREED_MEMORY)
630  Size chunksize;
631 #endif
632 
633  /* Allow access to the chunk header. */
635 
636  if (MemoryChunkIsExternal(chunk))
637  {
638  block = ExternalChunkGetBlock(chunk);
639 
640  /*
641  * Try to verify that we have a sane block pointer: the block header
642  * should reference a generation context.
643  */
644  if (!GenerationBlockIsValid(block))
645  elog(ERROR, "could not find block containing chunk %p", chunk);
646 
647 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
648  || defined(CLOBBER_FREED_MEMORY)
649  chunksize = block->endptr - (char *) pointer;
650 #endif
651  }
652  else
653  {
654  block = MemoryChunkGetBlock(chunk);
655 
656  /*
657  * In this path, for speed reasons we just Assert that the referenced
658  * block is good. Future field experience may show that this Assert
659  * had better become a regular runtime test-and-elog check.
660  */
662 
663 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
664  || defined(CLOBBER_FREED_MEMORY)
665  chunksize = MemoryChunkGetValue(chunk);
666 #endif
667  }
668 
669 #ifdef MEMORY_CONTEXT_CHECKING
670  /* Test for someone scribbling on unused space in chunk */
671  Assert(chunk->requested_size < chunksize);
672  if (!sentinel_ok(pointer, chunk->requested_size))
673  elog(WARNING, "detected write past chunk end in %s %p",
674  ((MemoryContext) block->context)->name, chunk);
675 #endif
676 
677 #ifdef CLOBBER_FREED_MEMORY
678  wipe_mem(pointer, chunksize);
679 #endif
680 
681 #ifdef MEMORY_CONTEXT_CHECKING
682  /* Reset requested_size to InvalidAllocSize in freed chunks */
683  chunk->requested_size = InvalidAllocSize;
684 #endif
685 
686  block->nfree += 1;
687 
688  Assert(block->nchunks > 0);
689  Assert(block->nfree <= block->nchunks);
690 
691  /* If there are still allocated chunks in the block, we're done. */
692  if (block->nfree < block->nchunks)
693  return;
694 
695  set = block->context;
696 
697  /* Don't try to free the keeper block, just mark it empty */
698  if (IsKeeperBlock(set, block))
699  {
701  return;
702  }
703 
704  /*
705  * If there is no freeblock set or if this is the freeblock then instead
706  * of freeing this memory, we keep it around so that new allocations have
707  * the option of recycling it.
708  */
709  if (set->freeblock == NULL || set->freeblock == block)
710  {
711  /* XXX should we only recycle maxBlockSize sized blocks? */
712  set->freeblock = block;
714  return;
715  }
716 
717  /* Also make sure the block is not marked as the current block. */
718  if (set->block == block)
719  set->block = NULL;
720 
721  /*
722  * The block is empty, so let's get rid of it. First remove it from the
723  * list of blocks, then return it to malloc().
724  */
725  dlist_delete(&block->node);
726 
727  set->header.mem_allocated -= block->blksize;
728  free(block);
729 }
#define IsKeeperBlock(set, block)
Definition: generation.c:128
static void GenerationBlockMarkEmpty(GenerationBlock *block)
Definition: generation.c:564
#define GenerationBlockIsValid(block)
Definition: generation.c:111
#define ExternalChunkGetBlock(chunk)
Definition: generation.c:119
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
MemoryContextData header
Definition: generation.c:61

References Assert(), GenerationBlock::blksize, GenerationContext::block, GenerationBlock::context, dlist_delete(), elog(), GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, free, GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, GenerationBlockMarkEmpty(), GenerationContext::header, InvalidAllocSize, IsKeeperBlock, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), GenerationBlock::nchunks, GenerationBlock::nfree, GenerationBlock::node, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

Referenced by GenerationRealloc().

◆ GenerationGetChunkContext()

MemoryContext GenerationGetChunkContext ( void *  pointer)

Definition at line 880 of file generation.c.

881 {
882  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
883  GenerationBlock *block;
884 
885  /* Allow access to the chunk header. */
887 
888  if (MemoryChunkIsExternal(chunk))
889  block = ExternalChunkGetBlock(chunk);
890  else
891  block = (GenerationBlock *) MemoryChunkGetBlock(chunk);
892 
893  /* Disallow access to the chunk header. */
895 
897  return &block->context->header;
898 }

References Assert(), GenerationBlock::context, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, GenerationContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationGetChunkSpace()

Size GenerationGetChunkSpace ( void *  pointer)

Definition at line 906 of file generation.c.

907 {
908  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
909  Size chunksize;
910 
911  /* Allow access to the chunk header. */
913 
914  if (MemoryChunkIsExternal(chunk))
915  {
916  GenerationBlock *block = ExternalChunkGetBlock(chunk);
917 
919  chunksize = block->endptr - (char *) pointer;
920  }
921  else
922  chunksize = MemoryChunkGetValue(chunk);
923 
924  /* Disallow access to the chunk header. */
926 
927  return Generation_CHUNKHDRSZ + chunksize;
928 }

References Assert(), GenerationBlock::endptr, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationIsEmpty()

bool GenerationIsEmpty ( MemoryContext  context)

Definition at line 935 of file generation.c.

936 {
937  GenerationContext *set = (GenerationContext *) context;
938  dlist_iter iter;
939 
941 
942  dlist_foreach(iter, &set->blocks)
943  {
944  GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
945 
946  if (block->nchunks > 0)
947  return false;
948  }
949 
950  return true;
951 }
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
dlist_node * cur
Definition: ilist.h:179

References Assert(), GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationIsValid, and GenerationBlock::nchunks.

◆ GenerationRealloc()

void* GenerationRealloc ( void *  pointer,
Size  size 
)

Definition at line 738 of file generation.c.

739 {
740  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
741  GenerationContext *set;
742  GenerationBlock *block;
743  GenerationPointer newPointer;
744  Size oldsize;
745 
746  /* Allow access to the chunk header. */
748 
749  if (MemoryChunkIsExternal(chunk))
750  {
751  block = ExternalChunkGetBlock(chunk);
752 
753  /*
754  * Try to verify that we have a sane block pointer: the block header
755  * should reference a generation context.
756  */
757  if (!GenerationBlockIsValid(block))
758  elog(ERROR, "could not find block containing chunk %p", chunk);
759 
760  oldsize = block->endptr - (char *) pointer;
761  }
762  else
763  {
764  block = MemoryChunkGetBlock(chunk);
765 
766  /*
767  * In this path, for speed reasons we just Assert that the referenced
768  * block is good. Future field experience may show that this Assert
769  * had better become a regular runtime test-and-elog check.
770  */
772 
773  oldsize = MemoryChunkGetValue(chunk);
774  }
775 
776  set = block->context;
777 
778 #ifdef MEMORY_CONTEXT_CHECKING
779  /* Test for someone scribbling on unused space in chunk */
780  Assert(chunk->requested_size < oldsize);
781  if (!sentinel_ok(pointer, chunk->requested_size))
782  elog(WARNING, "detected write past chunk end in %s %p",
783  ((MemoryContext) set)->name, chunk);
784 #endif
785 
786  /*
787  * Maybe the allocated area already is >= the new size. (In particular,
788  * we always fall out here if the requested size is a decrease.)
789  *
790  * This memory context does not use power-of-2 chunk sizing and instead
791  * carves the chunks to be as small as possible, so most repalloc() calls
792  * will end up in the palloc/memcpy/pfree branch.
793  *
794  * XXX Perhaps we should annotate this condition with unlikely()?
795  */
796  if (oldsize >= size)
797  {
798 #ifdef MEMORY_CONTEXT_CHECKING
799  Size oldrequest = chunk->requested_size;
800 
801 #ifdef RANDOMIZE_ALLOCATED_MEMORY
802  /* We can only fill the extra space if we know the prior request */
803  if (size > oldrequest)
804  randomize_mem((char *) pointer + oldrequest,
805  size - oldrequest);
806 #endif
807 
808  chunk->requested_size = size;
809 
810  /*
811  * If this is an increase, mark any newly-available part UNDEFINED.
812  * Otherwise, mark the obsolete part NOACCESS.
813  */
814  if (size > oldrequest)
815  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
816  size - oldrequest);
817  else
818  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
819  oldsize - size);
820 
821  /* set mark to catch clobber of "unused" space */
822  set_sentinel(pointer, size);
823 #else /* !MEMORY_CONTEXT_CHECKING */
824 
825  /*
826  * We don't have the information to determine whether we're growing
827  * the old request or shrinking it, so we conservatively mark the
828  * entire new allocation DEFINED.
829  */
830  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
831  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
832 #endif
833 
834  /* Disallow access to the chunk header. */
836 
837  return pointer;
838  }
839 
840  /* allocate new chunk */
841  newPointer = GenerationAlloc((MemoryContext) set, size);
842 
843  /* leave immediately if request was not completed */
844  if (newPointer == NULL)
845  {
846  /* Disallow access to the chunk header. */
848  return NULL;
849  }
850 
851  /*
852  * GenerationAlloc() may have returned a region that is still NOACCESS.
853  * Change it to UNDEFINED for the moment; memcpy() will then transfer
854  * definedness from the old allocation to the new. If we know the old
855  * allocation, copy just that much. Otherwise, make the entire old chunk
856  * defined to avoid errors as we copy the currently-NOACCESS trailing
857  * bytes.
858  */
859  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
860 #ifdef MEMORY_CONTEXT_CHECKING
861  oldsize = chunk->requested_size;
862 #else
863  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
864 #endif
865 
866  /* transfer existing data (certain to fit) */
867  memcpy(newPointer, pointer, oldsize);
868 
869  /* free old chunk */
870  GenerationFree(pointer);
871 
872  return newPointer;
873 }
void GenerationFree(void *pointer)
Definition: generation.c:623
void * GenerationPointer
Definition: generation.c:53
void * GenerationAlloc(MemoryContext context, Size size)
Definition: generation.c:345

References Assert(), GenerationBlock::context, elog(), GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationAlloc(), GenerationBlockIsValid, GenerationFree(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), name, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.

◆ GenerationReset()

void GenerationReset ( MemoryContext  context)

Definition at line 278 of file generation.c.

279 {
280  GenerationContext *set = (GenerationContext *) context;
281  dlist_mutable_iter miter;
282 
 283  Assert(GenerationIsValid(set));
 284 
285 #ifdef MEMORY_CONTEXT_CHECKING
286  /* Check for corruption and leaks before freeing */
287  GenerationCheck(context);
288 #endif
289 
290  /*
291  * NULLify the free block pointer. We must do this before calling
292  * GenerationBlockFree as that function never expects to free the
293  * freeblock.
294  */
295  set->freeblock = NULL;
296 
297  dlist_foreach_modify(miter, &set->blocks)
298  {
299  GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
300 
301  if (IsKeeperBlock(set, block))
 302  GenerationBlockMarkEmpty(block);
 303  else
304  GenerationBlockFree(set, block);
305  }
306 
 307  /* set it so new allocations make use of the keeper block */
308  set->block = KeeperBlock(set);
309 
310  /* Reset block size allocation sequence, too */
311  set->nextBlockSize = set->initBlockSize;
312 
313  /* Ensure there is only 1 item in the dlist */
314  Assert(!dlist_is_empty(&set->blocks));
 315  Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
 316 }
static void GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
Definition: generation.c:598
static bool dlist_has_next(const dlist_head *head, const dlist_node *node)
Definition: ilist.h:503
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
static dlist_node * dlist_head_node(dlist_head *head)
Definition: ilist.h:565
uint32 initBlockSize
Definition: generation.c:64
dlist_node * cur
Definition: ilist.h:200

References Assert(), GenerationContext::block, GenerationContext::blocks, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), GenerationContext::freeblock, GenerationBlockFree(), GenerationBlockMarkEmpty(), GenerationIsValid, GenerationContext::initBlockSize, IsKeeperBlock, KeeperBlock, and GenerationContext::nextBlockSize.

Referenced by GenerationDelete().

◆ GenerationStats()

void GenerationStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 966 of file generation.c.

969 {
970  GenerationContext *set = (GenerationContext *) context;
971  Size nblocks = 0;
972  Size nchunks = 0;
973  Size nfreechunks = 0;
974  Size totalspace;
975  Size freespace = 0;
976  dlist_iter iter;
977 
 978  Assert(GenerationIsValid(set));
 979 
980  /* Include context header in totalspace */
981  totalspace = MAXALIGN(sizeof(GenerationContext));
982 
983  dlist_foreach(iter, &set->blocks)
984  {
985  GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
986 
987  nblocks++;
988  nchunks += block->nchunks;
989  nfreechunks += block->nfree;
990  totalspace += block->blksize;
991  freespace += (block->endptr - block->freeptr);
992  }
993 
994  if (printfunc)
995  {
996  char stats_string[200];
997 
998  snprintf(stats_string, sizeof(stats_string),
999  "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
1000  totalspace, nblocks, nchunks, freespace,
1001  nfreechunks, totalspace - freespace);
1002  printfunc(context, passthru, stats_string, print_to_stderr);
1003  }
1004 
1005  if (totals)
1006  {
1007  totals->nblocks += nblocks;
1008  totals->freechunks += nfreechunks;
1009  totals->totalspace += totalspace;
1010  totals->freespace += freespace;
1011  }
1012 }

References Assert(), GenerationBlock::blksize, GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationBlock::endptr, MemoryContextCounters::freechunks, GenerationBlock::freeptr, MemoryContextCounters::freespace, GenerationIsValid, MAXALIGN, MemoryContextCounters::nblocks, GenerationBlock::nchunks, GenerationBlock::nfree, snprintf, and MemoryContextCounters::totalspace.

◆ MemoryContextCreate()

void MemoryContextCreate ( MemoryContext  node,
NodeTag  tag,
MemoryContextMethodID  method_id,
MemoryContext  parent,
const char *  name 
)

Definition at line 973 of file mcxt.c.

978 {
979  /* Creating new memory contexts is not allowed in a critical section */
980  Assert(CritSectionCount == 0);
981 
982  /* Initialize all standard fields of memory context header */
983  node->type = tag;
984  node->isReset = true;
985  node->methods = &mcxt_methods[method_id];
986  node->parent = parent;
987  node->firstchild = NULL;
988  node->mem_allocated = 0;
989  node->prevchild = NULL;
990  node->name = name;
991  node->ident = NULL;
992  node->reset_cbs = NULL;
993 
994  /* OK to link node into context tree */
995  if (parent)
996  {
997  node->nextchild = parent->firstchild;
998  if (parent->firstchild != NULL)
999  parent->firstchild->prevchild = node;
1000  parent->firstchild = node;
1001  /* inherit allowInCritSection flag from parent */
1002  node->allowInCritSection = parent->allowInCritSection;
1003  }
1004  else
1005  {
1006  node->nextchild = NULL;
1007  node->allowInCritSection = false;
1008  }
1009 
1010  VALGRIND_CREATE_MEMPOOL(node, 0, false);
1011 }
volatile uint32 CritSectionCount
Definition: globals.c:42
static const MemoryContextMethods mcxt_methods[]
Definition: mcxt.c:45
#define VALGRIND_CREATE_MEMPOOL(context, redzones, zeroed)
Definition: memdebug.h:24
MemoryContext prevchild
Definition: memnodes.h:91
MemoryContext firstchild
Definition: memnodes.h:90
bool allowInCritSection
Definition: memnodes.h:86
const char * ident
Definition: memnodes.h:94
MemoryContext parent
Definition: memnodes.h:89
MemoryContextCallback * reset_cbs
Definition: memnodes.h:95
const MemoryContextMethods * methods
Definition: memnodes.h:88

References MemoryContextData::allowInCritSection, Assert(), CritSectionCount, MemoryContextData::firstchild, MemoryContextData::ident, MemoryContextData::isReset, mcxt_methods, MemoryContextData::mem_allocated, MemoryContextData::methods, name, MemoryContextData::name, MemoryContextData::nextchild, MemoryContextData::parent, MemoryContextData::prevchild, MemoryContextData::reset_cbs, and VALGRIND_CREATE_MEMPOOL.

Referenced by AllocSetContextCreateInternal(), GenerationContextCreate(), and SlabContextCreate().

◆ SlabAlloc()

void* SlabAlloc ( MemoryContext  context,
Size  size 
)

Definition at line 499 of file slab.c.

500 {
501  SlabContext *slab = (SlabContext *) context;
502  SlabBlock *block;
503  MemoryChunk *chunk;
504 
505  Assert(SlabIsValid(slab));
506 
507  /* sanity check that this is pointing to a valid blocklist */
508  Assert(slab->curBlocklistIndex >= 0);
 509  Assert(slab->curBlocklistIndex <= SlabBlocklistIndex(slab, slab->chunksPerBlock));
 510 
511  /* make sure we only allow correct request size */
512  if (unlikely(size != slab->chunkSize))
513  elog(ERROR, "unexpected alloc chunk size %zu (expected %u)",
514  size, slab->chunkSize);
515 
516  /*
517  * Handle the case when there are no partially filled blocks available.
518  * SlabFree() will have updated the curBlocklistIndex setting it to zero
519  * to indicate that it has freed the final block. Also later in
520  * SlabAlloc() we will set the curBlocklistIndex to zero if we end up
521  * filling the final block.
522  */
523  if (unlikely(slab->curBlocklistIndex == 0))
524  {
525  dlist_head *blocklist;
526  int blocklist_idx;
527 
528  /* to save allocating a new one, first check the empty blocks list */
529  if (dclist_count(&slab->emptyblocks) > 0)
530  {
 531  dlist_node *node = dclist_pop_head_node(&slab->emptyblocks);
 532 
533  block = dlist_container(SlabBlock, node, node);
534 
535  /*
536  * SlabFree() should have left this block in a valid state with
537  * all chunks free. Ensure that's the case.
538  */
539  Assert(block->nfree == slab->chunksPerBlock);
540 
541  /* fetch the next chunk from this block */
542  chunk = SlabGetNextFreeChunk(slab, block);
543  }
544  else
545  {
546  block = (SlabBlock *) malloc(slab->blockSize);
547 
548  if (unlikely(block == NULL))
549  return NULL;
550 
551  block->slab = slab;
552  context->mem_allocated += slab->blockSize;
553 
554  /* use the first chunk in the new block */
555  chunk = SlabBlockGetChunk(slab, block, 0);
556 
557  block->nfree = slab->chunksPerBlock - 1;
558  block->unused = SlabBlockGetChunk(slab, block, 1);
559  block->freehead = NULL;
560  block->nunused = slab->chunksPerBlock - 1;
561  }
562 
563  /* find the blocklist element for storing blocks with 1 used chunk */
564  blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
565  blocklist = &slab->blocklist[blocklist_idx];
566 
567  /* this better be empty. We just added a block thinking it was */
568  Assert(dlist_is_empty(blocklist));
569 
570  dlist_push_head(blocklist, &block->node);
571 
572  slab->curBlocklistIndex = blocklist_idx;
573  }
574  else
575  {
576  dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
577  int new_blocklist_idx;
578 
579  Assert(!dlist_is_empty(blocklist));
580 
581  /* grab the block from the blocklist */
582  block = dlist_head_element(SlabBlock, node, blocklist);
583 
584  /* make sure we actually got a valid block, with matching nfree */
585  Assert(block != NULL);
586  Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
587  Assert(block->nfree > 0);
588 
589  /* fetch the next chunk from this block */
590  chunk = SlabGetNextFreeChunk(slab, block);
591 
592  /* get the new blocklist index based on the new free chunk count */
593  new_blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
594 
595  /*
596  * Handle the case where the blocklist index changes. This also deals
597  * with blocks becoming full as only full blocks go at index 0.
598  */
599  if (unlikely(slab->curBlocklistIndex != new_blocklist_idx))
600  {
601  dlist_delete_from(blocklist, &block->node);
602  dlist_push_head(&slab->blocklist[new_blocklist_idx], &block->node);
603 
604  if (dlist_is_empty(blocklist))
 605  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
 606  }
607  }
608 
609  /*
610  * Check that the chunk pointer is actually somewhere on the block and is
611  * aligned as expected.
612  */
613  Assert(chunk >= SlabBlockGetChunk(slab, block, 0));
614  Assert(chunk <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1));
615  Assert(SlabChunkMod(slab, block, chunk) == 0);
616 
617  /* Prepare to initialize the chunk header. */
 618  VALGRIND_MAKE_MEM_UNDEFINED(chunk, Slab_CHUNKHDRSZ);
 619 
620  MemoryChunkSetHdrMask(chunk, block, MAXALIGN(slab->chunkSize),
621  MCTX_SLAB_ID);
622 #ifdef MEMORY_CONTEXT_CHECKING
623  /* slab mark to catch clobber of "unused" space */
624  Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
625  set_sentinel(MemoryChunkGetPointer(chunk), size);
626  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
627  Slab_CHUNKHDRSZ + slab->chunkSize,
628  slab->fullChunkSize -
629  (slab->chunkSize + Slab_CHUNKHDRSZ));
630 #endif
631 
632 #ifdef RANDOMIZE_ALLOCATED_MEMORY
633  /* fill the allocated space with junk */
634  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
635 #endif
636 
637  /* Disallow access to the chunk header. */
 638  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
 639 
640  return MemoryChunkGetPointer(chunk);
641 }
#define unlikely(x)
Definition: c.h:300
static void dlist_delete_from(dlist_head *head, dlist_node *node)
Definition: ilist.h:429
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:603
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
static dlist_node * dclist_pop_head_node(dclist_head *head)
Definition: ilist.h:789
#define SlabIsValid(set)
Definition: slab.c:196
#define Slab_CHUNKHDRSZ
Definition: slab.c:157
static int32 SlabBlocklistIndex(SlabContext *slab, int nfree)
Definition: slab.c:211
static int32 SlabFindNextBlockListIndex(SlabContext *slab)
Definition: slab.c:251
static MemoryChunk * SlabGetNextFreeChunk(SlabContext *slab, SlabBlock *block)
Definition: slab.c:271
#define SlabBlockGetChunk(slab, block, n)
Definition: slab.c:165
int32 nfree
Definition: slab.c:149
MemoryChunk * freehead
Definition: slab.c:151
MemoryChunk * unused
Definition: slab.c:152
SlabContext * slab
Definition: slab.c:148
dlist_node node
Definition: slab.c:153
int32 nunused
Definition: slab.c:150
dlist_head blocklist[SLAB_BLOCKLIST_COUNT]
Definition: slab.c:129
int32 chunksPerBlock
Definition: slab.c:110
uint32 fullChunkSize
Definition: slab.c:108
uint32 blockSize
Definition: slab.c:109
int32 curBlocklistIndex
Definition: slab.c:111
uint32 chunkSize
Definition: slab.c:107
dclist_head emptyblocks
Definition: slab.c:120

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_pop_head_node(), dlist_container, dlist_delete_from(), dlist_head_element, dlist_is_empty(), dlist_push_head(), elog(), SlabContext::emptyblocks, ERROR, SlabBlock::freehead, SlabContext::fullChunkSize, malloc, MAXALIGN, MCTX_SLAB_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkSetHdrMask(), SlabBlock::nfree, SlabBlock::node, SlabBlock::nunused, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockGetChunk, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), SlabGetNextFreeChunk(), SlabIsValid, unlikely, SlabBlock::unused, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

◆ SlabDelete()

void SlabDelete ( MemoryContext  context)

Definition at line 485 of file slab.c.

486 {
487  /* Reset to release all the SlabBlocks */
488  SlabReset(context);
489  /* And free the context header */
490  free(context);
491 }
void SlabReset(MemoryContext context)
Definition: slab.c:431

References free, and SlabReset().

◆ SlabFree()

void SlabFree ( void *  pointer)

Definition at line 648 of file slab.c.

649 {
650  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
651  SlabBlock *block;
652  SlabContext *slab;
653  int curBlocklistIdx;
654  int newBlocklistIdx;
655 
656  /* Allow access to the chunk header. */
 657  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 658 
659  block = MemoryChunkGetBlock(chunk);
660 
661  /*
662  * For speed reasons we just Assert that the referenced block is good.
663  * Future field experience may show that this Assert had better become a
664  * regular runtime test-and-elog check.
665  */
666  Assert(SlabBlockIsValid(block));
667  slab = block->slab;
668 
669 #ifdef MEMORY_CONTEXT_CHECKING
670  /* Test for someone scribbling on unused space in chunk */
671  Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
672  if (!sentinel_ok(pointer, slab->chunkSize))
673  elog(WARNING, "detected write past chunk end in %s %p",
674  slab->header.name, chunk);
675 #endif
676 
677  /* push this chunk onto the head of the block's free list */
678  *(MemoryChunk **) pointer = block->freehead;
679  block->freehead = chunk;
680 
681  block->nfree++;
682 
683  Assert(block->nfree > 0);
684  Assert(block->nfree <= slab->chunksPerBlock);
685 
686 #ifdef CLOBBER_FREED_MEMORY
687  /* don't wipe the free list MemoryChunk pointer stored in the chunk */
688  wipe_mem((char *) pointer + sizeof(MemoryChunk *),
689  slab->chunkSize - sizeof(MemoryChunk *));
690 #endif
691 
692  curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
693  newBlocklistIdx = SlabBlocklistIndex(slab, block->nfree);
694 
695  /*
696  * Check if the block needs to be moved to another element on the
697  * blocklist based on it now having 1 more free chunk.
698  */
699  if (unlikely(curBlocklistIdx != newBlocklistIdx))
700  {
701  /* do the move */
702  dlist_delete_from(&slab->blocklist[curBlocklistIdx], &block->node);
703  dlist_push_head(&slab->blocklist[newBlocklistIdx], &block->node);
704 
705  /*
706  * The blocklist[curBlocklistIdx] may now be empty or we may now be
707  * able to use a lower-element blocklist. We'll need to redetermine
708  * what the slab->curBlocklistIndex is if the current blocklist was
709  * changed or if a lower element one was changed. We must ensure we
710  * use the list with the fullest block(s).
711  */
712  if (slab->curBlocklistIndex >= curBlocklistIdx)
713  {
 714  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
 715 
716  /*
717  * We know there must be a block with at least 1 unused chunk as
718  * we just pfree'd one. Ensure curBlocklistIndex reflects this.
719  */
720  Assert(slab->curBlocklistIndex > 0);
721  }
722  }
723 
724  /* Handle when a block becomes completely empty */
725  if (unlikely(block->nfree == slab->chunksPerBlock))
726  {
727  /* remove the block */
728  dlist_delete_from(&slab->blocklist[newBlocklistIdx], &block->node);
729 
730  /*
731  * To avoid thrashing malloc/free, we keep a list of empty blocks that
732  * we can reuse again instead of having to malloc a new one.
733  */
 734  if (dclist_count(&slab->emptyblocks) < SLAB_MAXIMUM_EMPTY_BLOCKS)
 735  dclist_push_head(&slab->emptyblocks, &block->node);
736  else
737  {
738  /*
739  * When we have enough empty blocks stored already, we actually
740  * free the block.
741  */
742 #ifdef CLOBBER_FREED_MEMORY
743  wipe_mem(block, slab->blockSize);
744 #endif
745  free(block);
746  slab->header.mem_allocated -= slab->blockSize;
747  }
748 
749  /*
750  * Check if we need to reset the blocklist index. This is required
751  * when the blocklist this block is on has become completely empty.
752  */
753  if (slab->curBlocklistIndex == newBlocklistIdx &&
754  dlist_is_empty(&slab->blocklist[newBlocklistIdx]))
 755  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
 756  }
757 }
static void dclist_push_head(dclist_head *head, dlist_node *node)
Definition: ilist.h:693
#define SlabBlockIsValid(block)
Definition: slab.c:202
#define SLAB_MAXIMUM_EMPTY_BLOCKS
Definition: slab.c:98
MemoryContextData header
Definition: slab.c:105

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_push_head(), dlist_delete_from(), dlist_is_empty(), dlist_push_head(), elog(), SlabContext::emptyblocks, free, SlabBlock::freehead, SlabContext::fullChunkSize, SlabContext::header, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryContextData::name, SlabBlock::nfree, SlabBlock::node, PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SLAB_MAXIMUM_EMPTY_BLOCKS, SlabBlockIsValid, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), unlikely, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

◆ SlabGetChunkContext()

MemoryContext SlabGetChunkContext ( void *  pointer)

Definition at line 810 of file slab.c.

811 {
812  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
813  SlabBlock *block;
814 
815  /* Allow access to the chunk header. */
 816  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 817 
818  block = MemoryChunkGetBlock(chunk);
819 
820  /* Disallow access to the chunk header. */
 821  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
 822 
823  Assert(SlabBlockIsValid(block));
824 
825  return &block->slab->header;
826 }

References Assert(), SlabContext::header, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabGetChunkSpace()

Size SlabGetChunkSpace ( void *  pointer)

Definition at line 834 of file slab.c.

835 {
836  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
837  SlabBlock *block;
838  SlabContext *slab;
839 
840  /* Allow access to the chunk header. */
 841  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 842 
843  block = MemoryChunkGetBlock(chunk);
844 
845  /* Disallow access to the chunk header. */
 846  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
 847 
848  Assert(SlabBlockIsValid(block));
849  slab = block->slab;
850 
851  return slab->fullChunkSize;
852 }

References Assert(), SlabContext::fullChunkSize, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabIsEmpty()

bool SlabIsEmpty ( MemoryContext  context)

Definition at line 859 of file slab.c.

860 {
861  Assert(SlabIsValid((SlabContext *) context));
862 
863  return (context->mem_allocated == 0);
864 }

References Assert(), MemoryContextData::mem_allocated, and SlabIsValid.

◆ SlabRealloc()

void* SlabRealloc ( void *  pointer,
Size  size 
)

Definition at line 773 of file slab.c.

774 {
775  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
776  SlabBlock *block;
777  SlabContext *slab;
778 
779  /* Allow access to the chunk header. */
 780  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 781 
782  block = MemoryChunkGetBlock(chunk);
783 
784  /* Disallow access to the chunk header. */
 785  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
 786 
787  /*
788  * Try to verify that we have a sane block pointer: the block header
789  * should reference a slab context. (We use a test-and-elog, not just
790  * Assert, because it seems highly likely that we're here in error in the
791  * first place.)
792  */
793  if (!SlabBlockIsValid(block))
794  elog(ERROR, "could not find block containing chunk %p", chunk);
795  slab = block->slab;
796 
797  /* can't do actual realloc with slab, but let's try to be gentle */
798  if (size == slab->chunkSize)
799  return pointer;
800 
801  elog(ERROR, "slab allocator does not support realloc()");
802  return NULL; /* keep compiler quiet */
803 }

References SlabContext::chunkSize, elog(), ERROR, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabReset()

void SlabReset ( MemoryContext  context)

Definition at line 431 of file slab.c.

432 {
433  SlabContext *slab = (SlabContext *) context;
434  dlist_mutable_iter miter;
435  int i;
436 
437  Assert(SlabIsValid(slab));
438 
439 #ifdef MEMORY_CONTEXT_CHECKING
440  /* Check for corruption and leaks before freeing */
441  SlabCheck(context);
442 #endif
443 
444  /* release any retained empty blocks */
445  dclist_foreach_modify(miter, &slab->emptyblocks)
446  {
447  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
448 
449  dclist_delete_from(&slab->emptyblocks, miter.cur);
450 
451 #ifdef CLOBBER_FREED_MEMORY
452  wipe_mem(block, slab->blockSize);
453 #endif
454  free(block);
455  context->mem_allocated -= slab->blockSize;
456  }
457 
458  /* walk over blocklist and free the blocks */
459  for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
460  {
461  dlist_foreach_modify(miter, &slab->blocklist[i])
462  {
463  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
464 
465  dlist_delete(miter.cur);
466 
467 #ifdef CLOBBER_FREED_MEMORY
468  wipe_mem(block, slab->blockSize);
469 #endif
470  free(block);
471  context->mem_allocated -= slab->blockSize;
472  }
473  }
474 
475  slab->curBlocklistIndex = 0;
476 
477  Assert(context->mem_allocated == 0);
478 }
static void dclist_delete_from(dclist_head *head, dlist_node *node)
Definition: ilist.h:763
#define dclist_foreach_modify(iter, lhead)
Definition: ilist.h:973
int i
Definition: isn.c:73
#define SLAB_BLOCKLIST_COUNT
Definition: slab.c:95

References Assert(), SlabContext::blocklist, SlabContext::blockSize, dlist_mutable_iter::cur, SlabContext::curBlocklistIndex, dclist_delete_from(), dclist_foreach_modify, dlist_container, dlist_delete(), dlist_foreach_modify, SlabContext::emptyblocks, free, i, MemoryContextData::mem_allocated, SLAB_BLOCKLIST_COUNT, and SlabIsValid.

Referenced by SlabDelete().

◆ SlabStats()

void SlabStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 876 of file slab.c.

880 {
881  SlabContext *slab = (SlabContext *) context;
882  Size nblocks = 0;
883  Size freechunks = 0;
884  Size totalspace;
885  Size freespace = 0;
886  int i;
887 
888  Assert(SlabIsValid(slab));
889 
890  /* Include context header in totalspace */
891  totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
892 
893  /* Add the space consumed by blocks in the emptyblocks list */
894  totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
895 
896  for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
897  {
898  dlist_iter iter;
899 
900  dlist_foreach(iter, &slab->blocklist[i])
901  {
902  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
903 
904  nblocks++;
905  totalspace += slab->blockSize;
906  freespace += slab->fullChunkSize * block->nfree;
907  freechunks += block->nfree;
908  }
909  }
910 
911  if (printfunc)
912  {
913  char stats_string[200];
914 
915  /* XXX should we include free chunks on empty blocks? */
916  snprintf(stats_string, sizeof(stats_string),
917  "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
918  totalspace, nblocks, dclist_count(&slab->emptyblocks),
919  freespace, freechunks, totalspace - freespace);
920  printfunc(context, passthru, stats_string, print_to_stderr);
921  }
922 
923  if (totals)
924  {
925  totals->nblocks += nblocks;
926  totals->freechunks += freechunks;
927  totals->totalspace += totalspace;
928  totals->freespace += freespace;
929  }
930 }
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)
Definition: slab.c:88

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunksPerBlock, dlist_iter::cur, dclist_count(), dlist_container, dlist_foreach, SlabContext::emptyblocks, MemoryContextCounters::freechunks, MemoryContextCounters::freespace, SlabContext::fullChunkSize, i, MemoryContextCounters::nblocks, SlabBlock::nfree, SLAB_BLOCKLIST_COUNT, Slab_CONTEXT_HDRSZ, SlabIsValid, snprintf, and MemoryContextCounters::totalspace.