PostgreSQL Source Code  git master
memutils_internal.h File Reference
#include "utils/memutils.h"
Include dependency graph for memutils_internal.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define PallocAlignedExtraBytes(alignto)    ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))
 
#define MEMORY_CONTEXT_METHODID_BITS   3
 
#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)
 

Typedefs

typedef enum MemoryContextMethodID MemoryContextMethodID
 

Enumerations

enum  MemoryContextMethodID {
  MCTX_UNUSED1_ID , MCTX_UNUSED2_ID , MCTX_UNUSED3_ID , MCTX_ASET_ID ,
  MCTX_GENERATION_ID , MCTX_SLAB_ID , MCTX_ALIGNED_REDIRECT_ID , MCTX_UNUSED4_ID
}
 

Functions

void * AllocSetAlloc (MemoryContext context, Size size)
 
void AllocSetFree (void *pointer)
 
void * AllocSetRealloc (void *pointer, Size size)
 
void AllocSetReset (MemoryContext context)
 
void AllocSetDelete (MemoryContext context)
 
MemoryContext AllocSetGetChunkContext (void *pointer)
 
Size AllocSetGetChunkSpace (void *pointer)
 
bool AllocSetIsEmpty (MemoryContext context)
 
void AllocSetStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * GenerationAlloc (MemoryContext context, Size size)
 
void GenerationFree (void *pointer)
 
void * GenerationRealloc (void *pointer, Size size)
 
void GenerationReset (MemoryContext context)
 
void GenerationDelete (MemoryContext context)
 
MemoryContext GenerationGetChunkContext (void *pointer)
 
Size GenerationGetChunkSpace (void *pointer)
 
bool GenerationIsEmpty (MemoryContext context)
 
void GenerationStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * SlabAlloc (MemoryContext context, Size size)
 
void SlabFree (void *pointer)
 
void * SlabRealloc (void *pointer, Size size)
 
void SlabReset (MemoryContext context)
 
void SlabDelete (MemoryContext context)
 
MemoryContext SlabGetChunkContext (void *pointer)
 
Size SlabGetChunkSpace (void *pointer)
 
bool SlabIsEmpty (MemoryContext context)
 
void SlabStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void AlignedAllocFree (void *pointer)
 
void * AlignedAllocRealloc (void *pointer, Size size)
 
MemoryContext AlignedAllocGetChunkContext (void *pointer)
 
Size AlignedAllocGetChunkSpace (void *pointer)
 
void MemoryContextCreate (MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
 

Macro Definition Documentation

◆ MEMORY_CONTEXT_METHODID_BITS

#define MEMORY_CONTEXT_METHODID_BITS   3

Definition at line 121 of file memutils_internal.h.

◆ MEMORY_CONTEXT_METHODID_MASK

#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)

Definition at line 122 of file memutils_internal.h.

◆ PallocAlignedExtraBytes

#define PallocAlignedExtraBytes (   alignto)     ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))

Definition at line 88 of file memutils_internal.h.

Typedef Documentation

◆ MemoryContextMethodID

Enumeration Type Documentation

◆ MemoryContextMethodID

Enumerator
MCTX_UNUSED1_ID 
MCTX_UNUSED2_ID 
MCTX_UNUSED3_ID 
MCTX_ASET_ID 
MCTX_GENERATION_ID 
MCTX_SLAB_ID 
MCTX_ALIGNED_REDIRECT_ID 
MCTX_UNUSED4_ID 

Definition at line 105 of file memutils_internal.h.

106 {
107  MCTX_UNUSED1_ID, /* 000 occurs in never-used memory */
108  MCTX_UNUSED2_ID, /* glibc malloc'd chunks usually match 001 */
109  MCTX_UNUSED3_ID, /* glibc malloc'd chunks > 128kB match 010 */
110  MCTX_ASET_ID,
111  MCTX_GENERATION_ID,
112  MCTX_SLAB_ID,
113  MCTX_ALIGNED_REDIRECT_ID,
114  MCTX_UNUSED4_ID /* 111 occurs in wipe_mem'd memory */
MemoryContextMethodID
@ MCTX_GENERATION_ID
@ MCTX_UNUSED4_ID
@ MCTX_UNUSED3_ID
@ MCTX_UNUSED1_ID
@ MCTX_UNUSED2_ID
@ MCTX_SLAB_ID
@ MCTX_ASET_ID
@ MCTX_ALIGNED_REDIRECT_ID

Function Documentation

◆ AlignedAllocFree()

void AlignedAllocFree ( void *  pointer)

Definition at line 29 of file alignedalloc.c.

30 {
31  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
32  void *unaligned;
33 
34  VALGRIND_MAKE_MEM_DEFINED(chunk, sizeof(MemoryChunk));
35 
36  Assert(!MemoryChunkIsExternal(chunk));
37 
38  /* obtain the original (unaligned) allocated pointer */
39  unaligned = MemoryChunkGetBlock(chunk);
40 
41 #ifdef MEMORY_CONTEXT_CHECKING
42  /* Test for someone scribbling on unused space in chunk */
43  if (!sentinel_ok(pointer, chunk->requested_size))
44  elog(WARNING, "detected write past chunk end in %s %p",
45  GetMemoryChunkContext(unaligned)->name, chunk);
46 #endif
47 
48  pfree(unaligned);
49 }
#define WARNING
Definition: elog.h:36
const char * name
Definition: encode.c:571
Assert(fmt[strlen(fmt) - 1] !='\n')
void pfree(void *pointer)
Definition: mcxt.c:1456
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:616
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)

References Assert(), elog(), GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), name, pfree(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

◆ AlignedAllocGetChunkContext()

MemoryContext AlignedAllocGetChunkContext ( void *  pointer)

Definition at line 118 of file alignedalloc.c.

119 {
120  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
121  MemoryContext cxt;
122 
123  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
124 
125  Assert(!MemoryChunkIsExternal(redirchunk));
126 
127  cxt = GetMemoryChunkContext(MemoryChunkGetBlock(redirchunk));
128 
129  VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
130 
131  return cxt;
132 }
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27

References Assert(), GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocGetChunkSpace()

Size AlignedAllocGetChunkSpace ( void *  pointer)

Definition at line 140 of file alignedalloc.c.

141 {
142  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
143  void *unaligned;
144  Size space;
145 
146  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
147 
148  unaligned = MemoryChunkGetBlock(redirchunk);
149  space = GetMemoryChunkSpace(unaligned);
150 
151  VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
152 
153  return space;
154 }
size_t Size
Definition: c.h:589
Size GetMemoryChunkSpace(void *pointer)
Definition: mcxt.c:630

References GetMemoryChunkSpace(), MemoryChunkGetBlock(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocRealloc()

void* AlignedAllocRealloc ( void *  pointer,
Size  size 
)

Definition at line 60 of file alignedalloc.c.

61 {
62  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
63  Size alignto;
64  void *unaligned;
65  MemoryContext ctx;
66  Size old_size;
67  void *newptr;
68 
69  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
70 
71  alignto = MemoryChunkGetValue(redirchunk);
72  unaligned = MemoryChunkGetBlock(redirchunk);
73 
74  /* sanity check this is a power of 2 value */
75  Assert((alignto & (alignto - 1)) == 0);
76 
77  /*
78  * Determine the size of the original allocation. We can't determine this
79  * exactly as GetMemoryChunkSpace() returns the total space used for the
80  * allocation, which for contexts like aset includes rounding up to the
81  * next power of 2. However, this value is just used to memcpy() the old
82  * data into the new allocation, so we only need to concern ourselves with
83  * not reading beyond the end of the original allocation's memory. The
84  * drawback here is that we may copy more bytes than we need to, which
85  * only amounts to wasted effort. We can safely subtract the extra bytes
86  * that we requested to allow us to align the pointer. We must also
87  * subtract the space for the unaligned pointer's MemoryChunk since
88  * GetMemoryChunkSpace should have included that. This does assume that
89  * all context types use MemoryChunk as a chunk header.
90  */
91  old_size = GetMemoryChunkSpace(unaligned) -
92  PallocAlignedExtraBytes(alignto) - sizeof(MemoryChunk);
93 
94 #ifdef MEMORY_CONTEXT_CHECKING
95  /* check that GetMemoryChunkSpace returned something realistic */
96  Assert(old_size >= redirchunk->requested_size);
97 #endif
98 
99  ctx = GetMemoryChunkContext(unaligned);
100  newptr = MemoryContextAllocAligned(ctx, size, alignto, 0);
101 
102  /*
103  * We may memcpy beyond the end of the original allocation request size,
104  * so we must mark the entire allocation as defined.
105  */
106  VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
107  memcpy(newptr, pointer, Min(size, old_size));
108  pfree(unaligned);
109 
110  return newptr;
111 }
#define Min(x, y)
Definition: c.h:988
void * MemoryContextAllocAligned(MemoryContext context, Size size, Size alignto, int flags)
Definition: mcxt.c:1344
#define PallocAlignedExtraBytes(alignto)
static Size MemoryChunkGetValue(MemoryChunk *chunk)
struct MemoryChunk MemoryChunk

References Assert(), GetMemoryChunkContext(), GetMemoryChunkSpace(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryContextAllocAligned(), Min, PallocAlignedExtraBytes, pfree(), PointerGetMemoryChunk, and VALGRIND_MAKE_MEM_DEFINED.

◆ AllocSetAlloc()

void* AllocSetAlloc ( MemoryContext  context,
Size  size 
)

Definition at line 699 of file aset.c.

700 {
701  AllocSet set = (AllocSet) context;
702  AllocBlock block;
703  MemoryChunk *chunk;
704  int fidx;
705  Size chunk_size;
706  Size blksize;
707 
708  Assert(AllocSetIsValid(set));
709 
710  /*
711  * If requested size exceeds maximum for chunks, allocate an entire block
712  * for this request.
713  */
714  if (size > set->allocChunkLimit)
715  {
716 #ifdef MEMORY_CONTEXT_CHECKING
717  /* ensure there's always space for the sentinel byte */
718  chunk_size = MAXALIGN(size + 1);
719 #else
720  chunk_size = MAXALIGN(size);
721 #endif
722 
723  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
724  block = (AllocBlock) malloc(blksize);
725  if (block == NULL)
726  return NULL;
727 
728  context->mem_allocated += blksize;
729 
730  block->aset = set;
731  block->freeptr = block->endptr = ((char *) block) + blksize;
732 
733  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
734 
735  /* mark the MemoryChunk as externally managed */
736  MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);
737 
738 #ifdef MEMORY_CONTEXT_CHECKING
739  chunk->requested_size = size;
740  /* set mark to catch clobber of "unused" space */
741  Assert(size < chunk_size);
742  set_sentinel(MemoryChunkGetPointer(chunk), size);
743 #endif
744 #ifdef RANDOMIZE_ALLOCATED_MEMORY
745  /* fill the allocated space with junk */
746  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
747 #endif
748 
749  /*
750  * Stick the new block underneath the active allocation block, if any,
751  * so that we don't lose the use of the space remaining therein.
752  */
753  if (set->blocks != NULL)
754  {
755  block->prev = set->blocks;
756  block->next = set->blocks->next;
757  if (block->next)
758  block->next->prev = block;
759  set->blocks->next = block;
760  }
761  else
762  {
763  block->prev = NULL;
764  block->next = NULL;
765  set->blocks = block;
766  }
767 
768  /* Ensure any padding bytes are marked NOACCESS. */
769  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
770  chunk_size - size);
771 
772  /* Disallow access to the chunk header. */
773  VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
774 
775  return MemoryChunkGetPointer(chunk);
776  }
777 
778  /*
779  * Request is small enough to be treated as a chunk. Look in the
780  * corresponding free list to see if there is a free chunk we could reuse.
781  * If one is found, remove it from the free list, make it again a member
782  * of the alloc set and return its data address.
783  *
784  * Note that we don't attempt to ensure there's space for the sentinel
785  * byte here. We expect a large proportion of allocations to be for sizes
786  * which are already a power of 2. If we were to always make space for a
787  * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
788  * doubling the memory requirements for such allocations.
789  */
790  fidx = AllocSetFreeIndex(size);
791  chunk = set->freelist[fidx];
792  if (chunk != NULL)
793  {
795 
796  /* Allow access to the chunk header. */
798 
799  Assert(fidx == MemoryChunkGetValue(chunk));
800 
801  /* pop this chunk off the freelist */
803  set->freelist[fidx] = link->next;
805 
806 #ifdef MEMORY_CONTEXT_CHECKING
807  chunk->requested_size = size;
808  /* set mark to catch clobber of "unused" space */
809  if (size < GetChunkSizeFromFreeListIdx(fidx))
810  set_sentinel(MemoryChunkGetPointer(chunk), size);
811 #endif
812 #ifdef RANDOMIZE_ALLOCATED_MEMORY
813  /* fill the allocated space with junk */
814  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
815 #endif
816 
817  /* Ensure any padding bytes are marked NOACCESS. */
818  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
819  GetChunkSizeFromFreeListIdx(fidx) - size);
820 
821  /* Disallow access to the chunk header. */
823 
824  return MemoryChunkGetPointer(chunk);
825  }
826 
827  /*
828  * Choose the actual chunk size to allocate.
829  */
830  chunk_size = GetChunkSizeFromFreeListIdx(fidx);
831  Assert(chunk_size >= size);
832 
833  /*
834  * If there is enough room in the active allocation block, we will put the
835  * chunk into that block. Else must start a new one.
836  */
837  if ((block = set->blocks) != NULL)
838  {
839  Size availspace = block->endptr - block->freeptr;
840 
841  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
842  {
843  /*
844  * The existing active (top) block does not have enough room for
845  * the requested allocation, but it might still have a useful
846  * amount of space in it. Once we push it down in the block list,
847  * we'll never try to allocate more space from it. So, before we
848  * do that, carve up its free space into chunks that we can put on
849  * the set's freelists.
850  *
851  * Because we can only get here when there's less than
852  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
853  * more than ALLOCSET_NUM_FREELISTS-1 times.
854  */
855  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
856  {
858  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
859  int a_fidx = AllocSetFreeIndex(availchunk);
860 
861  /*
862  * In most cases, we'll get back the index of the next larger
863  * freelist than the one we need to put this chunk on. The
864  * exception is when availchunk is exactly a power of 2.
865  */
866  if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
867  {
868  a_fidx--;
869  Assert(a_fidx >= 0);
870  availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
871  }
872 
873  chunk = (MemoryChunk *) (block->freeptr);
874 
875  /* Prepare to initialize the chunk header. */
877  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
878  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
879 
880  /* store the freelist index in the value field */
881  MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
882 #ifdef MEMORY_CONTEXT_CHECKING
883  chunk->requested_size = InvalidAllocSize; /* mark it free */
884 #endif
885  /* push this chunk onto the free list */
886  link = GetFreeListLink(chunk);
887 
889  link->next = set->freelist[a_fidx];
891 
892  set->freelist[a_fidx] = chunk;
893  }
894  /* Mark that we need to create a new block */
895  block = NULL;
896  }
897  }
898 
899  /*
900  * Time to create a new regular (multi-chunk) block?
901  */
902  if (block == NULL)
903  {
904  Size required_size;
905 
906  /*
907  * The first such block has size initBlockSize, and we double the
908  * space in each succeeding block, but not more than maxBlockSize.
909  */
910  blksize = set->nextBlockSize;
911  set->nextBlockSize <<= 1;
912  if (set->nextBlockSize > set->maxBlockSize)
913  set->nextBlockSize = set->maxBlockSize;
914 
915  /*
916  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
917  * space... but try to keep it a power of 2.
918  */
919  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
920  while (blksize < required_size)
921  blksize <<= 1;
922 
923  /* Try to allocate it */
924  block = (AllocBlock) malloc(blksize);
925 
926  /*
927  * We could be asking for pretty big blocks here, so cope if malloc
928  * fails. But give up if there's less than 1 MB or so available...
929  */
930  while (block == NULL && blksize > 1024 * 1024)
931  {
932  blksize >>= 1;
933  if (blksize < required_size)
934  break;
935  block = (AllocBlock) malloc(blksize);
936  }
937 
938  if (block == NULL)
939  return NULL;
940 
941  context->mem_allocated += blksize;
942 
943  block->aset = set;
944  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
945  block->endptr = ((char *) block) + blksize;
946 
947  /* Mark unallocated space NOACCESS. */
949  blksize - ALLOC_BLOCKHDRSZ);
950 
951  block->prev = NULL;
952  block->next = set->blocks;
953  if (block->next)
954  block->next->prev = block;
955  set->blocks = block;
956  }
957 
958  /*
959  * OK, do the allocation
960  */
961  chunk = (MemoryChunk *) (block->freeptr);
962 
963  /* Prepare to initialize the chunk header. */
965 
966  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
967  Assert(block->freeptr <= block->endptr);
968 
969  /* store the free list index in the value field */
970  MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
971 
972 #ifdef MEMORY_CONTEXT_CHECKING
973  chunk->requested_size = size;
974  /* set mark to catch clobber of "unused" space */
975  if (size < chunk_size)
976  set_sentinel(MemoryChunkGetPointer(chunk), size);
977 #endif
978 #ifdef RANDOMIZE_ALLOCATED_MEMORY
979  /* fill the allocated space with junk */
980  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
981 #endif
982 
983  /* Ensure any padding bytes are marked NOACCESS. */
984  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
985  chunk_size - size);
986 
987  /* Disallow access to the chunk header. */
989 
990  return MemoryChunkGetPointer(chunk);
991 }
#define AllocSetIsValid(set)
Definition: aset.c:201
#define GetFreeListLink(chkptr)
Definition: aset.c:132
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:105
#define GetChunkSizeFromFreeListIdx(fidx)
Definition: aset.c:140
#define ALLOC_MINBITS
Definition: aset.c:83
struct AllocBlockData * AllocBlock
Definition: aset.c:107
static int AllocSetFreeIndex(Size size)
Definition: aset.c:271
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:104
AllocSetContext * AllocSet
Definition: aset.c:168
#define MAXALIGN(LEN)
Definition: c.h:795
#define malloc(a)
Definition: header.h:50
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
#define InvalidAllocSize
Definition: memutils.h:47
#define MemoryChunkGetPointer(c)
static void MemoryChunkSetHdrMaskExternal(MemoryChunk *chunk, MemoryContextMethodID methodid)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
AllocBlock prev
Definition: aset.c:185
AllocSet aset
Definition: aset.c:184
char * freeptr
Definition: aset.c:187
AllocBlock next
Definition: aset.c:186
char * endptr
Definition: aset.c:188
AllocBlock blocks
Definition: aset.c:156
Size maxBlockSize
Definition: aset.c:160
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:157
Size nextBlockSize
Definition: aset.c:161
Size allocChunkLimit
Definition: aset.c:162
Size mem_allocated
Definition: memnodes.h:87

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, ALLOC_MINBITS, AllocSetContext::allocChunkLimit, AllocSetFreeIndex(), AllocSetIsValid, AllocBlockData::aset, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, InvalidAllocSize, link(), malloc, MAXALIGN, AllocSetContext::maxBlockSize, MCTX_ASET_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkSetHdrMask(), MemoryChunkSetHdrMaskExternal(), AllocBlockData::next, AllocSetContext::nextBlockSize, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

Referenced by AllocSetRealloc().

◆ AllocSetDelete()

void AllocSetDelete ( MemoryContext  context)

Definition at line 603 of file aset.c.

604 {
605  AllocSet set = (AllocSet) context;
606  AllocBlock block = set->blocks;
607  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
608 
609  Assert(AllocSetIsValid(set));
610 
611 #ifdef MEMORY_CONTEXT_CHECKING
612  /* Check for corruption and leaks before freeing */
613  AllocSetCheck(context);
614 #endif
615 
616  /* Remember keeper block size for Assert below */
617  keepersize = set->keeper->endptr - ((char *) set);
618 
619  /*
620  * If the context is a candidate for a freelist, put it into that freelist
621  * instead of destroying it.
622  */
623  if (set->freeListIndex >= 0)
624  {
625  AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
626 
627  /*
628  * Reset the context, if it needs it, so that we aren't hanging on to
629  * more than the initial malloc chunk.
630  */
631  if (!context->isReset)
632  MemoryContextResetOnly(context);
633 
634  /*
635  * If the freelist is full, just discard what's already in it. See
636  * comments with context_freelists[].
637  */
638  if (freelist->num_free >= MAX_FREE_CONTEXTS)
639  {
640  while (freelist->first_free != NULL)
641  {
642  AllocSetContext *oldset = freelist->first_free;
643 
644  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
645  freelist->num_free--;
646 
647  /* All that remains is to free the header/initial block */
648  free(oldset);
649  }
650  Assert(freelist->num_free == 0);
651  }
652 
653  /* Now add the just-deleted context to the freelist. */
654  set->header.nextchild = (MemoryContext) freelist->first_free;
655  freelist->first_free = set;
656  freelist->num_free++;
657 
658  return;
659  }
660 
661  /* Free all blocks, except the keeper which is part of context header */
662  while (block != NULL)
663  {
664  AllocBlock next = block->next;
665 
666  if (block != set->keeper)
667  context->mem_allocated -= block->endptr - ((char *) block);
668 
669 #ifdef CLOBBER_FREED_MEMORY
670  wipe_mem(block, block->freeptr - ((char *) block));
671 #endif
672 
673  if (block != set->keeper)
674  free(block);
675 
676  block = next;
677  }
678 
679  Assert(context->mem_allocated == keepersize);
680 
681  /* Finally, free the context header, including the keeper block */
682  free(set);
683 }
#define MAX_FREE_CONTEXTS
Definition: aset.c:242
static AllocSetFreeList context_freelists[2]
Definition: aset.c:251
static int32 next
Definition: blutils.c:219
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:166
#define free(a)
Definition: header.h:65
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:349
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
MemoryContextData header
Definition: aset.c:154
int freeListIndex
Definition: aset.c:165
AllocBlock keeper
Definition: aset.c:163
int num_free
Definition: aset.c:246
AllocSetContext * first_free
Definition: aset.c:247
MemoryContext nextchild
Definition: memnodes.h:92

References AllocSetIsValid, Assert(), AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, AllocSetFreeList::first_free, free, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, MemoryContextData::isReset, AllocSetContext::keeper, MAX_FREE_CONTEXTS, MemoryContextData::mem_allocated, MemoryContextResetOnly(), next, AllocBlockData::next, MemoryContextData::nextchild, AllocSetFreeList::num_free, and PG_USED_FOR_ASSERTS_ONLY.

◆ AllocSetFree()

void AllocSetFree ( void *  pointer)

Definition at line 998 of file aset.c.

999 {
1000  AllocSet set;
1001  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1002 
1003  /* Allow access to the chunk header. */
1005 
1006  if (MemoryChunkIsExternal(chunk))
1007  {
1008  /* Release single-chunk block. */
1009  AllocBlock block = ExternalChunkGetBlock(chunk);
1010 
1011  /*
1012  * Try to verify that we have a sane block pointer: the block header
1013  * should reference an aset and the freeptr should match the endptr.
1014  */
1015  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1016  elog(ERROR, "could not find block containing chunk %p", chunk);
1017 
1018  set = block->aset;
1019 
1020 #ifdef MEMORY_CONTEXT_CHECKING
1021  {
1022  /* Test for someone scribbling on unused space in chunk */
1023  Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1024  if (!sentinel_ok(pointer, chunk->requested_size))
1025  elog(WARNING, "detected write past chunk end in %s %p",
1026  set->header.name, chunk);
1027  }
1028 #endif
1029 
1030  /* OK, remove block from aset's list and free it */
1031  if (block->prev)
1032  block->prev->next = block->next;
1033  else
1034  set->blocks = block->next;
1035  if (block->next)
1036  block->next->prev = block->prev;
1037 
1038  set->header.mem_allocated -= block->endptr - ((char *) block);
1039 
1040 #ifdef CLOBBER_FREED_MEMORY
1041  wipe_mem(block, block->freeptr - ((char *) block));
1042 #endif
1043  free(block);
1044  }
1045  else
1046  {
1047  AllocBlock block = MemoryChunkGetBlock(chunk);
1048  int fidx;
1050 
1051  /*
1052  * In this path, for speed reasons we just Assert that the referenced
1053  * block is good. We can also Assert that the value field is sane.
1054  * Future field experience may show that these Asserts had better
1055  * become regular runtime test-and-elog checks.
1056  */
1057  Assert(AllocBlockIsValid(block));
1058  set = block->aset;
1059 
1060  fidx = MemoryChunkGetValue(chunk);
1061  Assert(FreeListIdxIsValid(fidx));
1062  link = GetFreeListLink(chunk);
1063 
1064 #ifdef MEMORY_CONTEXT_CHECKING
1065  /* Test for someone scribbling on unused space in chunk */
1066  if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1067  if (!sentinel_ok(pointer, chunk->requested_size))
1068  elog(WARNING, "detected write past chunk end in %s %p",
1069  set->header.name, chunk);
1070 #endif
1071 
1072 #ifdef CLOBBER_FREED_MEMORY
1073  wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1074 #endif
1075  /* push this chunk onto the top of the free list */
1077  link->next = set->freelist[fidx];
1079  set->freelist[fidx] = chunk;
1080 
1081 #ifdef MEMORY_CONTEXT_CHECKING
1082 
1083  /*
1084  * Reset requested_size to InvalidAllocSize in chunks that are on free
1085  * list.
1086  */
1087  chunk->requested_size = InvalidAllocSize;
1088 #endif
1089  }
1090 }
#define AllocBlockIsValid(block)
Definition: aset.c:208
#define FreeListIdxIsValid(fidx)
Definition: aset.c:136
#define ExternalChunkGetBlock(chunk)
Definition: aset.c:216
#define ERROR
Definition: elog.h:39
const char * name
Definition: memnodes.h:93

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert(), AllocSetContext::blocks, elog(), AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, free, AllocSetContext::freelist, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, AllocSetContext::header, InvalidAllocSize, link(), MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, and WARNING.

Referenced by AllocSetRealloc().

◆ AllocSetGetChunkContext()

MemoryContext AllocSetGetChunkContext ( void *  pointer)

Definition at line 1366 of file aset.c.

1367 {
1368  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1369  AllocBlock block;
1370  AllocSet set;
1371 
1372  /* Allow access to the chunk header. */
1374 
1375  if (MemoryChunkIsExternal(chunk))
1376  block = ExternalChunkGetBlock(chunk);
1377  else
1378  block = (AllocBlock) MemoryChunkGetBlock(chunk);
1379 
1380  /* Disallow access to the chunk header. */
1382 
1383  Assert(AllocBlockIsValid(block));
1384  set = block->aset;
1385 
1386  return &set->header;
1387 }

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert(), ExternalChunkGetBlock, AllocSetContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetGetChunkSpace()

Size AllocSetGetChunkSpace ( void *  pointer)

Definition at line 1395 of file aset.c.

1396 {
1397  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1398  int fidx;
1399 
1400  /* Allow access to the chunk header. */
1402 
1403  if (MemoryChunkIsExternal(chunk))
1404  {
1405  AllocBlock block = ExternalChunkGetBlock(chunk);
1406 
1407  /* Disallow access to the chunk header. */
1409 
1410  Assert(AllocBlockIsValid(block));
1411 
1412  return block->endptr - (char *) chunk;
1413  }
1414 
1415  fidx = MemoryChunkGetValue(chunk);
1416  Assert(FreeListIdxIsValid(fidx));
1417 
1418  /* Disallow access to the chunk header. */
1420 
1422 }

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, Assert(), AllocBlockData::endptr, ExternalChunkGetBlock, FreeListIdxIsValid, GetChunkSizeFromFreeListIdx, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetIsEmpty()

bool AllocSetIsEmpty ( MemoryContext  context)

Definition at line 1429 of file aset.c.

1430 {
1431  Assert(AllocSetIsValid(context));
1432 
1433  /*
1434  * For now, we say "empty" only if the context is new or just reset. We
1435  * could examine the freelists to determine if all space has been freed,
1436  * but it's not really worth the trouble for present uses of this
1437  * functionality.
1438  */
1439  if (context->isReset)
1440  return true;
1441  return false;
1442 }

References AllocSetIsValid, Assert(), and MemoryContextData::isReset.

◆ AllocSetRealloc()

void* AllocSetRealloc ( void *  pointer,
Size  size 
)

Definition at line 1105 of file aset.c.

1106 {
1107  AllocBlock block;
1108  AllocSet set;
1109  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1110  Size oldchksize;
1111  int fidx;
1112 
1113  /* Allow access to the chunk header. */
1114  VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1115 
1116  if (MemoryChunkIsExternal(chunk))
1117  {
1118  /*
1119  * The chunk must have been allocated as a single-chunk block. Use
1120  * realloc() to make the containing block bigger, or smaller, with
1121  * minimum space wastage.
1122  */
1123  Size chksize;
1124  Size blksize;
1125  Size oldblksize;
1126 
1127  block = ExternalChunkGetBlock(chunk);
1128 
1129  /*
1130  * Try to verify that we have a sane block pointer: the block header
1131  * should reference an aset and the freeptr should match the endptr.
1132  */
1133  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1134  elog(ERROR, "could not find block containing chunk %p", chunk);
1135 
1136  set = block->aset;
1137 
1138  oldchksize = block->endptr - (char *) pointer;
1139 
1140 #ifdef MEMORY_CONTEXT_CHECKING
1141  /* Test for someone scribbling on unused space in chunk */
1142  Assert(chunk->requested_size < oldchksize);
1143  if (!sentinel_ok(pointer, chunk->requested_size))
1144  elog(WARNING, "detected write past chunk end in %s %p",
1145  set->header.name, chunk);
1146 #endif
1147 
1148 #ifdef MEMORY_CONTEXT_CHECKING
1149  /* ensure there's always space for the sentinel byte */
1150  chksize = MAXALIGN(size + 1);
1151 #else
1152  chksize = MAXALIGN(size);
1153 #endif
1154 
1155  /* Do the realloc */
1156  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1157  oldblksize = block->endptr - ((char *) block);
1158 
1159  block = (AllocBlock) realloc(block, blksize);
1160  if (block == NULL)
1161  {
1162  /* Disallow access to the chunk header. */
1163  VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1164  return NULL;
1165  }
1166 
1167  /* updated separately, not to underflow when (oldblksize > blksize) */
1168  set->header.mem_allocated -= oldblksize;
1169  set->header.mem_allocated += blksize;
1170 
1171  block->freeptr = block->endptr = ((char *) block) + blksize;
1172 
1173  /* Update pointers since block has likely been moved */
1174  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1175  pointer = MemoryChunkGetPointer(chunk);
1176  if (block->prev)
1177  block->prev->next = block;
1178  else
1179  set->blocks = block;
1180  if (block->next)
1181  block->next->prev = block;
1182 
1183 #ifdef MEMORY_CONTEXT_CHECKING
1184 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1185 
1186  /*
1187  * We can only randomize the extra space if we know the prior request.
1188  * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1189  */
1190  if (size > chunk->requested_size)
1191  randomize_mem((char *) pointer + chunk->requested_size,
1192  size - chunk->requested_size);
1193 #else
1194 
1195  /*
1196  * If this is an increase, realloc() will have marked any
1197  * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1198  * also need to adjust trailing bytes from the old allocation (from
1199  * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1200  * Make sure not to mark too many bytes in case chunk->requested_size
1201  * < size < oldchksize.
1202  */
1203 #ifdef USE_VALGRIND
1204  if (Min(size, oldchksize) > chunk->requested_size)
1205  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1206  Min(size, oldchksize) - chunk->requested_size);
1207 #endif
1208 #endif
1209 
1210  chunk->requested_size = size;
1211  /* set mark to catch clobber of "unused" space */
1212  Assert(size < chksize);
1213  set_sentinel(pointer, size);
1214 #else /* !MEMORY_CONTEXT_CHECKING */
1215 
1216  /*
1217  * We may need to adjust marking of bytes from the old allocation as
1218  * some of them may be marked NOACCESS. We don't know how much of the
1219  * old chunk size was the requested size; it could have been as small
1220  * as one byte. We have to be conservative and just mark the entire
1221  * old portion DEFINED. Make sure not to mark memory beyond the new
1222  * allocation in case it's smaller than the old one.
1223  */
1224  VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1225 #endif
1226 
1227  /* Ensure any padding bytes are marked NOACCESS. */
1228  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1229 
1230  /* Disallow access to the chunk header. */
1231  VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1232 
1233  return pointer;
1234  }
1235 
1236  block = MemoryChunkGetBlock(chunk);
1237 
1238  /*
1239  * In this path, for speed reasons we just Assert that the referenced
1240  * block is good. We can also Assert that the value field is sane. Future
1241  * field experience may show that these Asserts had better become regular
1242  * runtime test-and-elog checks.
1243  */
1244  Assert(AllocBlockIsValid(block));
1245  set = block->aset;
1246 
1247  fidx = MemoryChunkGetValue(chunk);
1248  Assert(FreeListIdxIsValid(fidx));
1249  oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1250 
1251 #ifdef MEMORY_CONTEXT_CHECKING
1252  /* Test for someone scribbling on unused space in chunk */
1253  if (chunk->requested_size < oldchksize)
1254  if (!sentinel_ok(pointer, chunk->requested_size))
1255  elog(WARNING, "detected write past chunk end in %s %p",
1256  set->header.name, chunk);
1257 #endif
1258 
1259  /*
1260  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1261  * allocated area already is >= the new size. (In particular, we will
1262  * fall out here if the requested size is a decrease.)
1263  */
1264  if (oldchksize >= size)
1265  {
1266 #ifdef MEMORY_CONTEXT_CHECKING
1267  Size oldrequest = chunk->requested_size;
1268 
1269 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1270  /* We can only fill the extra space if we know the prior request */
1271  if (size > oldrequest)
1272  randomize_mem((char *) pointer + oldrequest,
1273  size - oldrequest);
1274 #endif
1275 
1276  chunk->requested_size = size;
1277 
1278  /*
1279  * If this is an increase, mark any newly-available part UNDEFINED.
1280  * Otherwise, mark the obsolete part NOACCESS.
1281  */
1282  if (size > oldrequest)
1283  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1284  size - oldrequest);
1285  else
1286  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1287  oldchksize - size);
1288 
1289  /* set mark to catch clobber of "unused" space */
1290  if (size < oldchksize)
1291  set_sentinel(pointer, size);
1292 #else /* !MEMORY_CONTEXT_CHECKING */
1293 
1294  /*
1295  * We don't have the information to determine whether we're growing
1296  * the old request or shrinking it, so we conservatively mark the
1297  * entire new allocation DEFINED.
1298  */
1299  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1300  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1301 #endif
1302 
1303  /* Disallow access to the chunk header. */
1304  VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1305 
1306  return pointer;
1307  }
1308  else
1309  {
1310  /*
1311  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1312  * allocate a new chunk and copy the data. Since we know the existing
1313  * data isn't huge, this won't involve any great memcpy expense, so
1314  * it's not worth being smarter. (At one time we tried to avoid
1315  * memcpy when it was possible to enlarge the chunk in-place, but that
1316  * turns out to misbehave unpleasantly for repeated cycles of
1317  * palloc/repalloc/pfree: the eventually freed chunks go into the
1318  * wrong freelist for the next initial palloc request, and so we leak
1319  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1320  */
1321  AllocPointer newPointer;
1322  Size oldsize;
1323 
1324  /* allocate new chunk */
1325  newPointer = AllocSetAlloc((MemoryContext) set, size);
1326 
1327  /* leave immediately if request was not completed */
1328  if (newPointer == NULL)
1329  {
1330  /* Disallow access to the chunk header. */
1331  VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1332  return NULL;
1333  }
1334 
1335  /*
1336  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1337  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1338  * definedness from the old allocation to the new. If we know the old
1339  * allocation, copy just that much. Otherwise, make the entire old
1340  * chunk defined to avoid errors as we copy the currently-NOACCESS
1341  * trailing bytes.
1342  */
1343  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1344 #ifdef MEMORY_CONTEXT_CHECKING
1345  oldsize = chunk->requested_size;
1346 #else
1347  oldsize = oldchksize;
1348  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1349 #endif
1350 
1351  /* transfer existing data (certain to fit) */
1352  memcpy(newPointer, pointer, oldsize);
1353 
1354  /* free old chunk */
1355  AllocSetFree(pointer);
1356 
1357  return newPointer;
1358  }
1359 }
void * AllocPointer
Definition: aset.c:113
void AllocSetFree(void *pointer)
Definition: aset.c:998
void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:699
#define realloc(a, b)
Definition: header.h:60

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocSetAlloc(), AllocSetFree(), AllocBlockData::aset, Assert(), AllocSetContext::blocks, elog(), AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, AllocSetContext::header, MAXALIGN, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkIsExternal(), Min, MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, realloc, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.

◆ AllocSetReset()

void AllocSetReset ( MemoryContext  context)

Definition at line 533 of file aset.c.

534 {
535  AllocSet set = (AllocSet) context;
536  AllocBlock block;
537  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
538 
539  Assert(AllocSetIsValid(set));
540 
541 #ifdef MEMORY_CONTEXT_CHECKING
542  /* Check for corruption and leaks before freeing */
543  AllocSetCheck(context);
544 #endif
545 
546  /* Remember keeper block size for Assert below */
547  keepersize = set->keeper->endptr - ((char *) set);
548 
549  /* Clear chunk freelists */
550  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
551 
552  block = set->blocks;
553 
554  /* New blocks list will be just the keeper block */
555  set->blocks = set->keeper;
556 
557  while (block != NULL)
558  {
559  AllocBlock next = block->next;
560 
561  if (block == set->keeper)
562  {
563  /* Reset the block, but don't return it to malloc */
564  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
565 
566 #ifdef CLOBBER_FREED_MEMORY
567  wipe_mem(datastart, block->freeptr - datastart);
568 #else
569  /* wipe_mem() would have done this */
570  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
571 #endif
572  block->freeptr = datastart;
573  block->prev = NULL;
574  block->next = NULL;
575  }
576  else
577  {
578  /* Normal case, release the block */
579  context->mem_allocated -= block->endptr - ((char *) block);
580 
581 #ifdef CLOBBER_FREED_MEMORY
582  wipe_mem(block, block->freeptr - ((char *) block));
583 #endif
584  free(block);
585  }
586  block = next;
587  }
588 
589  Assert(context->mem_allocated == keepersize);
590 
591  /* Reset block size allocation sequence, too */
592  set->nextBlockSize = set->initBlockSize;
593 }
#define MemSetAligned(start, val, len)
Definition: c.h:1034
Size initBlockSize
Definition: aset.c:159

References ALLOC_BLOCKHDRSZ, AllocSetIsValid, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, free, AllocSetContext::freelist, AllocBlockData::freeptr, AllocSetContext::initBlockSize, AllocSetContext::keeper, MemoryContextData::mem_allocated, MemSetAligned, next, AllocBlockData::next, AllocSetContext::nextBlockSize, PG_USED_FOR_ASSERTS_ONLY, AllocBlockData::prev, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetStats()

void AllocSetStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 1454 of file aset.c.

1457 {
1458  AllocSet set = (AllocSet) context;
1459  Size nblocks = 0;
1460  Size freechunks = 0;
1461  Size totalspace;
1462  Size freespace = 0;
1463  AllocBlock block;
1464  int fidx;
1465 
1466  Assert(AllocSetIsValid(set));
1467 
1468  /* Include context header in totalspace */
1469  totalspace = MAXALIGN(sizeof(AllocSetContext));
1470 
1471  for (block = set->blocks; block != NULL; block = block->next)
1472  {
1473  nblocks++;
1474  totalspace += block->endptr - ((char *) block);
1475  freespace += block->endptr - block->freeptr;
1476  }
1477  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1478  {
1479  Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1480  MemoryChunk *chunk = set->freelist[fidx];
1481 
1482  while (chunk != NULL)
1483  {
1484  AllocFreeListLink *link = GetFreeListLink(chunk);
1485 
1486  /* Allow access to the chunk header. */
1487  VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1488  Assert(MemoryChunkGetValue(chunk) == fidx);
1489  VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1490 
1491  freechunks++;
1492  freespace += chksz + ALLOC_CHUNKHDRSZ;
1493 
1494  VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1495  chunk = link->next;
1496  VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1497  }
1498  }
1499 
1500  if (printfunc)
1501  {
1502  char stats_string[200];
1503 
1504  snprintf(stats_string, sizeof(stats_string),
1505  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1506  totalspace, nblocks, freespace, freechunks,
1507  totalspace - freespace);
1508  printfunc(context, passthru, stats_string, print_to_stderr);
1509  }
1510 
1511  if (totals)
1512  {
1513  totals->nblocks += nblocks;
1514  totals->freechunks += freechunks;
1515  totals->totalspace += totalspace;
1516  totals->freespace += freespace;
1517  }
1518 }
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:84
#define snprintf
Definition: port.h:238

References ALLOC_CHUNKHDRSZ, ALLOCSET_NUM_FREELISTS, AllocSetIsValid, Assert(), AllocSetContext::blocks, AllocBlockData::endptr, MemoryContextCounters::freechunks, AllocSetContext::freelist, AllocBlockData::freeptr, MemoryContextCounters::freespace, GetChunkSizeFromFreeListIdx, GetFreeListLink, MAXALIGN, MemoryChunkGetValue(), MemoryContextCounters::nblocks, AllocBlockData::next, snprintf, MemoryContextCounters::totalspace, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationAlloc()

void* GenerationAlloc ( MemoryContext  context,
Size  size 
)

Definition at line 341 of file generation.c.

342 {
343  GenerationContext *set = (GenerationContext *) context;
344  GenerationBlock *block;
345  MemoryChunk *chunk;
346  Size chunk_size;
347  Size required_size;
348 
349  Assert(GenerationIsValid(set));
350 
351 #ifdef MEMORY_CONTEXT_CHECKING
352  /* ensure there's always space for the sentinel byte */
353  chunk_size = MAXALIGN(size + 1);
354 #else
355  chunk_size = MAXALIGN(size);
356 #endif
357  required_size = chunk_size + Generation_CHUNKHDRSZ;
358 
359  /* is it an over-sized chunk? if yes, allocate special block */
360  if (chunk_size > set->allocChunkLimit)
361  {
362  Size blksize = required_size + Generation_BLOCKHDRSZ;
363 
364  block = (GenerationBlock *) malloc(blksize);
365  if (block == NULL)
366  return NULL;
367 
368  context->mem_allocated += blksize;
369 
370  /* block with a single (used) chunk */
371  block->context = set;
372  block->blksize = blksize;
373  block->nchunks = 1;
374  block->nfree = 0;
375 
376  /* the block is completely full */
377  block->freeptr = block->endptr = ((char *) block) + blksize;
378 
379  chunk = (MemoryChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
380 
381  /* mark the MemoryChunk as externally managed */
382  MemoryChunkSetHdrMaskExternal(chunk, MCTX_GENERATION_ID);
383 
384 #ifdef MEMORY_CONTEXT_CHECKING
385  chunk->requested_size = size;
386  /* set mark to catch clobber of "unused" space */
387  Assert(size < chunk_size);
388  set_sentinel(MemoryChunkGetPointer(chunk), size);
389 #endif
390 #ifdef RANDOMIZE_ALLOCATED_MEMORY
391  /* fill the allocated space with junk */
392  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
393 #endif
394 
395  /* add the block to the list of allocated blocks */
396  dlist_push_head(&set->blocks, &block->node);
397 
398  /* Ensure any padding bytes are marked NOACCESS. */
399  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
400  chunk_size - size);
401 
402  /* Disallow access to the chunk header. */
403  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
404 
405  return MemoryChunkGetPointer(chunk);
406  }
407 
408  /*
409  * Not an oversized chunk. We try to first make use of the current block,
410  * but if there's not enough space in it, instead of allocating a new
411  * block, we look to see if the freeblock is empty and has enough space.
412  * If not, we'll also try the same using the keeper block. The keeper
413  * block may have become empty and we have no other way to reuse it again
414  * if we don't try to use it explicitly here.
415  *
416  * We don't want to start filling the freeblock before the current block
417  * is full, otherwise we may cause fragmentation in FIFO type workloads.
418  * We only switch to using the freeblock or keeper block if those blocks
419  * are completely empty. If we didn't do that we could end up fragmenting
420  * consecutive allocations over multiple blocks which would be a problem
421  * that would compound over time.
422  */
423  block = set->block;
424 
425  if (block == NULL ||
426  GenerationBlockFreeBytes(block) < required_size)
427  {
428  Size blksize;
429  GenerationBlock *freeblock = set->freeblock;
430 
431  if (freeblock != NULL &&
432  GenerationBlockIsEmpty(freeblock) &&
433  GenerationBlockFreeBytes(freeblock) >= required_size)
434  {
435  block = freeblock;
436 
437  /*
438  * Zero out the freeblock as we'll set this to the current block
439  * below
440  */
441  set->freeblock = NULL;
442  }
443  else if (GenerationBlockIsEmpty(set->keeper) &&
444  GenerationBlockFreeBytes(set->keeper) >= required_size)
445  {
446  block = set->keeper;
447  }
448  else
449  {
450  /*
451  * The first such block has size initBlockSize, and we double the
452  * space in each succeeding block, but not more than maxBlockSize.
453  */
454  blksize = set->nextBlockSize;
455  set->nextBlockSize <<= 1;
456  if (set->nextBlockSize > set->maxBlockSize)
457  set->nextBlockSize = set->maxBlockSize;
458 
459  /* we'll need a block hdr too, so add that to the required size */
460  required_size += Generation_BLOCKHDRSZ;
461 
462  /* round the size up to the next power of 2 */
463  if (blksize < required_size)
464  blksize = pg_nextpower2_size_t(required_size);
465 
466  block = (GenerationBlock *) malloc(blksize);
467 
468  if (block == NULL)
469  return NULL;
470 
471  context->mem_allocated += blksize;
472 
473  /* initialize the new block */
474  GenerationBlockInit(set, block, blksize);
475 
476  /* add it to the doubly-linked list of blocks */
477  dlist_push_head(&set->blocks, &block->node);
478 
479  /* Zero out the freeblock in case it's become full */
480  set->freeblock = NULL;
481  }
482 
483  /* and also use it as the current allocation block */
484  set->block = block;
485  }
486 
487  /* we're supposed to have a block with enough free space now */
488  Assert(block != NULL);
489  Assert((block->endptr - block->freeptr) >= Generation_CHUNKHDRSZ + chunk_size);
490 
491  chunk = (MemoryChunk *) block->freeptr;
492 
493  /* Prepare to initialize the chunk header. */
494  VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);
495 
496  block->nchunks += 1;
497  block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
498 
499  Assert(block->freeptr <= block->endptr);
500 
501  MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_GENERATION_ID);
502 #ifdef MEMORY_CONTEXT_CHECKING
503  chunk->requested_size = size;
504  /* set mark to catch clobber of "unused" space */
505  Assert(size < chunk_size);
506  set_sentinel(MemoryChunkGetPointer(chunk), size);
507 #endif
508 #ifdef RANDOMIZE_ALLOCATED_MEMORY
509  /* fill the allocated space with junk */
510  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
511 #endif
512 
513  /* Ensure any padding bytes are marked NOACCESS. */
514  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
515  chunk_size - size);
516 
517  /* Disallow access to the chunk header. */
518  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
519 
520  return MemoryChunkGetPointer(chunk);
521 }
static void GenerationBlockInit(GenerationContext *context, GenerationBlock *block, Size blksize)
Definition: generation.c:529
static Size GenerationBlockFreeBytes(GenerationBlock *block)
Definition: generation.c:584
#define Generation_CHUNKHDRSZ
Definition: generation.c:47
#define Generation_BLOCKHDRSZ
Definition: generation.c:46
static bool GenerationBlockIsEmpty(GenerationBlock *block)
Definition: generation.c:550
#define GenerationIsValid(set)
Definition: generation.c:105
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
#define pg_nextpower2_size_t
Definition: pg_bitutils.h:335
char * freeptr
Definition: generation.c:97
dlist_node node
Definition: generation.c:92
GenerationContext * context
Definition: generation.c:93
GenerationBlock * keeper
Definition: generation.c:74
GenerationBlock * freeblock
Definition: generation.c:72
dlist_head blocks
Definition: generation.c:75
GenerationBlock * block
Definition: generation.c:69

References GenerationContext::allocChunkLimit, Assert(), GenerationBlock::blksize, GenerationContext::block, GenerationContext::blocks, GenerationBlock::context, dlist_push_head(), GenerationBlock::endptr, GenerationContext::freeblock, GenerationBlock::freeptr, Generation_BLOCKHDRSZ, Generation_CHUNKHDRSZ, GenerationBlockFreeBytes(), GenerationBlockInit(), GenerationBlockIsEmpty(), GenerationIsValid, GenerationContext::keeper, malloc, MAXALIGN, GenerationContext::maxBlockSize, MCTX_GENERATION_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkSetHdrMask(), MemoryChunkSetHdrMaskExternal(), GenerationBlock::nchunks, GenerationContext::nextBlockSize, GenerationBlock::nfree, GenerationBlock::node, pg_nextpower2_size_t, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

Referenced by GenerationRealloc().

◆ GenerationDelete()

void GenerationDelete ( MemoryContext  context)

Definition at line 319 of file generation.c.

320 {
321  /* Reset to release all releasable GenerationBlocks */
322  GenerationReset(context);
323  /* And free the context header and keeper block */
324  free(context);
325 }
void GenerationReset(MemoryContext context)
Definition: generation.c:274

References free, and GenerationReset().

◆ GenerationFree()

void GenerationFree ( void *  pointer)

Definition at line 619 of file generation.c.

620 {
621  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
622  GenerationBlock *block;
623  GenerationContext *set;
624 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
625  || defined(CLOBBER_FREED_MEMORY)
626  Size chunksize;
627 #endif
628 
629  /* Allow access to the chunk header. */
630  VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
631 
632  if (MemoryChunkIsExternal(chunk))
633  {
634  block = ExternalChunkGetBlock(chunk);
635 
636  /*
637  * Try to verify that we have a sane block pointer: the block header
638  * should reference a generation context.
639  */
640  if (!GenerationBlockIsValid(block))
641  elog(ERROR, "could not find block containing chunk %p", chunk);
642 
643 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
644  || defined(CLOBBER_FREED_MEMORY)
645  chunksize = block->endptr - (char *) pointer;
646 #endif
647  }
648  else
649  {
650  block = MemoryChunkGetBlock(chunk);
651 
652  /*
653  * In this path, for speed reasons we just Assert that the referenced
654  * block is good. Future field experience may show that this Assert
655  * had better become a regular runtime test-and-elog check.
656  */
657  Assert(GenerationBlockIsValid(block));
658 
659 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
660  || defined(CLOBBER_FREED_MEMORY)
661  chunksize = MemoryChunkGetValue(chunk);
662 #endif
663  }
664 
665 #ifdef MEMORY_CONTEXT_CHECKING
666  /* Test for someone scribbling on unused space in chunk */
667  Assert(chunk->requested_size < chunksize);
668  if (!sentinel_ok(pointer, chunk->requested_size))
669  elog(WARNING, "detected write past chunk end in %s %p",
670  ((MemoryContext) block->context)->name, chunk);
671 #endif
672 
673 #ifdef CLOBBER_FREED_MEMORY
674  wipe_mem(pointer, chunksize);
675 #endif
676 
677 #ifdef MEMORY_CONTEXT_CHECKING
678  /* Reset requested_size to InvalidAllocSize in freed chunks */
679  chunk->requested_size = InvalidAllocSize;
680 #endif
681 
682  block->nfree += 1;
683 
684  Assert(block->nchunks > 0);
685  Assert(block->nfree <= block->nchunks);
686 
687  /* If there are still allocated chunks in the block, we're done. */
688  if (block->nfree < block->nchunks)
689  return;
690 
691  set = block->context;
692 
693  /* Don't try to free the keeper block, just mark it empty */
694  if (block == set->keeper)
695  {
696  GenerationBlockMarkEmpty(block);
697  return;
698  }
699 
700  /*
701  * If there is no freeblock set or if this is the freeblock then instead
702  * of freeing this memory, we keep it around so that new allocations have
703  * the option of recycling it.
704  */
705  if (set->freeblock == NULL || set->freeblock == block)
706  {
707  /* XXX should we only recycle maxBlockSize sized blocks? */
708  set->freeblock = block;
709  GenerationBlockMarkEmpty(block);
710  return;
711  }
712 
713  /* Also make sure the block is not marked as the current block. */
714  if (set->block == block)
715  set->block = NULL;
716 
717  /*
718  * The block is empty, so let's get rid of it. First remove it from the
719  * list of blocks, then return it to malloc().
720  */
721  dlist_delete(&block->node);
722 
723  set->header.mem_allocated -= block->blksize;
724  free(block);
725 }
static void GenerationBlockMarkEmpty(GenerationBlock *block)
Definition: generation.c:560
#define GenerationBlockIsValid(block)
Definition: generation.c:112
#define ExternalChunkGetBlock(chunk)
Definition: generation.c:120
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
MemoryContextData header
Definition: generation.c:61

References Assert(), GenerationBlock::blksize, GenerationContext::block, GenerationBlock::context, dlist_delete(), elog(), GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, free, GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, GenerationBlockMarkEmpty(), GenerationContext::header, InvalidAllocSize, GenerationContext::keeper, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), GenerationBlock::nchunks, GenerationBlock::nfree, GenerationBlock::node, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

Referenced by GenerationRealloc().

◆ GenerationGetChunkContext()

MemoryContext GenerationGetChunkContext ( void *  pointer)

Definition at line 876 of file generation.c.

877 {
878  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
879  GenerationBlock *block;
880 
881  /* Allow access to the chunk header. */
882  VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
883 
884  if (MemoryChunkIsExternal(chunk))
885  block = ExternalChunkGetBlock(chunk);
886  else
887  block = (GenerationBlock *) MemoryChunkGetBlock(chunk);
888 
889  /* Disallow access to the chunk header. */
890  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
891 
892  Assert(GenerationBlockIsValid(block));
893  return &block->context->header;
894 }

References Assert(), GenerationBlock::context, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, GenerationContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationGetChunkSpace()

Size GenerationGetChunkSpace ( void *  pointer)

Definition at line 902 of file generation.c.

903 {
904  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
905  Size chunksize;
906 
907  /* Allow access to the chunk header. */
908  VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
909 
910  if (MemoryChunkIsExternal(chunk))
911  {
912  GenerationBlock *block = ExternalChunkGetBlock(chunk);
913 
915  chunksize = block->endptr - (char *) pointer;
916  }
917  else
918  chunksize = MemoryChunkGetValue(chunk);
919 
920  /* Disallow access to the chunk header. */
921  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
922 
923  return Generation_CHUNKHDRSZ + chunksize;
924 }

References Assert(), GenerationBlock::endptr, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationIsEmpty()

bool GenerationIsEmpty ( MemoryContext  context)

Definition at line 931 of file generation.c.

932 {
933  GenerationContext *set = (GenerationContext *) context;
934  dlist_iter iter;
935 
936  Assert(GenerationIsValid(set));
937 
938  dlist_foreach(iter, &set->blocks)
939  {
940  GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
941 
942  if (block->nchunks > 0)
943  return false;
944  }
945 
946  return true;
947 }
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
dlist_node * cur
Definition: ilist.h:179

References Assert(), GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationIsValid, and GenerationBlock::nchunks.

◆ GenerationRealloc()

void* GenerationRealloc ( void *  pointer,
Size  size 
)

Definition at line 734 of file generation.c.

735 {
736  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
737  GenerationContext *set;
738  GenerationBlock *block;
739  GenerationPointer newPointer;
740  Size oldsize;
741 
742  /* Allow access to the chunk header. */
743  VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
744 
745  if (MemoryChunkIsExternal(chunk))
746  {
747  block = ExternalChunkGetBlock(chunk);
748 
749  /*
750  * Try to verify that we have a sane block pointer: the block header
751  * should reference a generation context.
752  */
753  if (!GenerationBlockIsValid(block))
754  elog(ERROR, "could not find block containing chunk %p", chunk);
755 
756  oldsize = block->endptr - (char *) pointer;
757  }
758  else
759  {
760  block = MemoryChunkGetBlock(chunk);
761 
762  /*
763  * In this path, for speed reasons we just Assert that the referenced
764  * block is good. Future field experience may show that this Assert
765  * had better become a regular runtime test-and-elog check.
766  */
767  Assert(GenerationBlockIsValid(block));
768 
769  oldsize = MemoryChunkGetValue(chunk);
770  }
771 
772  set = block->context;
773 
774 #ifdef MEMORY_CONTEXT_CHECKING
775  /* Test for someone scribbling on unused space in chunk */
776  Assert(chunk->requested_size < oldsize);
777  if (!sentinel_ok(pointer, chunk->requested_size))
778  elog(WARNING, "detected write past chunk end in %s %p",
779  ((MemoryContext) set)->name, chunk);
780 #endif
781 
782  /*
783  * Maybe the allocated area already is >= the new size. (In particular,
784  * we always fall out here if the requested size is a decrease.)
785  *
786  * This memory context does not use power-of-2 chunk sizing and instead
787  * carves the chunks to be as small as possible, so most repalloc() calls
788  * will end up in the palloc/memcpy/pfree branch.
789  *
790  * XXX Perhaps we should annotate this condition with unlikely()?
791  */
792  if (oldsize >= size)
793  {
794 #ifdef MEMORY_CONTEXT_CHECKING
795  Size oldrequest = chunk->requested_size;
796 
797 #ifdef RANDOMIZE_ALLOCATED_MEMORY
798  /* We can only fill the extra space if we know the prior request */
799  if (size > oldrequest)
800  randomize_mem((char *) pointer + oldrequest,
801  size - oldrequest);
802 #endif
803 
804  chunk->requested_size = size;
805 
806  /*
807  * If this is an increase, mark any newly-available part UNDEFINED.
808  * Otherwise, mark the obsolete part NOACCESS.
809  */
810  if (size > oldrequest)
811  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
812  size - oldrequest);
813  else
814  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
815  oldsize - size);
816 
817  /* set mark to catch clobber of "unused" space */
818  set_sentinel(pointer, size);
819 #else /* !MEMORY_CONTEXT_CHECKING */
820 
821  /*
822  * We don't have the information to determine whether we're growing
823  * the old request or shrinking it, so we conservatively mark the
824  * entire new allocation DEFINED.
825  */
826  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
827  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
828 #endif
829 
830  /* Disallow access to the chunk header. */
831  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
832 
833  return pointer;
834  }
835 
836  /* allocate new chunk */
837  newPointer = GenerationAlloc((MemoryContext) set, size);
838 
839  /* leave immediately if request was not completed */
840  if (newPointer == NULL)
841  {
842  /* Disallow access to the chunk header. */
843  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
844  return NULL;
845  }
846 
847  /*
848  * GenerationAlloc() may have returned a region that is still NOACCESS.
849  * Change it to UNDEFINED for the moment; memcpy() will then transfer
850  * definedness from the old allocation to the new. If we know the old
851  * allocation, copy just that much. Otherwise, make the entire old chunk
852  * defined to avoid errors as we copy the currently-NOACCESS trailing
853  * bytes.
854  */
855  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
856 #ifdef MEMORY_CONTEXT_CHECKING
857  oldsize = chunk->requested_size;
858 #else
859  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
860 #endif
861 
862  /* transfer existing data (certain to fit) */
863  memcpy(newPointer, pointer, oldsize);
864 
865  /* free old chunk */
866  GenerationFree(pointer);
867 
868  return newPointer;
869 }
void GenerationFree(void *pointer)
Definition: generation.c:619
void * GenerationPointer
Definition: generation.c:53
void * GenerationAlloc(MemoryContext context, Size size)
Definition: generation.c:341

References Assert(), GenerationBlock::context, elog(), GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationAlloc(), GenerationBlockIsValid, GenerationFree(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), name, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.

◆ GenerationReset()

void GenerationReset ( MemoryContext  context)

Definition at line 274 of file generation.c.

275 {
276  GenerationContext *set = (GenerationContext *) context;
277  dlist_mutable_iter miter;
278 
279  Assert(GenerationIsValid(set));
280 
281 #ifdef MEMORY_CONTEXT_CHECKING
282  /* Check for corruption and leaks before freeing */
283  GenerationCheck(context);
284 #endif
285 
286  /*
287  * NULLify the free block pointer. We must do this before calling
288  * GenerationBlockFree as that function never expects to free the
289  * freeblock.
290  */
291  set->freeblock = NULL;
292 
293  dlist_foreach_modify(miter, &set->blocks)
294  {
295  GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
296 
297  if (block == set->keeper)
298  GenerationBlockMarkEmpty(block);
299  else
300  GenerationBlockFree(set, block);
301  }
302 
303  /* set it so new allocations make use of the keeper block */
304  set->block = set->keeper;
305 
306  /* Reset block size allocation sequence, too */
307  set->nextBlockSize = set->initBlockSize;
308 
309  /* Ensure there is only 1 item in the dlist */
310  Assert(!dlist_is_empty(&set->blocks));
311  Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
312 }
static void GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
Definition: generation.c:594
static bool dlist_has_next(const dlist_head *head, const dlist_node *node)
Definition: ilist.h:503
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
static dlist_node * dlist_head_node(dlist_head *head)
Definition: ilist.h:565
dlist_node * cur
Definition: ilist.h:200

References Assert(), GenerationContext::block, GenerationContext::blocks, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), GenerationContext::freeblock, GenerationBlockFree(), GenerationBlockMarkEmpty(), GenerationIsValid, GenerationContext::initBlockSize, GenerationContext::keeper, and GenerationContext::nextBlockSize.

Referenced by GenerationDelete().

◆ GenerationStats()

void GenerationStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 962 of file generation.c.

965 {
966  GenerationContext *set = (GenerationContext *) context;
967  Size nblocks = 0;
968  Size nchunks = 0;
969  Size nfreechunks = 0;
970  Size totalspace;
971  Size freespace = 0;
972  dlist_iter iter;
973 
974  Assert(GenerationIsValid(set));
975 
976  /* Include context header in totalspace */
977  totalspace = MAXALIGN(sizeof(GenerationContext));
978 
979  dlist_foreach(iter, &set->blocks)
980  {
981  GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
982 
983  nblocks++;
984  nchunks += block->nchunks;
985  nfreechunks += block->nfree;
986  totalspace += block->blksize;
987  freespace += (block->endptr - block->freeptr);
988  }
989 
990  if (printfunc)
991  {
992  char stats_string[200];
993 
994  snprintf(stats_string, sizeof(stats_string),
995  "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
996  totalspace, nblocks, nchunks, freespace,
997  nfreechunks, totalspace - freespace);
998  printfunc(context, passthru, stats_string, print_to_stderr);
999  }
1000 
1001  if (totals)
1002  {
1003  totals->nblocks += nblocks;
1004  totals->freechunks += nfreechunks;
1005  totals->totalspace += totalspace;
1006  totals->freespace += freespace;
1007  }
1008 }

References Assert(), GenerationBlock::blksize, GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationBlock::endptr, MemoryContextCounters::freechunks, GenerationBlock::freeptr, MemoryContextCounters::freespace, GenerationIsValid, MAXALIGN, MemoryContextCounters::nblocks, GenerationBlock::nchunks, GenerationBlock::nfree, snprintf, and MemoryContextCounters::totalspace.

◆ MemoryContextCreate()

void MemoryContextCreate ( MemoryContext  node,
NodeTag  tag,
MemoryContextMethodID  method_id,
MemoryContext  parent,
const char *  name 
)

Definition at line 973 of file mcxt.c.

978 {
979  /* Creating new memory contexts is not allowed in a critical section */
980  Assert(CritSectionCount == 0);
981 
982  /* Initialize all standard fields of memory context header */
983  node->type = tag;
984  node->isReset = true;
985  node->methods = &mcxt_methods[method_id];
986  node->parent = parent;
987  node->firstchild = NULL;
988  node->mem_allocated = 0;
989  node->prevchild = NULL;
/* 'name' is stored by pointer, not copied — caller must keep it valid for
 * the context's lifetime (TODO confirm against callers' conventions). */
990  node->name = name;
991  node->ident = NULL;
992  node->reset_cbs = NULL;
993 
994  /* OK to link node into context tree */
995  if (parent)
996  {
/* Push onto the front of the parent's child list (doubly linked). */
997  node->nextchild = parent->firstchild;
998  if (parent->firstchild != NULL)
999  parent->firstchild->prevchild = node;
1000  parent->firstchild = node;
1001  /* inherit allowInCritSection flag from parent */
1002  node->allowInCritSection = parent->allowInCritSection;
1003  }
1004  else
1005  {
1006  node->nextchild = NULL;
1007  node->allowInCritSection = false;
1008  }
1009 
/* Register the new context with Valgrind as a memory pool (no-op unless
 * built with Valgrind support). */
1010  VALGRIND_CREATE_MEMPOOL(node, 0, false);
1011 }
volatile uint32 CritSectionCount
Definition: globals.c:42
static const MemoryContextMethods mcxt_methods[]
Definition: mcxt.c:45
#define VALGRIND_CREATE_MEMPOOL(context, redzones, zeroed)
Definition: memdebug.h:24
MemoryContext prevchild
Definition: memnodes.h:91
MemoryContext firstchild
Definition: memnodes.h:90
bool allowInCritSection
Definition: memnodes.h:86
const char * ident
Definition: memnodes.h:94
MemoryContext parent
Definition: memnodes.h:89
MemoryContextCallback * reset_cbs
Definition: memnodes.h:95
const MemoryContextMethods * methods
Definition: memnodes.h:88

References MemoryContextData::allowInCritSection, Assert(), CritSectionCount, MemoryContextData::firstchild, MemoryContextData::ident, MemoryContextData::isReset, mcxt_methods, MemoryContextData::mem_allocated, MemoryContextData::methods, name, MemoryContextData::name, MemoryContextData::nextchild, MemoryContextData::parent, MemoryContextData::prevchild, MemoryContextData::reset_cbs, and VALGRIND_CREATE_MEMPOOL.

Referenced by AllocSetContextCreateInternal(), GenerationContextCreate(), and SlabContextCreate().

◆ SlabAlloc()

void* SlabAlloc ( MemoryContext  context,
Size  size 
)

Definition at line 495 of file slab.c.

496 {
497  SlabContext *slab = (SlabContext *) context;
498  SlabBlock *block;
499  MemoryChunk *chunk;
500 
501  Assert(SlabIsValid(slab));
502 
503  /* sanity check that this is pointing to a valid blocklist */
504  Assert(slab->curBlocklistIndex >= 0);
506 
507  /* make sure we only allow correct request size */
508  if (unlikely(size != slab->chunkSize))
509  elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
510  size, slab->chunkSize);
511 
512  /*
513  * Handle the case when there are no partially filled blocks available.
514  * SlabFree() will have updated the curBlocklistIndex setting it to zero
515  * to indicate that it has freed the final block. Also later in
516  * SlabAlloc() we will set the curBlocklistIndex to zero if we end up
517  * filling the final block.
518  */
519  if (unlikely(slab->curBlocklistIndex == 0))
520  {
521  dlist_head *blocklist;
522  int blocklist_idx;
523 
524  /* to save allocating a new one, first check the empty blocks list */
525  if (dclist_count(&slab->emptyblocks) > 0)
526  {
528 
529  block = dlist_container(SlabBlock, node, node);
530 
531  /*
532  * SlabFree() should have left this block in a valid state with
533  * all chunks free. Ensure that's the case.
534  */
535  Assert(block->nfree == slab->chunksPerBlock);
536 
537  /* fetch the next chunk from this block */
538  chunk = SlabGetNextFreeChunk(slab, block);
539  }
540  else
541  {
542  block = (SlabBlock *) malloc(slab->blockSize);
543 
544  if (unlikely(block == NULL))
545  return NULL;
546 
547  block->slab = slab;
548  context->mem_allocated += slab->blockSize;
549 
550  /* use the first chunk in the new block */
551  chunk = SlabBlockGetChunk(slab, block, 0);
552 
553  block->nfree = slab->chunksPerBlock - 1;
554  block->unused = SlabBlockGetChunk(slab, block, 1);
555  block->freehead = NULL;
556  block->nunused = slab->chunksPerBlock - 1;
557  }
558 
559  /* find the blocklist element for storing blocks with 1 used chunk */
560  blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
561  blocklist = &slab->blocklist[blocklist_idx];
562 
563  /* this better be empty. We just added a block thinking it was */
564  Assert(dlist_is_empty(blocklist));
565 
566  dlist_push_head(blocklist, &block->node);
567 
568  slab->curBlocklistIndex = blocklist_idx;
569  }
570  else
571  {
572  dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
573  int new_blocklist_idx;
574 
575  Assert(!dlist_is_empty(blocklist));
576 
577  /* grab the block from the blocklist */
578  block = dlist_head_element(SlabBlock, node, blocklist);
579 
580  /* make sure we actually got a valid block, with matching nfree */
581  Assert(block != NULL);
582  Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
583  Assert(block->nfree > 0);
584 
585  /* fetch the next chunk from this block */
586  chunk = SlabGetNextFreeChunk(slab, block);
587 
588  /* get the new blocklist index based on the new free chunk count */
589  new_blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
590 
591  /*
592  * Handle the case where the blocklist index changes. This also deals
593  * with blocks becoming full as only full blocks go at index 0.
594  */
595  if (unlikely(slab->curBlocklistIndex != new_blocklist_idx))
596  {
597  dlist_delete_from(blocklist, &block->node);
598  dlist_push_head(&slab->blocklist[new_blocklist_idx], &block->node);
599 
600  if (dlist_is_empty(blocklist))
602  }
603  }
604 
605  /*
606  * Check that the chunk pointer is actually somewhere on the block and is
607  * aligned as expected.
608  */
609  Assert(chunk >= SlabBlockGetChunk(slab, block, 0));
610  Assert(chunk <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1));
611  Assert(SlabChunkMod(slab, block, chunk) == 0);
612 
613  /* Prepare to initialize the chunk header. */
615 
616  MemoryChunkSetHdrMask(chunk, block, MAXALIGN(slab->chunkSize),
617  MCTX_SLAB_ID);
618 #ifdef MEMORY_CONTEXT_CHECKING
619  /* slab mark to catch clobber of "unused" space */
620  Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
621  set_sentinel(MemoryChunkGetPointer(chunk), size);
622  VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
623  Slab_CHUNKHDRSZ + slab->chunkSize,
624  slab->fullChunkSize -
625  (slab->chunkSize + Slab_CHUNKHDRSZ));
626 #endif
627 
628 #ifdef RANDOMIZE_ALLOCATED_MEMORY
629  /* fill the allocated space with junk */
630  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
631 #endif
632 
633  /* Disallow access to the chunk header. */
635 
636  return MemoryChunkGetPointer(chunk);
637 }
#define unlikely(x)
Definition: c.h:295
static void dlist_delete_from(dlist_head *head, dlist_node *node)
Definition: ilist.h:429
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:603
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
static dlist_node * dclist_pop_head_node(dclist_head *head)
Definition: ilist.h:789
#define SlabIsValid(set)
Definition: slab.c:196
#define Slab_CHUNKHDRSZ
Definition: slab.c:157
static int32 SlabBlocklistIndex(SlabContext *slab, int nfree)
Definition: slab.c:211
static int32 SlabFindNextBlockListIndex(SlabContext *slab)
Definition: slab.c:251
static MemoryChunk * SlabGetNextFreeChunk(SlabContext *slab, SlabBlock *block)
Definition: slab.c:271
#define SlabBlockGetChunk(slab, block, n)
Definition: slab.c:165
int32 nfree
Definition: slab.c:149
MemoryChunk * freehead
Definition: slab.c:151
MemoryChunk * unused
Definition: slab.c:152
SlabContext * slab
Definition: slab.c:148
dlist_node node
Definition: slab.c:153
int32 nunused
Definition: slab.c:150
Size blockSize
Definition: slab.c:109
Size fullChunkSize
Definition: slab.c:108
dlist_head blocklist[SLAB_BLOCKLIST_COUNT]
Definition: slab.c:129
int32 chunksPerBlock
Definition: slab.c:110
int32 curBlocklistIndex
Definition: slab.c:111
Size chunkSize
Definition: slab.c:107
dclist_head emptyblocks
Definition: slab.c:120

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_pop_head_node(), dlist_container, dlist_delete_from(), dlist_head_element, dlist_is_empty(), dlist_push_head(), elog(), SlabContext::emptyblocks, ERROR, SlabBlock::freehead, SlabContext::fullChunkSize, malloc, MAXALIGN, MCTX_SLAB_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkSetHdrMask(), SlabBlock::nfree, SlabBlock::node, SlabBlock::nunused, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockGetChunk, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), SlabGetNextFreeChunk(), SlabIsValid, unlikely, SlabBlock::unused, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

◆ SlabDelete()

void SlabDelete ( MemoryContext  context)

Definition at line 481 of file slab.c.

482 {
/* Release every block owned by the context first; only then is it safe to
 * free the header, since SlabReset() reads it. */
483  /* Reset to release all the SlabBlocks */
484  SlabReset(context);
485  /* And free the context header */
486  free(context);
487 }
void SlabReset(MemoryContext context)
Definition: slab.c:427

References free, and SlabReset().

◆ SlabFree()

void SlabFree ( void *  pointer)

Definition at line 644 of file slab.c.

645 {
646  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
647  SlabBlock *block;
648  SlabContext *slab;
649  int curBlocklistIdx;
650  int newBlocklistIdx;
651 
652  /* Allow access to the chunk header. */
653  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
654 
655  block = MemoryChunkGetBlock(chunk);
656 
657  /*
658  * For speed reasons we just Assert that the referenced block is good.
659  * Future field experience may show that this Assert had better become a
660  * regular runtime test-and-elog check.
661  */
662  Assert(SlabBlockIsValid(block));
663  slab = block->slab;
664 
665 #ifdef MEMORY_CONTEXT_CHECKING
666  /* Test for someone scribbling on unused space in chunk */
667  Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
668  if (!sentinel_ok(pointer, slab->chunkSize))
669  elog(WARNING, "detected write past chunk end in %s %p",
670  slab->header.name, chunk);
671 #endif
672 
673  /* push this chunk onto the head of the block's free list */
674  *(MemoryChunk **) pointer = block->freehead;
675  block->freehead = chunk;
676 
677  block->nfree++;
678 
679  Assert(block->nfree > 0);
680  Assert(block->nfree <= slab->chunksPerBlock);
681 
682 #ifdef CLOBBER_FREED_MEMORY
683  /* don't wipe the free list MemoryChunk pointer stored in the chunk */
684  wipe_mem((char *) pointer + sizeof(MemoryChunk *),
685  slab->chunkSize - sizeof(MemoryChunk *));
686 #endif
687 
688  curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
689  newBlocklistIdx = SlabBlocklistIndex(slab, block->nfree);
690 
691  /*
692  * Check if the block needs to be moved to another element on the
693  * blocklist based on it now having 1 more free chunk.
694  */
695  if (unlikely(curBlocklistIdx != newBlocklistIdx))
696  {
697  /* do the move */
698  dlist_delete_from(&slab->blocklist[curBlocklistIdx], &block->node);
699  dlist_push_head(&slab->blocklist[newBlocklistIdx], &block->node);
700 
701  /*
702  * The blocklist[curBlocklistIdx] may now be empty or we may now be
703  * able to use a lower-element blocklist. We'll need to redetermine
704  * what the slab->curBlocklistIndex is if the current blocklist was
705  * changed or if a lower element one was changed. We must ensure we
706  * use the list with the fullest block(s).
707  */
708  if (slab->curBlocklistIndex >= curBlocklistIdx)
709  {
710  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
711 
712  /*
713  * We know there must be a block with at least 1 unused chunk as
714  * we just pfree'd one. Ensure curBlocklistIndex reflects this.
715  */
716  Assert(slab->curBlocklistIndex > 0);
717  }
718  }
719 
720  /* Handle when a block becomes completely empty */
721  if (unlikely(block->nfree == slab->chunksPerBlock))
722  {
723  /* remove the block */
724  dlist_delete_from(&slab->blocklist[newBlocklistIdx], &block->node);
725 
726  /*
727  * To avoid thrashing malloc/free, we keep a list of empty blocks that
728  * we can reuse again instead of having to malloc a new one.
729  */
730  if (dclist_count(&slab->emptyblocks) < SLAB_MAXIMUM_EMPTY_BLOCKS)
731  dclist_push_head(&slab->emptyblocks, &block->node);
732  else
733  {
734  /*
735  * When we have enough empty blocks stored already, we actually
736  * free the block.
737  */
738 #ifdef CLOBBER_FREED_MEMORY
739  wipe_mem(block, slab->blockSize);
740 #endif
741  free(block);
742  slab->header.mem_allocated -= slab->blockSize;
743  }
744 
745  /*
746  * Check if we need to reset the blocklist index. This is required
747  * when the blocklist this block is on has become completely empty.
748  */
749  if (slab->curBlocklistIndex == newBlocklistIdx &&
750  dlist_is_empty(&slab->blocklist[newBlocklistIdx]))
751  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
752  }
753 }
static void dclist_push_head(dclist_head *head, dlist_node *node)
Definition: ilist.h:693
#define SlabBlockIsValid(block)
Definition: slab.c:202
#define SLAB_MAXIMUM_EMPTY_BLOCKS
Definition: slab.c:98
MemoryContextData header
Definition: slab.c:105

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_push_head(), dlist_delete_from(), dlist_is_empty(), dlist_push_head(), elog(), SlabContext::emptyblocks, free, SlabBlock::freehead, SlabContext::fullChunkSize, SlabContext::header, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryContextData::name, SlabBlock::nfree, SlabBlock::node, PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SLAB_MAXIMUM_EMPTY_BLOCKS, SlabBlockIsValid, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), unlikely, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

◆ SlabGetChunkContext()

MemoryContext SlabGetChunkContext ( void *  pointer)

Definition at line 806 of file slab.c.

807 {
808  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
809  SlabBlock *block;
810 
811  /* Allow access to the chunk header. */
812  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
813 
814  block = MemoryChunkGetBlock(chunk);
815 
816  /* Disallow access to the chunk header. */
817  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
818 
819  Assert(SlabBlockIsValid(block));
820 
821  return &block->slab->header;
822 }

References Assert(), SlabContext::header, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabGetChunkSpace()

Size SlabGetChunkSpace ( void *  pointer)

Definition at line 830 of file slab.c.

831 {
832  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
833  SlabBlock *block;
834  SlabContext *slab;
835 
836  /* Allow access to the chunk header. */
837  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
838 
839  block = MemoryChunkGetBlock(chunk);
840 
841  /* Disallow access to the chunk header. */
842  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
843 
844  Assert(SlabBlockIsValid(block));
845  slab = block->slab;
846 
847  return slab->fullChunkSize;
848 }

References Assert(), SlabContext::fullChunkSize, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabIsEmpty()

bool SlabIsEmpty ( MemoryContext  context)

Definition at line 855 of file slab.c.

856 {
857  Assert(SlabIsValid((SlabContext *) context));
858 
/* mem_allocated is adjusted by whole blocks (see SlabAlloc/SlabFree), so
 * zero here means the context currently owns no blocks at all. */
859  return (context->mem_allocated == 0);
860 }

References Assert(), MemoryContextData::mem_allocated, and SlabIsValid.

◆ SlabRealloc()

void* SlabRealloc ( void *  pointer,
Size  size 
)

Definition at line 769 of file slab.c.

770 {
771  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
772  SlabBlock *block;
773  SlabContext *slab;
774 
775  /* Allow access to the chunk header. */
776  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
777 
778  block = MemoryChunkGetBlock(chunk);
779 
780  /* Disallow access to the chunk header. */
781  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
782 
783  /*
784  * Try to verify that we have a sane block pointer: the block header
785  * should reference a slab context. (We use a test-and-elog, not just
786  * Assert, because it seems highly likely that we're here in error in the
787  * first place.)
788  */
789  if (!SlabBlockIsValid(block))
790  elog(ERROR, "could not find block containing chunk %p", chunk);
791  slab = block->slab;
792 
793  /* can't do actual realloc with slab, but let's try to be gentle */
794  if (size == slab->chunkSize)
795  return pointer;
796 
797  elog(ERROR, "slab allocator does not support realloc()");
798  return NULL; /* keep compiler quiet */
799 }

References SlabContext::chunkSize, elog(), ERROR, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabReset()

void SlabReset ( MemoryContext  context)

Definition at line 427 of file slab.c.

428 {
429  SlabContext *slab = (SlabContext *) context;
430  dlist_mutable_iter miter;
431  int i;
432 
433  Assert(SlabIsValid(slab));
434 
435 #ifdef MEMORY_CONTEXT_CHECKING
436  /* Check for corruption and leaks before freeing */
437  SlabCheck(context);
438 #endif
439 
440  /* release any retained empty blocks */
441  dclist_foreach_modify(miter, &slab->emptyblocks)
442  {
443  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
444 
445  dclist_delete_from(&slab->emptyblocks, miter.cur);
446 
447 #ifdef CLOBBER_FREED_MEMORY
448  wipe_mem(block, slab->blockSize);
449 #endif
450  free(block);
451  context->mem_allocated -= slab->blockSize;
452  }
453 
454  /* walk over blocklist and free the blocks */
455  for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
456  {
457  dlist_foreach_modify(miter, &slab->blocklist[i])
458  {
459  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
460 
461  dlist_delete(miter.cur);
462 
463 #ifdef CLOBBER_FREED_MEMORY
464  wipe_mem(block, slab->blockSize);
465 #endif
466  free(block);
467  context->mem_allocated -= slab->blockSize;
468  }
469  }
470 
/* Index 0 marks "no partially filled block available"; the next SlabAlloc()
 * will create or reuse a block. */
471  slab->curBlocklistIndex = 0;
472 
/* Every block freed above decremented mem_allocated; anything left would be
 * an accounting bug. */
473  Assert(context->mem_allocated == 0);
474 }
static void dclist_delete_from(dclist_head *head, dlist_node *node)
Definition: ilist.h:763
#define dclist_foreach_modify(iter, lhead)
Definition: ilist.h:973
int i
Definition: isn.c:73
#define SLAB_BLOCKLIST_COUNT
Definition: slab.c:95

References Assert(), SlabContext::blocklist, SlabContext::blockSize, dlist_mutable_iter::cur, SlabContext::curBlocklistIndex, dclist_delete_from(), dclist_foreach_modify, dlist_container, dlist_delete(), dlist_foreach_modify, SlabContext::emptyblocks, free, i, MemoryContextData::mem_allocated, SLAB_BLOCKLIST_COUNT, and SlabIsValid.

Referenced by SlabDelete().

◆ SlabStats()

void SlabStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 872 of file slab.c.

876 {
877  SlabContext *slab = (SlabContext *) context;
878  Size nblocks = 0;
879  Size freechunks = 0;
880  Size totalspace;
881  Size freespace = 0;
882  int i;
883 
884  Assert(SlabIsValid(slab));
885 
886  /* Include context header in totalspace */
887  totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
888 
/* Empty blocks count toward totalspace but, per the XXX below, their free
 * chunks are not included in freespace/freechunks. */
889  /* Add the space consumed by blocks in the emptyblocks list */
890  totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
891 
892  for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
893  {
894  dlist_iter iter;
895 
896  dlist_foreach(iter, &slab->blocklist[i])
897  {
898  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
899 
900  nblocks++;
901  totalspace += slab->blockSize;
902  freespace += slab->fullChunkSize * block->nfree;
903  freechunks += block->nfree;
904  }
905  }
906 
907  if (printfunc)
908  {
909  char stats_string[200];
910 
911  /* XXX should we include free chunks on empty blocks? */
912  snprintf(stats_string, sizeof(stats_string),
913  "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
914  totalspace, nblocks, dclist_count(&slab->emptyblocks),
915  freespace, freechunks, totalspace - freespace);
916  printfunc(context, passthru, stats_string, print_to_stderr);
917  }
918 
919  if (totals)
920  {
921  totals->nblocks += nblocks;
922  totals->freechunks += freechunks;
923  totals->totalspace += totalspace;
924  totals->freespace += freespace;
925  }
926 }
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)
Definition: slab.c:88

References Assert(), SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunksPerBlock, dlist_iter::cur, dclist_count(), dlist_container, dlist_foreach, SlabContext::emptyblocks, MemoryContextCounters::freechunks, MemoryContextCounters::freespace, SlabContext::fullChunkSize, i, MemoryContextCounters::nblocks, SlabBlock::nfree, SLAB_BLOCKLIST_COUNT, Slab_CONTEXT_HDRSZ, SlabIsValid, snprintf, and MemoryContextCounters::totalspace.