PostgreSQL Source Code  git master
memutils_internal.h File Reference
#include "utils/memutils.h"
(The include dependency graphs, showing which files this header includes and which files directly or indirectly include it, are not reproduced in this text rendering.)

Go to the source code of this file.

Macros

#define PallocAlignedExtraBytes(alignto)    ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))
 
#define MEMORY_CONTEXT_METHODID_BITS   4
 
#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)
 

Typedefs

typedef enum MemoryContextMethodID MemoryContextMethodID
 

Enumerations

enum  MemoryContextMethodID {
  MCTX_0_RESERVED_UNUSEDMEM_ID , MCTX_1_RESERVED_GLIBC_ID , MCTX_2_RESERVED_GLIBC_ID , MCTX_ASET_ID ,
  MCTX_GENERATION_ID , MCTX_SLAB_ID , MCTX_ALIGNED_REDIRECT_ID , MCTX_BUMP_ID ,
  MCTX_8_UNUSED_ID , MCTX_9_UNUSED_ID , MCTX_10_UNUSED_ID , MCTX_11_UNUSED_ID ,
  MCTX_12_UNUSED_ID , MCTX_13_UNUSED_ID , MCTX_14_UNUSED_ID , MCTX_15_RESERVED_WIPEDMEM_ID
}
 

Functions

void * AllocSetAlloc (MemoryContext context, Size size, int flags)
 
void AllocSetFree (void *pointer)
 
void * AllocSetRealloc (void *pointer, Size size, int flags)
 
void AllocSetReset (MemoryContext context)
 
void AllocSetDelete (MemoryContext context)
 
MemoryContext AllocSetGetChunkContext (void *pointer)
 
Size AllocSetGetChunkSpace (void *pointer)
 
bool AllocSetIsEmpty (MemoryContext context)
 
void AllocSetStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * GenerationAlloc (MemoryContext context, Size size, int flags)
 
void GenerationFree (void *pointer)
 
void * GenerationRealloc (void *pointer, Size size, int flags)
 
void GenerationReset (MemoryContext context)
 
void GenerationDelete (MemoryContext context)
 
MemoryContext GenerationGetChunkContext (void *pointer)
 
Size GenerationGetChunkSpace (void *pointer)
 
bool GenerationIsEmpty (MemoryContext context)
 
void GenerationStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * SlabAlloc (MemoryContext context, Size size, int flags)
 
void SlabFree (void *pointer)
 
void * SlabRealloc (void *pointer, Size size, int flags)
 
void SlabReset (MemoryContext context)
 
void SlabDelete (MemoryContext context)
 
MemoryContext SlabGetChunkContext (void *pointer)
 
Size SlabGetChunkSpace (void *pointer)
 
bool SlabIsEmpty (MemoryContext context)
 
void SlabStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void AlignedAllocFree (void *pointer)
 
void * AlignedAllocRealloc (void *pointer, Size size, int flags)
 
MemoryContext AlignedAllocGetChunkContext (void *pointer)
 
Size AlignedAllocGetChunkSpace (void *pointer)
 
void * BumpAlloc (MemoryContext context, Size size, int flags)
 
void BumpFree (void *pointer)
 
void * BumpRealloc (void *pointer, Size size, int flags)
 
void BumpReset (MemoryContext context)
 
void BumpDelete (MemoryContext context)
 
MemoryContext BumpGetChunkContext (void *pointer)
 
Size BumpGetChunkSpace (void *pointer)
 
bool BumpIsEmpty (MemoryContext context)
 
void BumpStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void MemoryContextCreate (MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
 
void * MemoryContextAllocationFailure (MemoryContext context, Size size, int flags)
 
void MemoryContextSizeFailure (MemoryContext context, Size size, int flags) pg_attribute_noreturn()
 
static void MemoryContextCheckSize (MemoryContext context, Size size, int flags)
 

Macro Definition Documentation

◆ MEMORY_CONTEXT_METHODID_BITS

#define MEMORY_CONTEXT_METHODID_BITS   4

Definition at line 145 of file memutils_internal.h.

◆ MEMORY_CONTEXT_METHODID_MASK

#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)

Definition at line 146 of file memutils_internal.h.
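This mask is what lets mcxt.c recover the owning allocator from a chunk: every chunk stores a uint64 header word immediately before its user pointer, and the low MEMORY_CONTEXT_METHODID_BITS of that word hold the MemoryContextMethodID used to dispatch pfree(), repalloc() and related calls. A minimal sketch of that lookup follows; the helper name and the raw memcpy of the header word are illustrative assumptions, not the actual mcxt.c code.

    #include "postgres.h"
    #include "utils/memutils_internal.h"

    /* Illustrative sketch: read the uint64 header stored just before a
     * palloc'd pointer and mask out the low bits to identify the allocator. */
    static inline MemoryContextMethodID
    sketch_get_method_id(const void *pointer)
    {
        uint64      header;

        memcpy(&header, (const char *) pointer - sizeof(uint64), sizeof(uint64));

        return (MemoryContextMethodID) (header & MEMORY_CONTEXT_METHODID_MASK);
    }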

◆ PallocAlignedExtraBytes

#define PallocAlignedExtraBytes (   alignto)     ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))

Definition at line 104 of file memutils_internal.h.
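A worked example, assuming a typical 64-bit non-assert build where sizeof(MemoryChunk) == 8 and MAXIMUM_ALIGNOF == 8: PallocAlignedExtraBytes(4096) = 4096 + (8 - 8) = 4096 extra bytes. That is the worst-case overhead MemoryContextAllocAligned() must add so that, starting from an allocation that is already MAXALIGN'ed, it can always place the redirection MemoryChunk header and still hand back a pointer on a 4096-byte boundary.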

Typedef Documentation

◆ MemoryContextMethodID

typedef enum MemoryContextMethodID MemoryContextMethodID

Enumeration Type Documentation

◆ MemoryContextMethodID

Enumerator
MCTX_0_RESERVED_UNUSEDMEM_ID 
MCTX_1_RESERVED_GLIBC_ID 
MCTX_2_RESERVED_GLIBC_ID 
MCTX_ASET_ID 
MCTX_GENERATION_ID 
MCTX_SLAB_ID 
MCTX_ALIGNED_REDIRECT_ID 
MCTX_BUMP_ID 
MCTX_8_UNUSED_ID 
MCTX_9_UNUSED_ID 
MCTX_10_UNUSED_ID 
MCTX_11_UNUSED_ID 
MCTX_12_UNUSED_ID 
MCTX_13_UNUSED_ID 
MCTX_14_UNUSED_ID 
MCTX_15_RESERVED_WIPEDMEM_ID 

Definition at line 121 of file memutils_internal.h.

122 {
123  MCTX_0_RESERVED_UNUSEDMEM_ID, /* 0000 occurs in never-used memory */
124  MCTX_1_RESERVED_GLIBC_ID, /* glibc malloc'd chunks usually match 0001 */
125  MCTX_2_RESERVED_GLIBC_ID, /* glibc malloc'd chunks > 128kB match 0010 */
126  MCTX_ASET_ID,
127  MCTX_GENERATION_ID,
128  MCTX_SLAB_ID,
129  MCTX_ALIGNED_REDIRECT_ID,
130  MCTX_BUMP_ID,
131  MCTX_8_UNUSED_ID,
132  MCTX_9_UNUSED_ID,
133  MCTX_10_UNUSED_ID,
134  MCTX_11_UNUSED_ID,
135  MCTX_12_UNUSED_ID,
136  MCTX_13_UNUSED_ID,
137  MCTX_14_UNUSED_ID,
138  MCTX_15_RESERVED_WIPEDMEM_ID /* 1111 occurs in wipe_mem'd memory */
139 } MemoryContextMethodID;

Function Documentation

◆ AlignedAllocFree()

void AlignedAllocFree ( void *  pointer)

Definition at line 29 of file alignedalloc.c.

30 {
31  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
32  void *unaligned;
33 
34  VALGRIND_MAKE_MEM_DEFINED(chunk, sizeof(MemoryChunk));
35 
36  Assert(!MemoryChunkIsExternal(chunk));
37 
38  /* obtain the original (unaligned) allocated pointer */
39  unaligned = MemoryChunkGetBlock(chunk);
40 
41 #ifdef MEMORY_CONTEXT_CHECKING
42  /* Test for someone scribbling on unused space in chunk */
43  if (!sentinel_ok(pointer, chunk->requested_size))
44  elog(WARNING, "detected write past chunk end in %s %p",
45  GetMemoryChunkContext(unaligned)->name, chunk);
46 #endif
47 
48  pfree(unaligned);
49 }

References Assert, chunk, elog, GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), name, pfree(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.
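For context, a hedged usage sketch: pointers returned by palloc_aligned() / MemoryContextAllocAligned() are fronted by an MCTX_ALIGNED_REDIRECT_ID chunk, so an ordinary pfree() on the aligned pointer is dispatched here, and AlignedAllocFree() then frees the underlying (unaligned) allocation. The buffer size, alignment, and helper name below are arbitrary example values.

    #include "postgres.h"

    static void
    aligned_demo(void)          /* hypothetical helper, for illustration only */
    {
        char       *buf;

        /* e.g. an 8 kB buffer aligned to a 4 kB boundary */
        buf = palloc_aligned(8192, 4096, 0);

        /* ... use buf ... */

        pfree(buf);             /* routed to AlignedAllocFree() via the redirect chunk */
    }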

◆ AlignedAllocGetChunkContext()

MemoryContext AlignedAllocGetChunkContext ( void *  pointer)

Definition at line 121 of file alignedalloc.c.

122 {
123  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
124  MemoryContext cxt;
125 
126  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
127 
128  Assert(!MemoryChunkIsExternal(redirchunk));
129 
130  cxt = GetMemoryChunkContext(MemoryChunkGetBlock(redirchunk));
131 
132  VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
133 
134  return cxt;
135 }

References Assert, GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocGetChunkSpace()

Size AlignedAllocGetChunkSpace ( void *  pointer)

Definition at line 143 of file alignedalloc.c.

144 {
145  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
146  void *unaligned;
147  Size space;
148 
149  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
150 
151  unaligned = MemoryChunkGetBlock(redirchunk);
152  space = GetMemoryChunkSpace(unaligned);
153 
154  VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
155 
156  return space;
157 }

References GetMemoryChunkSpace(), MemoryChunkGetBlock(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocRealloc()

void* AlignedAllocRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 60 of file alignedalloc.c.

61 {
62  MemoryChunk *redirchunk = PointerGetMemoryChunk(pointer);
63  Size alignto;
64  void *unaligned;
65  MemoryContext ctx;
66  Size old_size;
67  void *newptr;
68 
69  VALGRIND_MAKE_MEM_DEFINED(redirchunk, sizeof(MemoryChunk));
70 
71  alignto = MemoryChunkGetValue(redirchunk);
72  unaligned = MemoryChunkGetBlock(redirchunk);
73 
74  /* sanity check this is a power of 2 value */
75  Assert((alignto & (alignto - 1)) == 0);
76 
77  /*
78  * Determine the size of the original allocation. We can't determine this
79  * exactly as GetMemoryChunkSpace() returns the total space used for the
80  * allocation, which for contexts like aset includes rounding up to the
81  * next power of 2. However, this value is just used to memcpy() the old
82  * data into the new allocation, so we only need to concern ourselves with
83  * not reading beyond the end of the original allocation's memory. The
84  * drawback here is that we may copy more bytes than we need to, which
85  * only amounts to wasted effort. We can safely subtract the extra bytes
86  * that we requested to allow us to align the pointer. We must also
87  * subtract the space for the unaligned pointer's MemoryChunk since
88  * GetMemoryChunkSpace should have included that. This does assume that
89  * all context types use MemoryChunk as a chunk header.
90  */
91  old_size = GetMemoryChunkSpace(unaligned) -
92  PallocAlignedExtraBytes(alignto) - sizeof(MemoryChunk);
93 
94 #ifdef MEMORY_CONTEXT_CHECKING
95  /* check that GetMemoryChunkSpace returned something realistic */
96  Assert(old_size >= redirchunk->requested_size);
97 #endif
98 
99  ctx = GetMemoryChunkContext(unaligned);
100  newptr = MemoryContextAllocAligned(ctx, size, alignto, flags);
101 
102  /*
103  * We may memcpy beyond the end of the original allocation request size,
104  * so we must mark the entire allocation as defined.
105  */
106  if (likely(newptr != NULL))
107  {
108  VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
109  memcpy(newptr, pointer, Min(size, old_size));
110  }
111  pfree(unaligned);
112 
113  return newptr;
114 }

References Assert, GetMemoryChunkContext(), GetMemoryChunkSpace(), likely, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryContextAllocAligned(), Min, PallocAlignedExtraBytes, pfree(), PointerGetMemoryChunk, size, and VALGRIND_MAKE_MEM_DEFINED.
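Correspondingly, repalloc() on such a pointer lands here: the original alignment is read back out of the redirect chunk via MemoryChunkGetValue() and passed to MemoryContextAllocAligned() again, so the reallocated pointer keeps the same alignment. A small sketch, continuing the hypothetical example above:

    char       *buf = palloc_aligned(4096, 4096, 0);

    buf = repalloc(buf, 16384);     /* still 4096-byte aligned after the move */
    pfree(buf);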

◆ AllocSetAlloc()

void* AllocSetAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 967 of file aset.c.

968 {
969  AllocSet set = (AllocSet) context;
970  AllocBlock block;
971  MemoryChunk *chunk;
972  int fidx;
973  Size chunk_size;
974  Size availspace;
975 
976  Assert(AllocSetIsValid(set));
977 
978  /* due to the keeper block set->blocks should never be NULL */
979  Assert(set->blocks != NULL);
980 
981  /*
982  * If requested size exceeds maximum for chunks we hand the request off to
983  * AllocSetAllocLarge().
984  */
985  if (size > set->allocChunkLimit)
986  return AllocSetAllocLarge(context, size, flags);
987 
988  /*
989  * Request is small enough to be treated as a chunk. Look in the
990  * corresponding free list to see if there is a free chunk we could reuse.
991  * If one is found, remove it from the free list, make it again a member
992  * of the alloc set and return its data address.
993  *
994  * Note that we don't attempt to ensure there's space for the sentinel
995  * byte here. We expect a large proportion of allocations to be for sizes
996  * which are already a power of 2. If we were to always make space for a
997  * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
998  * doubling the memory requirements for such allocations.
999  */
1000  fidx = AllocSetFreeIndex(size);
1001  chunk = set->freelist[fidx];
1002  if (chunk != NULL)
1003  {
1005 
1006  /* Allow access to the chunk header. */
1008 
1009  Assert(fidx == MemoryChunkGetValue(chunk));
1010 
1011  /* pop this chunk off the freelist */
1013  set->freelist[fidx] = link->next;
1015 
1016 #ifdef MEMORY_CONTEXT_CHECKING
1017  chunk->requested_size = size;
1018  /* set mark to catch clobber of "unused" space */
1019  if (size < GetChunkSizeFromFreeListIdx(fidx))
1020  set_sentinel(MemoryChunkGetPointer(chunk), size);
1021 #endif
1022 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1023  /* fill the allocated space with junk */
1024  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1025 #endif
1026 
1027  /* Ensure any padding bytes are marked NOACCESS. */
1030 
1031  /* Disallow access to the chunk header. */
1033 
1034  return MemoryChunkGetPointer(chunk);
1035  }
1036 
1037  /*
1038  * Choose the actual chunk size to allocate.
1039  */
1040  chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1041  Assert(chunk_size >= size);
1042 
1043  block = set->blocks;
1044  availspace = block->endptr - block->freeptr;
1045 
1046  /*
1047  * If there is enough room in the active allocation block, we will put the
1048  * chunk into that block. Else must start a new one.
1049  */
1050  if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1051  return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1052 
1053  /* There's enough space on the current block, so allocate from that */
1054  return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1055 }

References ALLOC_CHUNKHDRSZ, AllocSetContext::allocChunkLimit, AllocSetAllocChunkFromBlock(), AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetFreeIndex(), AllocSetIsValid, Assert, AllocSetContext::blocks, chunk, context, AllocBlockData::endptr, AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MemoryChunkGetPointer, MemoryChunkGetValue(), size, unlikely, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

Referenced by AllocSetRealloc().
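As a usage sketch (the helper name below is illustrative; the API calls are the standard memory-context ones): allocations made in an aset context arrive here via MemoryContextAlloc()/palloc(). Requests up to allocChunkLimit are rounded to a power-of-2 freelist size, while larger requests are handed to AllocSetAllocLarge() and get a dedicated single-chunk block.

    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    aset_demo(void)
    {
        MemoryContext cxt;
        void       *small;
        void       *large;

        cxt = AllocSetContextCreate(CurrentMemoryContext,
                                    "aset demo",
                                    ALLOCSET_DEFAULT_SIZES);

        small = MemoryContextAlloc(cxt, 64);            /* served from a freelist-sized chunk */
        large = MemoryContextAlloc(cxt, 1024 * 1024);   /* oversized: gets its own block */

        pfree(small);
        pfree(large);
        MemoryContextDelete(cxt);
    }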

◆ AllocSetDelete()

void AllocSetDelete ( MemoryContext  context)

Definition at line 607 of file aset.c.

608 {
609  AllocSet set = (AllocSet) context;
610  AllocBlock block = set->blocks;
611  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
612 
613  Assert(AllocSetIsValid(set));
614 
615 #ifdef MEMORY_CONTEXT_CHECKING
616  /* Check for corruption and leaks before freeing */
617  AllocSetCheck(context);
618 #endif
619 
620  /* Remember keeper block size for Assert below */
621  keepersize = KeeperBlock(set)->endptr - ((char *) set);
622 
623  /*
624  * If the context is a candidate for a freelist, put it into that freelist
625  * instead of destroying it.
626  */
627  if (set->freeListIndex >= 0)
628  {
629  AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
630 
631  /*
632  * Reset the context, if it needs it, so that we aren't hanging on to
633  * more than the initial malloc chunk.
634  */
635  if (!context->isReset)
636  MemoryContextResetOnly(context);
637 
638  /*
639  * If the freelist is full, just discard what's already in it. See
640  * comments with context_freelists[].
641  */
642  if (freelist->num_free >= MAX_FREE_CONTEXTS)
643  {
644  while (freelist->first_free != NULL)
645  {
646  AllocSetContext *oldset = freelist->first_free;
647 
648  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
649  freelist->num_free--;
650 
651  /* All that remains is to free the header/initial block */
652  free(oldset);
653  }
654  Assert(freelist->num_free == 0);
655  }
656 
657  /* Now add the just-deleted context to the freelist. */
658  set->header.nextchild = (MemoryContext) freelist->first_free;
659  freelist->first_free = set;
660  freelist->num_free++;
661 
662  return;
663  }
664 
665  /* Free all blocks, except the keeper which is part of context header */
666  while (block != NULL)
667  {
668  AllocBlock next = block->next;
669 
670  if (!IsKeeperBlock(set, block))
671  context->mem_allocated -= block->endptr - ((char *) block);
672 
673 #ifdef CLOBBER_FREED_MEMORY
674  wipe_mem(block, block->freeptr - ((char *) block));
675 #endif
676 
677  if (!IsKeeperBlock(set, block))
678  free(block);
679 
680  block = next;
681  }
682 
683  Assert(context->mem_allocated == keepersize);
684 
685  /* Finally, free the context header, including the keeper block */
686  free(set);
687 }

References AllocSetIsValid, Assert, AllocSetContext::blocks, context, context_freelists, AllocBlockData::endptr, AllocSetFreeList::first_free, free, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, IsKeeperBlock, KeeperBlock, MAX_FREE_CONTEXTS, MemoryContextResetOnly(), next, AllocBlockData::next, MemoryContextData::nextchild, AllocSetFreeList::num_free, and PG_USED_FOR_ASSERTS_ONLY.

◆ AllocSetFree()

void AllocSetFree ( void *  pointer)

Definition at line 1062 of file aset.c.

1063 {
1064  AllocSet set;
1066 
1067  /* Allow access to the chunk header. */
1069 
1071  {
1072  /* Release single-chunk block. */
1074 
1075  /*
1076  * Try to verify that we have a sane block pointer: the block header
1077  * should reference an aset and the freeptr should match the endptr.
1078  */
1079  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1080  elog(ERROR, "could not find block containing chunk %p", chunk);
1081 
1082  set = block->aset;
1083 
1084 #ifdef MEMORY_CONTEXT_CHECKING
1085  {
1086  /* Test for someone scribbling on unused space in chunk */
1087  Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1088  if (!sentinel_ok(pointer, chunk->requested_size))
1089  elog(WARNING, "detected write past chunk end in %s %p",
1090  set->header.name, chunk);
1091  }
1092 #endif
1093 
1094  /* OK, remove block from aset's list and free it */
1095  if (block->prev)
1096  block->prev->next = block->next;
1097  else
1098  set->blocks = block->next;
1099  if (block->next)
1100  block->next->prev = block->prev;
1101 
1102  set->header.mem_allocated -= block->endptr - ((char *) block);
1103 
1104 #ifdef CLOBBER_FREED_MEMORY
1105  wipe_mem(block, block->freeptr - ((char *) block));
1106 #endif
1107  free(block);
1108  }
1109  else
1110  {
1112  int fidx;
1114 
1115  /*
1116  * In this path, for speed reasons we just Assert that the referenced
1117  * block is good. We can also Assert that the value field is sane.
1118  * Future field experience may show that these Asserts had better
1119  * become regular runtime test-and-elog checks.
1120  */
1121  Assert(AllocBlockIsValid(block));
1122  set = block->aset;
1123 
1124  fidx = MemoryChunkGetValue(chunk);
1125  Assert(FreeListIdxIsValid(fidx));
1127 
1128 #ifdef MEMORY_CONTEXT_CHECKING
1129  /* Test for someone scribbling on unused space in chunk */
1130  if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1131  if (!sentinel_ok(pointer, chunk->requested_size))
1132  elog(WARNING, "detected write past chunk end in %s %p",
1133  set->header.name, chunk);
1134 #endif
1135 
1136 #ifdef CLOBBER_FREED_MEMORY
1137  wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1138 #endif
1139  /* push this chunk onto the top of the free list */
1141  link->next = set->freelist[fidx];
1143  set->freelist[fidx] = chunk;
1144 
1145 #ifdef MEMORY_CONTEXT_CHECKING
1146 
1147  /*
1148  * Reset requested_size to InvalidAllocSize in chunks that are on free
1149  * list.
1150  */
1151  chunk->requested_size = InvalidAllocSize;
1152 #endif
1153  }
1154 }

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, AllocSetContext::blocks, chunk, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, free, AllocSetContext::freelist, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, AllocSetContext::header, InvalidAllocSize, link(), MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, and WARNING.

Referenced by AllocSetRealloc().

◆ AllocSetGetChunkContext()

MemoryContext AllocSetGetChunkContext ( void *  pointer)

Definition at line 1433 of file aset.c.

1434 {
1435  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1436  AllocBlock block;
1437  AllocSet set;
1438 
1439  /* Allow access to the chunk header. */
1440  VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1441 
1442  if (MemoryChunkIsExternal(chunk))
1443  block = ExternalChunkGetBlock(chunk);
1444  else
1445  block = (AllocBlock) MemoryChunkGetBlock(chunk);
1446 
1447  /* Disallow access to the chunk header. */
1448  VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1449 
1450  Assert(AllocBlockIsValid(block));
1451  set = block->aset;
1452 
1453  return &set->header;
1454 }

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, chunk, ExternalChunkGetBlock, AllocSetContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetGetChunkSpace()

Size AllocSetGetChunkSpace ( void *  pointer)

Definition at line 1462 of file aset.c.

1463 {
1465  int fidx;
1466 
1467  /* Allow access to the chunk header. */
1469 
1471  {
1473 
1474  /* Disallow access to the chunk header. */
1476 
1477  Assert(AllocBlockIsValid(block));
1478 
1479  return block->endptr - (char *) chunk;
1480  }
1481 
1482  fidx = MemoryChunkGetValue(chunk);
1483  Assert(FreeListIdxIsValid(fidx));
1484 
1485  /* Disallow access to the chunk header. */
1487 
1489 }

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, Assert, chunk, AllocBlockData::endptr, ExternalChunkGetBlock, FreeListIdxIsValid, GetChunkSizeFromFreeListIdx, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetIsEmpty()

bool AllocSetIsEmpty ( MemoryContext  context)

Definition at line 1496 of file aset.c.

1497 {
1498  Assert(AllocSetIsValid(context));
1499 
1500  /*
1501  * For now, we say "empty" only if the context is new or just reset. We
1502  * could examine the freelists to determine if all space has been freed,
1503  * but it's not really worth the trouble for present uses of this
1504  * functionality.
1505  */
1506  if (context->isReset)
1507  return true;
1508  return false;
1509 }

References AllocSetIsValid, Assert, and context.

◆ AllocSetRealloc()

void* AllocSetRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 1169 of file aset.c.

1170 {
1171  AllocBlock block;
1172  AllocSet set;
1173  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1174  Size oldchksize;
1175  int fidx;
1176 
1177  /* Allow access to the chunk header. */
1179 
1181  {
1182  /*
1183  * The chunk must have been allocated as a single-chunk block. Use
1184  * realloc() to make the containing block bigger, or smaller, with
1185  * minimum space wastage.
1186  */
1187  Size chksize;
1188  Size blksize;
1189  Size oldblksize;
1190 
1191  block = ExternalChunkGetBlock(chunk);
1192 
1193  /*
1194  * Try to verify that we have a sane block pointer: the block header
1195  * should reference an aset and the freeptr should match the endptr.
1196  */
1197  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1198  elog(ERROR, "could not find block containing chunk %p", chunk);
1199 
1200  set = block->aset;
1201 
1202  /* only check size in paths where the limits could be hit */
1203  MemoryContextCheckSize((MemoryContext) set, size, flags);
1204 
1205  oldchksize = block->endptr - (char *) pointer;
1206 
1207 #ifdef MEMORY_CONTEXT_CHECKING
1208  /* Test for someone scribbling on unused space in chunk */
1209  Assert(chunk->requested_size < oldchksize);
1210  if (!sentinel_ok(pointer, chunk->requested_size))
1211  elog(WARNING, "detected write past chunk end in %s %p",
1212  set->header.name, chunk);
1213 #endif
1214 
1215 #ifdef MEMORY_CONTEXT_CHECKING
1216  /* ensure there's always space for the sentinel byte */
1217  chksize = MAXALIGN(size + 1);
1218 #else
1219  chksize = MAXALIGN(size);
1220 #endif
1221 
1222  /* Do the realloc */
1223  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1224  oldblksize = block->endptr - ((char *) block);
1225 
1226  block = (AllocBlock) realloc(block, blksize);
1227  if (block == NULL)
1228  {
1229  /* Disallow access to the chunk header. */
1231  return MemoryContextAllocationFailure(&set->header, size, flags);
1232  }
1233 
1234  /* updated separately, not to underflow when (oldblksize > blksize) */
1235  set->header.mem_allocated -= oldblksize;
1236  set->header.mem_allocated += blksize;
1237 
1238  block->freeptr = block->endptr = ((char *) block) + blksize;
1239 
1240  /* Update pointers since block has likely been moved */
1241  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1242  pointer = MemoryChunkGetPointer(chunk);
1243  if (block->prev)
1244  block->prev->next = block;
1245  else
1246  set->blocks = block;
1247  if (block->next)
1248  block->next->prev = block;
1249 
1250 #ifdef MEMORY_CONTEXT_CHECKING
1251 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1252 
1253  /*
1254  * We can only randomize the extra space if we know the prior request.
1255  * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1256  */
1257  if (size > chunk->requested_size)
1258  randomize_mem((char *) pointer + chunk->requested_size,
1259  size - chunk->requested_size);
1260 #else
1261 
1262  /*
1263  * If this is an increase, realloc() will have marked any
1264  * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1265  * also need to adjust trailing bytes from the old allocation (from
1266  * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1267  * Make sure not to mark too many bytes in case chunk->requested_size
1268  * < size < oldchksize.
1269  */
1270 #ifdef USE_VALGRIND
1271  if (Min(size, oldchksize) > chunk->requested_size)
1272  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1273  Min(size, oldchksize) - chunk->requested_size);
1274 #endif
1275 #endif
1276 
1277  chunk->requested_size = size;
1278  /* set mark to catch clobber of "unused" space */
1279  Assert(size < chksize);
1280  set_sentinel(pointer, size);
1281 #else /* !MEMORY_CONTEXT_CHECKING */
1282 
1283  /*
1284  * We may need to adjust marking of bytes from the old allocation as
1285  * some of them may be marked NOACCESS. We don't know how much of the
1286  * old chunk size was the requested size; it could have been as small
1287  * as one byte. We have to be conservative and just mark the entire
1288  * old portion DEFINED. Make sure not to mark memory beyond the new
1289  * allocation in case it's smaller than the old one.
1290  */
1291  VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1292 #endif
1293 
1294  /* Ensure any padding bytes are marked NOACCESS. */
1295  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1296 
1297  /* Disallow access to the chunk header . */
1299 
1300  return pointer;
1301  }
1302 
1303  block = MemoryChunkGetBlock(chunk);
1304 
1305  /*
1306  * In this path, for speed reasons we just Assert that the referenced
1307  * block is good. We can also Assert that the value field is sane. Future
1308  * field experience may show that these Asserts had better become regular
1309  * runtime test-and-elog checks.
1310  */
1311  Assert(AllocBlockIsValid(block));
1312  set = block->aset;
1313 
1314  fidx = MemoryChunkGetValue(chunk);
1315  Assert(FreeListIdxIsValid(fidx));
1316  oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1317 
1318 #ifdef MEMORY_CONTEXT_CHECKING
1319  /* Test for someone scribbling on unused space in chunk */
1320  if (chunk->requested_size < oldchksize)
1321  if (!sentinel_ok(pointer, chunk->requested_size))
1322  elog(WARNING, "detected write past chunk end in %s %p",
1323  set->header.name, chunk);
1324 #endif
1325 
1326  /*
1327  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1328  * allocated area already is >= the new size. (In particular, we will
1329  * fall out here if the requested size is a decrease.)
1330  */
1331  if (oldchksize >= size)
1332  {
1333 #ifdef MEMORY_CONTEXT_CHECKING
1334  Size oldrequest = chunk->requested_size;
1335 
1336 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1337  /* We can only fill the extra space if we know the prior request */
1338  if (size > oldrequest)
1339  randomize_mem((char *) pointer + oldrequest,
1340  size - oldrequest);
1341 #endif
1342 
1343  chunk->requested_size = size;
1344 
1345  /*
1346  * If this is an increase, mark any newly-available part UNDEFINED.
1347  * Otherwise, mark the obsolete part NOACCESS.
1348  */
1349  if (size > oldrequest)
1350  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1351  size - oldrequest);
1352  else
1353  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1354  oldchksize - size);
1355 
1356  /* set mark to catch clobber of "unused" space */
1357  if (size < oldchksize)
1358  set_sentinel(pointer, size);
1359 #else /* !MEMORY_CONTEXT_CHECKING */
1360 
1361  /*
1362  * We don't have the information to determine whether we're growing
1363  * the old request or shrinking it, so we conservatively mark the
1364  * entire new allocation DEFINED.
1365  */
1366  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1367  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1368 #endif
1369 
1370  /* Disallow access to the chunk header. */
1372 
1373  return pointer;
1374  }
1375  else
1376  {
1377  /*
1378  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1379  * allocate a new chunk and copy the data. Since we know the existing
1380  * data isn't huge, this won't involve any great memcpy expense, so
1381  * it's not worth being smarter. (At one time we tried to avoid
1382  * memcpy when it was possible to enlarge the chunk in-place, but that
1383  * turns out to misbehave unpleasantly for repeated cycles of
1384  * palloc/repalloc/pfree: the eventually freed chunks go into the
1385  * wrong freelist for the next initial palloc request, and so we leak
1386  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1387  */
1388  AllocPointer newPointer;
1389  Size oldsize;
1390 
1391  /* allocate new chunk (this also checks size is valid) */
1392  newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1393 
1394  /* leave immediately if request was not completed */
1395  if (newPointer == NULL)
1396  {
1397  /* Disallow access to the chunk header. */
1399  return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1400  }
1401 
1402  /*
1403  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1404  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1405  * definedness from the old allocation to the new. If we know the old
1406  * allocation, copy just that much. Otherwise, make the entire old
1407  * chunk defined to avoid errors as we copy the currently-NOACCESS
1408  * trailing bytes.
1409  */
1410  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1411 #ifdef MEMORY_CONTEXT_CHECKING
1412  oldsize = chunk->requested_size;
1413 #else
1414  oldsize = oldchksize;
1415  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1416 #endif
1417 
1418  /* transfer existing data (certain to fit) */
1419  memcpy(newPointer, pointer, oldsize);
1420 
1421  /* free old chunk */
1422  AllocSetFree(pointer);
1423 
1424  return newPointer;
1425  }
1426 }

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocSetAlloc(), AllocSetFree(), AllocBlockData::aset, Assert, chunk, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, AllocSetContext::header, MAXALIGN, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), MemoryContextCheckSize(), Min, MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, realloc, size, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.
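A short sketch of the two paths above, reusing the hypothetical cxt from the earlier aset example (a standard, non-MEMORY_CONTEXT_CHECKING build is assumed): because chunk sizes are rounded up to powers of 2, a modest grow can often be satisfied in place, while anything larger falls into the allocate-copy-free branch.

    char       *p = MemoryContextAlloc(cxt, 100);   /* stored in a 128-byte chunk */

    p = repalloc(p, 120);       /* fits the existing 128-byte chunk: same pointer */
    p = repalloc(p, 300);       /* too big: new chunk, memcpy, old chunk freed */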

◆ AllocSetReset()

void AllocSetReset ( MemoryContext  context)

Definition at line 537 of file aset.c.

538 {
539  AllocSet set = (AllocSet) context;
540  AllocBlock block;
541  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
542 
543  Assert(AllocSetIsValid(set));
544 
545 #ifdef MEMORY_CONTEXT_CHECKING
546  /* Check for corruption and leaks before freeing */
547  AllocSetCheck(context);
548 #endif
549 
550  /* Remember keeper block size for Assert below */
551  keepersize = KeeperBlock(set)->endptr - ((char *) set);
552 
553  /* Clear chunk freelists */
554  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
555 
556  block = set->blocks;
557 
558  /* New blocks list will be just the keeper block */
559  set->blocks = KeeperBlock(set);
560 
561  while (block != NULL)
562  {
563  AllocBlock next = block->next;
564 
565  if (IsKeeperBlock(set, block))
566  {
567  /* Reset the block, but don't return it to malloc */
568  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
569 
570 #ifdef CLOBBER_FREED_MEMORY
571  wipe_mem(datastart, block->freeptr - datastart);
572 #else
573  /* wipe_mem() would have done this */
574  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
575 #endif
576  block->freeptr = datastart;
577  block->prev = NULL;
578  block->next = NULL;
579  }
580  else
581  {
582  /* Normal case, release the block */
583  context->mem_allocated -= block->endptr - ((char *) block);
584 
585 #ifdef CLOBBER_FREED_MEMORY
586  wipe_mem(block, block->freeptr - ((char *) block));
587 #endif
588  free(block);
589  }
590  block = next;
591  }
592 
593  Assert(context->mem_allocated == keepersize);
594 
595  /* Reset block size allocation sequence, too */
596  set->nextBlockSize = set->initBlockSize;
597 }

References ALLOC_BLOCKHDRSZ, AllocSetIsValid, Assert, AllocSetContext::blocks, context, AllocBlockData::endptr, free, AllocSetContext::freelist, AllocBlockData::freeptr, AllocSetContext::initBlockSize, IsKeeperBlock, KeeperBlock, MemSetAligned, next, AllocBlockData::next, AllocSetContext::nextBlockSize, PG_USED_FOR_ASSERTS_ONLY, AllocBlockData::prev, and VALGRIND_MAKE_MEM_NOACCESS.
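In practice this is reached through MemoryContextReset(): everything except the keeper block is returned to malloc and the freelists are cleared, so the context can be reused without paying for re-creation. A one-line sketch, using the hypothetical cxt from the earlier example:

    MemoryContextReset(cxt);    /* frees all blocks but the keeper; cxt stays usable */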

◆ AllocSetStats()

void AllocSetStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 1521 of file aset.c.

1524 {
1525  AllocSet set = (AllocSet) context;
1526  Size nblocks = 0;
1527  Size freechunks = 0;
1528  Size totalspace;
1529  Size freespace = 0;
1530  AllocBlock block;
1531  int fidx;
1532 
1533  Assert(AllocSetIsValid(set));
1534 
1535  /* Include context header in totalspace */
1536  totalspace = MAXALIGN(sizeof(AllocSetContext));
1537 
1538  for (block = set->blocks; block != NULL; block = block->next)
1539  {
1540  nblocks++;
1541  totalspace += block->endptr - ((char *) block);
1542  freespace += block->endptr - block->freeptr;
1543  }
1544  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1545  {
1546  Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1547  MemoryChunk *chunk = set->freelist[fidx];
1548 
1549  while (chunk != NULL)
1550  {
1552 
1553  /* Allow access to the chunk header. */
1555  Assert(MemoryChunkGetValue(chunk) == fidx);
1557 
1558  freechunks++;
1559  freespace += chksz + ALLOC_CHUNKHDRSZ;
1560 
1562  chunk = link->next;
1564  }
1565  }
1566 
1567  if (printfunc)
1568  {
1569  char stats_string[200];
1570 
1571  snprintf(stats_string, sizeof(stats_string),
1572  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1573  totalspace, nblocks, freespace, freechunks,
1574  totalspace - freespace);
1575  printfunc(context, passthru, stats_string, print_to_stderr);
1576  }
1577 
1578  if (totals)
1579  {
1580  totals->nblocks += nblocks;
1581  totals->freechunks += freechunks;
1582  totals->totalspace += totalspace;
1583  totals->freespace += freespace;
1584  }
1585 }

References ALLOC_CHUNKHDRSZ, ALLOCSET_NUM_FREELISTS, AllocSetIsValid, Assert, AllocSetContext::blocks, chunk, context, AllocBlockData::endptr, MemoryContextCounters::freechunks, AllocSetContext::freelist, AllocBlockData::freeptr, MemoryContextCounters::freespace, GetChunkSizeFromFreeListIdx, GetFreeListLink, MAXALIGN, MemoryChunkGetValue(), MemoryContextCounters::nblocks, AllocBlockData::next, snprintf, MemoryContextCounters::totalspace, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.
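These stats callbacks are normally driven from MemoryContextStats() and related entry points rather than called directly; a typical (illustrative) use is dumping the whole context tree while debugging:

    MemoryContextStats(TopMemoryContext);   /* prints per-context usage to stderr */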

◆ BumpAlloc()

void* BumpAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 491 of file bump.c.

492 {
493  BumpContext *set = (BumpContext *) context;
494  BumpBlock *block;
495  Size chunk_size;
496  Size required_size;
497 
498  Assert(BumpIsValid(set));
499 
500 #ifdef MEMORY_CONTEXT_CHECKING
501  /* ensure there's always space for the sentinel byte */
502  chunk_size = MAXALIGN(size + 1);
503 #else
504  chunk_size = MAXALIGN(size);
505 #endif
506 
507  /*
508  * If requested size exceeds maximum for chunks we hand the request off to
509  * BumpAllocLarge().
510  */
511  if (chunk_size > set->allocChunkLimit)
512  return BumpAllocLarge(context, size, flags);
513 
514  required_size = chunk_size + Bump_CHUNKHDRSZ;
515 
516  /*
517  * Not an oversized chunk. We try to first make use of the latest block,
518  * but if there's not enough space in it we must allocate a new block.
519  */
520  block = dlist_container(BumpBlock, node, dlist_head_node(&set->blocks));
521 
522  if (BumpBlockFreeBytes(block) < required_size)
523  return BumpAllocFromNewBlock(context, size, flags, chunk_size);
524 
525  /* The current block has space, so just allocate chunk there. */
526  return BumpAllocChunkFromBlock(context, block, size, chunk_size);
527 }

References BumpContext::allocChunkLimit, Assert, BumpContext::blocks, Bump_CHUNKHDRSZ, BumpAllocChunkFromBlock(), BumpAllocFromNewBlock(), BumpAllocLarge(), BumpBlockFreeBytes(), BumpIsValid, context, dlist_container, dlist_head_node(), MAXALIGN, and size.
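A hedged usage sketch (assuming BumpContextCreate() with the same three-size parameter list as AllocSetContextCreate()): bump contexts trade per-chunk bookkeeping for speed, so individual chunks can only be reclaimed by resetting or deleting the whole context; BumpFree() and BumpRealloc() below simply raise an error.

    MemoryContext bumpcxt;

    bumpcxt = BumpContextCreate(CurrentMemoryContext,
                                "bump demo",
                                ALLOCSET_DEFAULT_SIZES);

    (void) MemoryContextAlloc(bumpcxt, 128);    /* fine: plain allocation */
    MemoryContextReset(bumpcxt);                /* the only way to reclaim chunks */
    MemoryContextDelete(bumpcxt);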

◆ BumpDelete()

void BumpDelete ( MemoryContext  context)

Definition at line 278 of file bump.c.

279 {
280  /* Reset to release all releasable BumpBlocks */
281  BumpReset(context);
282  /* And free the context header and keeper block */
283  free(context);
284 }

References BumpReset(), context, and free.

◆ BumpFree()

void BumpFree ( void *  pointer)

Definition at line 617 of file bump.c.

618 {
619  elog(ERROR, "%s is not supported by the bump memory allocator", "pfree");
620 }

References elog, and ERROR.

◆ BumpGetChunkContext()

MemoryContext BumpGetChunkContext ( void *  pointer)

Definition at line 638 of file bump.c.

639 {
640  elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkContext");
641  return NULL; /* keep compiler quiet */
642 }

References elog, and ERROR.

◆ BumpGetChunkSpace()

Size BumpGetChunkSpace ( void *  pointer)

Definition at line 649 of file bump.c.

650 {
651  elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkSpace");
652  return 0; /* keep compiler quiet */
653 }

References elog, and ERROR.

◆ BumpIsEmpty()

bool BumpIsEmpty ( MemoryContext  context)

Definition at line 660 of file bump.c.

661 {
662  BumpContext *set = (BumpContext *) context;
663  dlist_iter iter;
664 
665  Assert(BumpIsValid(set));
666 
667  dlist_foreach(iter, &set->blocks)
668  {
669  BumpBlock *block = dlist_container(BumpBlock, node, iter.cur);
670 
671  if (!BumpBlockIsEmpty(block))
672  return false;
673  }
674 
675  return true;
676 }

References Assert, BumpContext::blocks, BumpBlockIsEmpty(), BumpIsValid, context, dlist_iter::cur, dlist_container, and dlist_foreach.

◆ BumpRealloc()

void* BumpRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 627 of file bump.c.

628 {
629  elog(ERROR, "%s is not supported by the bump memory allocator", "realloc");
630  return NULL; /* keep compiler quiet */
631 }

References elog, and ERROR.

◆ BumpReset()

void BumpReset ( MemoryContext  context)

Definition at line 243 of file bump.c.

244 {
245  BumpContext *set = (BumpContext *) context;
246  dlist_mutable_iter miter;
247 
248  Assert(BumpIsValid(set));
249 
250 #ifdef MEMORY_CONTEXT_CHECKING
251  /* Check for corruption and leaks before freeing */
252  BumpCheck(context);
253 #endif
254 
255  dlist_foreach_modify(miter, &set->blocks)
256  {
257  BumpBlock *block = dlist_container(BumpBlock, node, miter.cur);
258 
259  if (IsKeeperBlock(set, block))
260  BumpBlockMarkEmpty(block);
261  else
262  BumpBlockFree(set, block);
263  }
264 
265  /* Reset block size allocation sequence, too */
266  set->nextBlockSize = set->initBlockSize;
267 
268  /* Ensure there is only 1 item in the dlist */
269  Assert(!dlist_is_empty(&set->blocks));
270  Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
271 }

References Assert, BumpContext::blocks, BumpBlockFree(), BumpBlockMarkEmpty(), BumpIsValid, context, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), BumpContext::initBlockSize, IsKeeperBlock, and BumpContext::nextBlockSize.

Referenced by BumpDelete().

◆ BumpStats()

void BumpStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 688 of file bump.c.

690 {
691  BumpContext *set = (BumpContext *) context;
692  Size nblocks = 0;
693  Size totalspace = 0;
694  Size freespace = 0;
695  dlist_iter iter;
696 
697  Assert(BumpIsValid(set));
698 
699  dlist_foreach(iter, &set->blocks)
700  {
701  BumpBlock *block = dlist_container(BumpBlock, node, iter.cur);
702 
703  nblocks++;
704  totalspace += (block->endptr - (char *) block);
705  freespace += (block->endptr - block->freeptr);
706  }
707 
708  if (printfunc)
709  {
710  char stats_string[200];
711 
712  snprintf(stats_string, sizeof(stats_string),
713  "%zu total in %zu blocks; %zu free; %zu used",
714  totalspace, nblocks, freespace, totalspace - freespace);
715  printfunc(context, passthru, stats_string, print_to_stderr);
716  }
717 
718  if (totals)
719  {
720  totals->nblocks += nblocks;
721  totals->totalspace += totalspace;
722  totals->freespace += freespace;
723  }
724 }

References Assert, BumpContext::blocks, BumpIsValid, context, dlist_iter::cur, dlist_container, dlist_foreach, BumpBlock::endptr, BumpBlock::freeptr, MemoryContextCounters::freespace, MemoryContextCounters::nblocks, snprintf, and MemoryContextCounters::totalspace.

◆ GenerationAlloc()

void* GenerationAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 527 of file generation.c.

528 {
529  GenerationContext *set = (GenerationContext *) context;
530  GenerationBlock *block;
531  Size chunk_size;
532  Size required_size;
533 
534  Assert(GenerationIsValid(set));
535 
536 #ifdef MEMORY_CONTEXT_CHECKING
537  /* ensure there's always space for the sentinel byte */
538  chunk_size = MAXALIGN(size + 1);
539 #else
540  chunk_size = MAXALIGN(size);
541 #endif
542 
543  /*
544  * If requested size exceeds maximum for chunks we hand the request off to
545  * GenerationAllocLarge().
546  */
547  if (chunk_size > set->allocChunkLimit)
548  return GenerationAllocLarge(context, size, flags);
549 
550  required_size = chunk_size + Generation_CHUNKHDRSZ;
551 
552  /*
553  * Not an oversized chunk. We try to first make use of the current block,
554  * but if there's not enough space in it, instead of allocating a new
555  * block, we look to see if the empty freeblock has enough space. We
556  * don't try reusing the keeper block. If it's become empty we'll reuse
557  * that again only if the context is reset.
558  *
559  * We only try reusing the freeblock if we've no space for this allocation
560  * on the current block. When a freeblock exists, we'll switch to it once
561  * the first time we can't fit an allocation in the current block. We
562  * avoid ping-ponging between the two as we need to be careful not to
563  * fragment differently sized consecutive allocations between several
564  * blocks. Going between the two could cause fragmentation for FIFO
565  * workloads, which generation is meant to be good at.
566  */
567  block = set->block;
568 
569  if (unlikely(GenerationBlockFreeBytes(block) < required_size))
570  {
571  GenerationBlock *freeblock = set->freeblock;
572 
573  /* freeblock, if set, must be empty */
574  Assert(freeblock == NULL || GenerationBlockIsEmpty(freeblock));
575 
576  /* check if we have a freeblock and if it's big enough */
577  if (freeblock != NULL &&
578  GenerationBlockFreeBytes(freeblock) >= required_size)
579  {
580  /* make the freeblock the current block */
581  set->freeblock = NULL;
582  set->block = freeblock;
583 
585  freeblock,
586  size,
587  chunk_size);
588  }
589  else
590  {
591  /*
592  * No freeblock, or it's not big enough for this allocation. Make
593  * a new block.
594  */
595  return GenerationAllocFromNewBlock(context, size, flags, chunk_size);
596  }
597  }
598 
599  /* The current block has space, so just allocate chunk there. */
600  return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
601 }

References GenerationContext::allocChunkLimit, Assert, GenerationContext::block, context, GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationAllocChunkFromBlock(), GenerationAllocFromNewBlock(), GenerationAllocLarge(), GenerationBlockFreeBytes(), GenerationBlockIsEmpty, GenerationIsValid, MAXALIGN, size, and unlikely.

Referenced by GenerationRealloc().
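A hedged usage sketch (assuming GenerationContextCreate() takes the usual parent/name/three-size arguments): generation contexts target FIFO-like allocation patterns; chunks are carved sequentially from the current block, and a block is only handed back to malloc once every chunk on it has been freed, as GenerationFree() below shows.

    MemoryContext gencxt;

    gencxt = GenerationContextCreate(CurrentMemoryContext,
                                     "generation demo",
                                     ALLOCSET_DEFAULT_SIZES);

    (void) MemoryContextAlloc(gencxt, 256);
    MemoryContextDelete(gencxt);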

◆ GenerationDelete()

void GenerationDelete ( MemoryContext  context)

Definition at line 328 of file generation.c.

329 {
330  /* Reset to release all releasable GenerationBlocks */
331  GenerationReset(context);
332  /* And free the context header and keeper block */
333  free(context);
334 }

References context, free, and GenerationReset().

◆ GenerationFree()

void GenerationFree ( void *  pointer)

Definition at line 689 of file generation.c.

690 {
692  GenerationBlock *block;
693  GenerationContext *set;
694 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
695  || defined(CLOBBER_FREED_MEMORY)
696  Size chunksize;
697 #endif
698 
699  /* Allow access to the chunk header. */
701 
703  {
704  block = ExternalChunkGetBlock(chunk);
705 
706  /*
707  * Try to verify that we have a sane block pointer: the block header
708  * should reference a generation context.
709  */
710  if (!GenerationBlockIsValid(block))
711  elog(ERROR, "could not find block containing chunk %p", chunk);
712 
713 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
714  || defined(CLOBBER_FREED_MEMORY)
715  chunksize = block->endptr - (char *) pointer;
716 #endif
717  }
718  else
719  {
720  block = MemoryChunkGetBlock(chunk);
721 
722  /*
723  * In this path, for speed reasons we just Assert that the referenced
724  * block is good. Future field experience may show that this Assert
725  * had better become a regular runtime test-and-elog check.
726  */
728 
729 #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
730  || defined(CLOBBER_FREED_MEMORY)
731  chunksize = MemoryChunkGetValue(chunk);
732 #endif
733  }
734 
735 #ifdef MEMORY_CONTEXT_CHECKING
736  /* Test for someone scribbling on unused space in chunk */
737  Assert(chunk->requested_size < chunksize);
738  if (!sentinel_ok(pointer, chunk->requested_size))
739  elog(WARNING, "detected write past chunk end in %s %p",
740  ((MemoryContext) block->context)->name, chunk);
741 #endif
742 
743 #ifdef CLOBBER_FREED_MEMORY
744  wipe_mem(pointer, chunksize);
745 #endif
746 
747 #ifdef MEMORY_CONTEXT_CHECKING
748  /* Reset requested_size to InvalidAllocSize in freed chunks */
749  chunk->requested_size = InvalidAllocSize;
750 #endif
751 
752  block->nfree += 1;
753 
754  Assert(block->nchunks > 0);
755  Assert(block->nfree <= block->nchunks);
756  Assert(block != block->context->freeblock);
757 
758  /* If there are still allocated chunks in the block, we're done. */
759  if (likely(block->nfree < block->nchunks))
760  return;
761 
762  set = block->context;
763 
764  /*-----------------------
765  * The block this allocation was on has now become completely empty of
766  * chunks. In the general case, we can now return the memory for this
767  * block back to malloc. However, there are cases where we don't want to
768  * do that:
769  *
770  * 1) If it's the keeper block. This block was malloc'd in the same
771  * allocation as the context itself and can't be free'd without
772  * freeing the context.
773  * 2) If it's the current block. We could free this, but doing so would
774  * leave us nothing to set the current block to, so we just mark the
775  * block as empty so new allocations can reuse it again.
776  * 3) If we have no "freeblock" set, then we save a single block for
777  * future allocations to avoid having to malloc a new block again.
778  * This is useful for FIFO workloads as it avoids continual
779  * free/malloc cycles.
780  */
781  if (IsKeeperBlock(set, block) || set->block == block)
782  GenerationBlockMarkEmpty(block); /* case 1 and 2 */
783  else if (set->freeblock == NULL)
784  {
785  /* case 3 */
787  set->freeblock = block;
788  }
789  else
790  GenerationBlockFree(set, block); /* Otherwise, free it */
791 }

References Assert, GenerationContext::block, chunk, GenerationBlock::context, elog, GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationBlockFree(), GenerationBlockIsValid, GenerationBlockMarkEmpty(), InvalidAllocSize, IsKeeperBlock, likely, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), GenerationBlock::nchunks, GenerationBlock::nfree, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

Referenced by GenerationRealloc().

◆ GenerationGetChunkContext()

MemoryContext GenerationGetChunkContext ( void *  pointer)

Definition at line 947 of file generation.c.

948 {
950  GenerationBlock *block;
951 
952  /* Allow access to the chunk header. */
954 
956  block = ExternalChunkGetBlock(chunk);
957  else
959 
960  /* Disallow access to the chunk header. */
962 
964  return &block->context->header;
965 }

References Assert, chunk, GenerationBlock::context, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, GenerationContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationGetChunkSpace()

Size GenerationGetChunkSpace ( void *  pointer)

Definition at line 973 of file generation.c.

 974 {
 975  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
 976  Size chunksize;
 977 
 978  /* Allow access to the chunk header. */
 979  VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
 980 
 981  if (MemoryChunkIsExternal(chunk))
 982  {
 983  GenerationBlock *block = ExternalChunkGetBlock(chunk);
 984 
 985  Assert(GenerationBlockIsValid(block));
 986  chunksize = block->endptr - (char *) pointer;
 987  }
 988  else
 989  chunksize = MemoryChunkGetValue(chunk);
 990 
 991  /* Disallow access to the chunk header. */
 992  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
 993 
 994  return Generation_CHUNKHDRSZ + chunksize;
 995 }

References Assert, chunk, GenerationBlock::endptr, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationBlockIsValid, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.
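
Callers do not normally invoke GenerationGetChunkContext() or GenerationGetChunkSpace() directly; in current releases the generic GetMemoryChunkContext() and GetMemoryChunkSpace() helpers dispatch here based on the method ID stored in the chunk header. A hedged sketch of that indirect use:

MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                            "chunk info example",
                                            ALLOCSET_DEFAULT_SIZES);
char       *p = MemoryContextAlloc(gen, 100);

/* resolves to GenerationGetChunkContext() for this pointer */
Assert(GetMemoryChunkContext(p) == gen);

/* resolves to GenerationGetChunkSpace(): header plus carved chunk size */
elog(DEBUG1, "chunk space: %zu", GetMemoryChunkSpace(p));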

◆ GenerationIsEmpty()

bool GenerationIsEmpty ( MemoryContext  context)

Definition at line 1002 of file generation.c.

1003 {
1004  GenerationContext *set = (GenerationContext *) context;
1005  dlist_iter iter;
1006 
1007  Assert(GenerationIsValid(set));
1008 
1009  dlist_foreach(iter, &set->blocks)
1010  {
1011  GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1012 
1013  if (block->nchunks > 0)
1014  return false;
1015  }
1016 
1017  return true;
1018 }

References Assert, GenerationContext::blocks, context, dlist_iter::cur, dlist_container, dlist_foreach, GenerationIsValid, and GenerationBlock::nchunks.
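
A generation context counts as empty as soon as no block carries a live chunk, which is the state right after creation or after a reset. A hedged sketch via the generic MemoryContextIsEmpty() wrapper:

MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                            "emptiness example",
                                            ALLOCSET_DEFAULT_SIZES);

Assert(MemoryContextIsEmpty(gen));          /* only the keeper block, no chunks */
(void) MemoryContextAlloc(gen, 32);
Assert(!MemoryContextIsEmpty(gen));
MemoryContextReset(gen);
Assert(MemoryContextIsEmpty(gen));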

◆ GenerationRealloc()

void* GenerationRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 800 of file generation.c.

 801 {
 802  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
 803  GenerationContext *set;
804  GenerationBlock *block;
805  GenerationPointer newPointer;
806  Size oldsize;
807 
 808  /* Allow access to the chunk header. */
 809  VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ);
 810 
 811  if (MemoryChunkIsExternal(chunk))
 812  {
813  block = ExternalChunkGetBlock(chunk);
814 
815  /*
816  * Try to verify that we have a sane block pointer: the block header
817  * should reference a generation context.
818  */
819  if (!GenerationBlockIsValid(block))
820  elog(ERROR, "could not find block containing chunk %p", chunk);
821 
822  oldsize = block->endptr - (char *) pointer;
823  }
824  else
825  {
826  block = MemoryChunkGetBlock(chunk);
827 
828  /*
829  * In this path, for speed reasons we just Assert that the referenced
830  * block is good. Future field experience may show that this Assert
831  * had better become a regular runtime test-and-elog check.
 832  */
 833  Assert(GenerationBlockIsValid(block));
 834 
835  oldsize = MemoryChunkGetValue(chunk);
836  }
837 
838  set = block->context;
839 
840 #ifdef MEMORY_CONTEXT_CHECKING
841  /* Test for someone scribbling on unused space in chunk */
842  Assert(chunk->requested_size < oldsize);
843  if (!sentinel_ok(pointer, chunk->requested_size))
844  elog(WARNING, "detected write past chunk end in %s %p",
845  ((MemoryContext) set)->name, chunk);
846 #endif
847 
848  /*
849  * Maybe the allocated area already big enough. (In particular, we always
850  * fall out here if the requested size is a decrease.)
851  *
852  * This memory context does not use power-of-2 chunk sizing and instead
853  * carves the chunks to be as small as possible, so most repalloc() calls
854  * will end up in the palloc/memcpy/pfree branch.
855  *
856  * XXX Perhaps we should annotate this condition with unlikely()?
857  */
858 #ifdef MEMORY_CONTEXT_CHECKING
859  /* With MEMORY_CONTEXT_CHECKING, we need an extra byte for the sentinel */
860  if (oldsize > size)
861 #else
862  if (oldsize >= size)
863 #endif
864  {
865 #ifdef MEMORY_CONTEXT_CHECKING
866  Size oldrequest = chunk->requested_size;
867 
868 #ifdef RANDOMIZE_ALLOCATED_MEMORY
869  /* We can only fill the extra space if we know the prior request */
870  if (size > oldrequest)
871  randomize_mem((char *) pointer + oldrequest,
872  size - oldrequest);
873 #endif
874 
875  chunk->requested_size = size;
876 
877  /*
878  * If this is an increase, mark any newly-available part UNDEFINED.
879  * Otherwise, mark the obsolete part NOACCESS.
880  */
881  if (size > oldrequest)
882  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
883  size - oldrequest);
884  else
885  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
886  oldsize - size);
887 
888  /* set mark to catch clobber of "unused" space */
889  set_sentinel(pointer, size);
890 #else /* !MEMORY_CONTEXT_CHECKING */
891 
892  /*
893  * We don't have the information to determine whether we're growing
894  * the old request or shrinking it, so we conservatively mark the
895  * entire new allocation DEFINED.
896  */
 897  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
 898  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
 899 #endif
900 
 901  /* Disallow access to the chunk header. */
 902  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
 903 
904  return pointer;
905  }
906 
907  /* allocate new chunk (this also checks size is valid) */
908  newPointer = GenerationAlloc((MemoryContext) set, size, flags);
909 
910  /* leave immediately if request was not completed */
911  if (newPointer == NULL)
912  {
 913  /* Disallow access to the chunk header. */
 914  VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ);
 915  return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
916  }
917 
918  /*
919  * GenerationAlloc() may have returned a region that is still NOACCESS.
920  * Change it to UNDEFINED for the moment; memcpy() will then transfer
921  * definedness from the old allocation to the new. If we know the old
922  * allocation, copy just that much. Otherwise, make the entire old chunk
923  * defined to avoid errors as we copy the currently-NOACCESS trailing
924  * bytes.
925  */
926  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
927 #ifdef MEMORY_CONTEXT_CHECKING
928  oldsize = chunk->requested_size;
929 #else
930  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
931 #endif
932 
933  /* transfer existing data (certain to fit) */
934  memcpy(newPointer, pointer, oldsize);
935 
936  /* free old chunk */
937  GenerationFree(pointer);
938 
939  return newPointer;
940 }

References Assert, chunk, GenerationBlock::context, elog, GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, Generation_CHUNKHDRSZ, GenerationAlloc(), GenerationBlockIsValid, GenerationFree(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), name, PointerGetMemoryChunk, size, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.
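
Because generation chunks are carved to the exact requested size (no power-of-2 rounding), only shrink requests normally stay in place; grow requests take the allocate/copy/free path. A hedged repalloc() sketch:

MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                            "repalloc example",
                                            ALLOCSET_DEFAULT_SIZES);
char       *buf = MemoryContextAlloc(gen, 128);

buf = repalloc(buf, 64);        /* fits in the existing chunk, same pointer */
buf = repalloc(buf, 256);       /* GenerationAlloc + memcpy + GenerationFree */
pfree(buf);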

◆ GenerationReset()

void GenerationReset ( MemoryContext  context)

Definition at line 283 of file generation.c.

 284 {
 285  GenerationContext *set = (GenerationContext *) context;
 286  dlist_mutable_iter miter;
 287 
 288  Assert(GenerationIsValid(set));
 289 
290 #ifdef MEMORY_CONTEXT_CHECKING
291  /* Check for corruption and leaks before freeing */
292  GenerationCheck(context);
293 #endif
294 
295  /*
296  * NULLify the free block pointer. We must do this before calling
297  * GenerationBlockFree as that function never expects to free the
298  * freeblock.
299  */
300  set->freeblock = NULL;
301 
302  dlist_foreach_modify(miter, &set->blocks)
303  {
304  GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
305 
 306  if (IsKeeperBlock(set, block))
 307  GenerationBlockMarkEmpty(block);
 308  else
309  GenerationBlockFree(set, block);
310  }
311 
312  /* set it so new allocations to make use of the keeper block */
313  set->block = KeeperBlock(set);
314 
315  /* Reset block size allocation sequence, too */
316  set->nextBlockSize = set->initBlockSize;
317 
318  /* Ensure there is only 1 item in the dlist */
 319  Assert(!dlist_is_empty(&set->blocks));
 320  Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks)));
 321 }

References Assert, GenerationContext::block, GenerationContext::blocks, context, dlist_mutable_iter::cur, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), GenerationContext::freeblock, GenerationBlockFree(), GenerationBlockMarkEmpty(), GenerationIsValid, GenerationContext::initBlockSize, IsKeeperBlock, KeeperBlock, and GenerationContext::nextBlockSize.

Referenced by GenerationDelete().
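
MemoryContextReset() reaches this routine through the context's method table; every block except the keeper block is returned to malloc, and the keeper block is marked empty so it can be reused. A hedged sketch:

MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
                                            "reset example",
                                            ALLOCSET_DEFAULT_SIZES);

for (int i = 0; i < 1000; i++)
    (void) MemoryContextAlloc(gen, 512);    /* forces several blocks */

MemoryContextReset(gen);                    /* only the keeper block remains */
Assert(MemoryContextIsEmpty(gen));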

◆ GenerationStats()

void GenerationStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 1033 of file generation.c.

1036 {
1037  GenerationContext *set = (GenerationContext *) context;
1038  Size nblocks = 0;
1039  Size nchunks = 0;
1040  Size nfreechunks = 0;
1041  Size totalspace;
1042  Size freespace = 0;
1043  dlist_iter iter;
1044 
1045  Assert(GenerationIsValid(set));
1046 
1047  /* Include context header in totalspace */
1048  totalspace = MAXALIGN(sizeof(GenerationContext));
1049 
1050  dlist_foreach(iter, &set->blocks)
1051  {
1052  GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1053 
1054  nblocks++;
1055  nchunks += block->nchunks;
1056  nfreechunks += block->nfree;
1057  totalspace += block->blksize;
1058  freespace += (block->endptr - block->freeptr);
1059  }
1060 
1061  if (printfunc)
1062  {
1063  char stats_string[200];
1064 
1065  snprintf(stats_string, sizeof(stats_string),
1066  "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
1067  totalspace, nblocks, nchunks, freespace,
1068  nfreechunks, totalspace - freespace);
1069  printfunc(context, passthru, stats_string, print_to_stderr);
1070  }
1071 
1072  if (totals)
1073  {
1074  totals->nblocks += nblocks;
1075  totals->freechunks += nfreechunks;
1076  totals->totalspace += totalspace;
1077  totals->freespace += freespace;
1078  }
1079 }

References Assert, GenerationBlock::blksize, GenerationContext::blocks, context, dlist_iter::cur, dlist_container, dlist_foreach, GenerationBlock::endptr, MemoryContextCounters::freechunks, GenerationBlock::freeptr, MemoryContextCounters::freespace, GenerationIsValid, MAXALIGN, MemoryContextCounters::nblocks, GenerationBlock::nchunks, GenerationBlock::nfree, snprintf, and MemoryContextCounters::totalspace.
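
This callback is normally reached through MemoryContextStats() and its variants, which walk the context tree and hand each context's stats line to a printing callback; the per-context line format is the snprintf() template shown above. A hedged sketch:

/* Dump statistics for the whole context tree to stderr; generation contexts
 * report via GenerationStats() and contribute to the grand totals. */
MemoryContextStats(TopMemoryContext);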

◆ MemoryContextAllocationFailure()

void* MemoryContextAllocationFailure ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 1147 of file mcxt.c.

1148 {
1149  if ((flags & MCXT_ALLOC_NO_OOM) == 0)
1150  {
1151  if (TopMemoryContext)
1152  MemoryContextStats(TopMemoryContext);
1153  ereport(ERROR,
1154  (errcode(ERRCODE_OUT_OF_MEMORY),
1155  errmsg("out of memory"),
1156  errdetail("Failed on request of size %zu in memory context \"%s\".",
1157  size, context->name)));
1158  }
1159  return NULL;
1160 }

References context, ereport, errcode(), errdetail(), errmsg(), ERROR, MCXT_ALLOC_NO_OOM, MemoryContextStats(), size, and TopMemoryContext.

Referenced by AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetRealloc(), BumpAllocFromNewBlock(), GenerationAllocFromNewBlock(), GenerationAllocLarge(), GenerationRealloc(), and SlabAllocFromNewBlock().
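
Allocator entry points call this helper when the underlying malloc fails. Whether the caller sees an ERROR or a NULL return depends on MCXT_ALLOC_NO_OOM. A hedged sketch:

/* Default behavior: failure raises ERRCODE_OUT_OF_MEMORY after dumping
 * context statistics, so this line either succeeds or does not return. */
char       *p = palloc(1024);

/* With MCXT_ALLOC_NO_OOM the failure is reported as a NULL result instead. */
char       *q = palloc_extended((Size) 512 * 1024 * 1024, MCXT_ALLOC_NO_OOM);

if (q == NULL)
    elog(LOG, "large allocation failed, continuing without it");

pfree(p);
if (q != NULL)
    pfree(q);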

◆ MemoryContextCheckSize()

static void MemoryContextCheckSize ( MemoryContext  context,
Size  size,
int  flags 
)
inlinestatic

Definition at line 167 of file memutils_internal.h.

 168 {
 169  if (unlikely(!AllocSizeIsValid(size)))
 170  {
 171  if (!(flags & MCXT_ALLOC_HUGE) || !AllocHugeSizeIsValid(size))
 172  MemoryContextSizeFailure(context, size, flags);
 173  }
 174 }

References AllocHugeSizeIsValid, AllocSizeIsValid, context, MCXT_ALLOC_HUGE, MemoryContextSizeFailure(), size, and unlikely.

Referenced by AllocSetAllocLarge(), AllocSetRealloc(), BumpAllocLarge(), and GenerationAllocLarge().
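
In effect, requests above MaxAllocSize (just under 1 GB) are rejected unless the caller passes MCXT_ALLOC_HUGE, in which case only the much larger MaxAllocHugeSize limit applies. A hedged sketch:

Size        big = (Size) 2 * 1024 * 1024 * 1024;    /* 2 GB */

/* palloc(big) would fail here with
 *   ERROR: invalid memory alloc request size 2147483648
 * raised by MemoryContextSizeFailure(). */

/* The huge-allocation APIs pass MCXT_ALLOC_HUGE and are allowed: */
char       *p = MemoryContextAllocHuge(CurrentMemoryContext, big);

pfree(p);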

◆ MemoryContextCreate()

void MemoryContextCreate ( MemoryContext  node,
NodeTag  tag,
MemoryContextMethodID  method_id,
MemoryContext  parent,
const char *  name 
)

Definition at line 1100 of file mcxt.c.

1105 {
1106  /* Creating new memory contexts is not allowed in a critical section */
1107  Assert(CritSectionCount == 0);
1108 
1109  /* Initialize all standard fields of memory context header */
1110  node->type = tag;
1111  node->isReset = true;
1112  node->methods = &mcxt_methods[method_id];
1113  node->parent = parent;
1114  node->firstchild = NULL;
1115  node->mem_allocated = 0;
1116  node->prevchild = NULL;
1117  node->name = name;
1118  node->ident = NULL;
1119  node->reset_cbs = NULL;
1120 
1121  /* OK to link node into context tree */
1122  if (parent)
1123  {
1124  node->nextchild = parent->firstchild;
1125  if (parent->firstchild != NULL)
1126  parent->firstchild->prevchild = node;
1127  parent->firstchild = node;
1128  /* inherit allowInCritSection flag from parent */
1129  node->allowInCritSection = parent->allowInCritSection;
1130  }
1131  else
1132  {
1133  node->nextchild = NULL;
1134  node->allowInCritSection = false;
1135  }
1136 
1137  VALGRIND_CREATE_MEMPOOL(node, 0, false);
1138 }

References MemoryContextData::allowInCritSection, Assert, CritSectionCount, MemoryContextData::firstchild, MemoryContextData::ident, MemoryContextData::isReset, mcxt_methods, MemoryContextData::mem_allocated, MemoryContextData::methods, name, MemoryContextData::name, MemoryContextData::nextchild, MemoryContextData::parent, MemoryContextData::prevchild, MemoryContextData::reset_cbs, and VALGRIND_CREATE_MEMPOOL.

Referenced by AllocSetContextCreateInternal(), BumpContextCreate(), GenerationContextCreate(), and SlabContextCreate().
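
The creation routines listed above follow the same pattern: allocate the type-specific header themselves, initialize their private fields, and then let MemoryContextCreate() fill in the shared MemoryContextData part and link the node under its parent. A hedged, simplified sketch (MyContext, parent and name are illustrative placeholders, not real symbols):

MyContext  *ctx = (MyContext *) malloc(sizeof(MyContext));

if (ctx == NULL)
    ereport(ERROR,
            (errcode(ERRCODE_OUT_OF_MEMORY),
             errmsg("out of memory")));

/* ... set up MyContext-specific fields (block lists, sizes, ...) here ... */

MemoryContextCreate((MemoryContext) ctx,
                    T_AllocSetContext,      /* NodeTag for the chosen context type */
                    MCTX_ASET_ID,           /* selects the matching mcxt_methods[] entry */
                    parent,
                    name);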

◆ MemoryContextSizeFailure()

void MemoryContextSizeFailure ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 1168 of file mcxt.c.

1169 {
1170  elog(ERROR, "invalid memory alloc request size %zu", size);
1171 }

References elog, ERROR, and size.

Referenced by MemoryContextCheckSize().

◆ SlabAlloc()

void* SlabAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 630 of file slab.c.

631 {
632  SlabContext *slab = (SlabContext *) context;
 633  SlabBlock *block;
 634  MemoryChunk *chunk;
 635 
636  Assert(SlabIsValid(slab));
637 
638  /* sanity check that this is pointing to a valid blocklist */
 639  Assert(slab->curBlocklistIndex >= 0);
 640  Assert(slab->curBlocklistIndex <= SlabBlocklistIndex(slab, slab->chunksPerBlock));
 641 
642  /*
643  * Make sure we only allow correct request size. This doubles as the
644  * MemoryContextCheckSize check.
645  */
646  if (unlikely(size != slab->chunkSize))
647  SlabAllocInvalidSize(context, size);
648 
649  if (unlikely(slab->curBlocklistIndex == 0))
650  {
651  /*
652  * Handle the case when there are no partially filled blocks
653  * available. This happens either when the last allocation took the
654  * last chunk in the block, or when SlabFree() free'd the final block.
655  */
656  return SlabAllocFromNewBlock(context, size, flags);
657  }
658  else
659  {
660  dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
661  int new_blocklist_idx;
662 
663  Assert(!dlist_is_empty(blocklist));
664 
665  /* grab the block from the blocklist */
666  block = dlist_head_element(SlabBlock, node, blocklist);
667 
668  /* make sure we actually got a valid block, with matching nfree */
669  Assert(block != NULL);
670  Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
671  Assert(block->nfree > 0);
672 
673  /* fetch the next chunk from this block */
674  chunk = SlabGetNextFreeChunk(slab, block);
675 
676  /* get the new blocklist index based on the new free chunk count */
677  new_blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
678 
679  /*
680  * Handle the case where the blocklist index changes. This also deals
681  * with blocks becoming full as only full blocks go at index 0.
682  */
683  if (unlikely(slab->curBlocklistIndex != new_blocklist_idx))
684  {
685  dlist_delete_from(blocklist, &block->node);
686  dlist_push_head(&slab->blocklist[new_blocklist_idx], &block->node);
687 
 688  if (dlist_is_empty(blocklist))
 689  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
 690  }
691  }
692 
693  return SlabAllocSetupNewChunk(context, block, chunk, size);
694 }

References Assert, SlabContext::blocklist, chunk, SlabContext::chunkSize, SlabContext::chunksPerBlock, context, SlabContext::curBlocklistIndex, dlist_delete_from(), dlist_head_element, dlist_is_empty(), dlist_push_head(), SlabBlock::nfree, SlabBlock::node, size, SlabAllocFromNewBlock(), SlabAllocSetupNewChunk(), SlabBlocklistIndex(), SlabFindNextBlockListIndex(), SlabGetNextFreeChunk(), SlabIsValid, and unlikely.
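
A slab context serves exactly one chunk size that is fixed at creation time, which is what makes the strict size check above possible. A hedged usage sketch (MyEntry is an illustrative struct, not a real PostgreSQL type):

typedef struct MyEntry
{
    int64       key;
    int64       value;
} MyEntry;

MemoryContext slabcxt = SlabContextCreate(CurrentMemoryContext,
                                          "MyEntry slab",
                                          SLAB_DEFAULT_BLOCK_SIZE,
                                          sizeof(MyEntry));
MyEntry    *e = (MyEntry *) MemoryContextAlloc(slabcxt, sizeof(MyEntry));

/* MemoryContextAlloc(slabcxt, some_other_size) would error out in SlabAlloc() */
pfree(e);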

◆ SlabDelete()

void SlabDelete ( MemoryContext  context)

Definition at line 485 of file slab.c.

486 {
 487  /* Reset to release all the SlabBlocks */
 488  SlabReset(context);
 489  /* And free the context header */
490  free(context);
491 }

References context, free, and SlabReset().

◆ SlabFree()

void SlabFree ( void *  pointer)

Definition at line 701 of file slab.c.

 702 {
 703  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
 704  SlabBlock *block;
705  SlabContext *slab;
706  int curBlocklistIdx;
707  int newBlocklistIdx;
708 
 709  /* Allow access to the chunk header. */
 710  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 711 
712  block = MemoryChunkGetBlock(chunk);
713 
714  /*
715  * For speed reasons we just Assert that the referenced block is good.
716  * Future field experience may show that this Assert had better become a
717  * regular runtime test-and-elog check.
718  */
719  Assert(SlabBlockIsValid(block));
720  slab = block->slab;
721 
722 #ifdef MEMORY_CONTEXT_CHECKING
723  /* Test for someone scribbling on unused space in chunk */
724  Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
725  if (!sentinel_ok(pointer, slab->chunkSize))
726  elog(WARNING, "detected write past chunk end in %s %p",
727  slab->header.name, chunk);
728 #endif
729 
730  /* push this chunk onto the head of the block's free list */
731  *(MemoryChunk **) pointer = block->freehead;
732  block->freehead = chunk;
733 
734  block->nfree++;
735 
736  Assert(block->nfree > 0);
737  Assert(block->nfree <= slab->chunksPerBlock);
738 
739 #ifdef CLOBBER_FREED_MEMORY
740  /* don't wipe the free list MemoryChunk pointer stored in the chunk */
741  wipe_mem((char *) pointer + sizeof(MemoryChunk *),
742  slab->chunkSize - sizeof(MemoryChunk *));
743 #endif
744 
745  curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
746  newBlocklistIdx = SlabBlocklistIndex(slab, block->nfree);
747 
748  /*
749  * Check if the block needs to be moved to another element on the
750  * blocklist based on it now having 1 more free chunk.
751  */
752  if (unlikely(curBlocklistIdx != newBlocklistIdx))
753  {
754  /* do the move */
755  dlist_delete_from(&slab->blocklist[curBlocklistIdx], &block->node);
756  dlist_push_head(&slab->blocklist[newBlocklistIdx], &block->node);
757 
758  /*
759  * The blocklist[curBlocklistIdx] may now be empty or we may now be
760  * able to use a lower-element blocklist. We'll need to redetermine
761  * what the slab->curBlocklistIndex is if the current blocklist was
762  * changed or if a lower element one was changed. We must ensure we
763  * use the list with the fullest block(s).
764  */
765  if (slab->curBlocklistIndex >= curBlocklistIdx)
 766  {
 767  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
 768 
769  /*
770  * We know there must be a block with at least 1 unused chunk as
771  * we just pfree'd one. Ensure curBlocklistIndex reflects this.
772  */
773  Assert(slab->curBlocklistIndex > 0);
774  }
775  }
776 
777  /* Handle when a block becomes completely empty */
778  if (unlikely(block->nfree == slab->chunksPerBlock))
779  {
780  /* remove the block */
781  dlist_delete_from(&slab->blocklist[newBlocklistIdx], &block->node);
782 
783  /*
784  * To avoid thrashing malloc/free, we keep a list of empty blocks that
785  * we can reuse again instead of having to malloc a new one.
 786  */
 787  if (dclist_count(&slab->emptyblocks) < SLAB_MAXIMUM_EMPTY_BLOCKS)
 788  dclist_push_head(&slab->emptyblocks, &block->node);
789  else
790  {
791  /*
792  * When we have enough empty blocks stored already, we actually
793  * free the block.
794  */
795 #ifdef CLOBBER_FREED_MEMORY
796  wipe_mem(block, slab->blockSize);
797 #endif
798  free(block);
799  slab->header.mem_allocated -= slab->blockSize;
800  }
801 
802  /*
803  * Check if we need to reset the blocklist index. This is required
804  * when the blocklist this block is on has become completely empty.
805  */
 806  if (slab->curBlocklistIndex == newBlocklistIdx &&
 807  dlist_is_empty(&slab->blocklist[newBlocklistIdx]))
 808  slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
 809  }
810 }

References Assert, SlabContext::blocklist, SlabContext::blockSize, chunk, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_push_head(), dlist_delete_from(), dlist_is_empty(), dlist_push_head(), elog(), SlabContext::emptyblocks, free, SlabBlock::freehead, SlabContext::fullChunkSize, SlabContext::header, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryContextData::name, SlabBlock::nfree, SlabBlock::node, PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SLAB_MAXIMUM_EMPTY_BLOCKS, SlabBlockIsValid, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), unlikely, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

◆ SlabGetChunkContext()

MemoryContext SlabGetChunkContext ( void *  pointer)

Definition at line 863 of file slab.c.

 864 {
 865  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
 866  SlabBlock *block;
 867 
 868  /* Allow access to the chunk header. */
 869  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 870 
 871  block = MemoryChunkGetBlock(chunk);
 872 
 873  /* Disallow access to the chunk header. */
 874  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
 875 
 876  Assert(SlabBlockIsValid(block));
 877 
 878  return &block->slab->header;
 879 }

References Assert, chunk, SlabContext::header, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabGetChunkSpace()

Size SlabGetChunkSpace ( void *  pointer)

Definition at line 887 of file slab.c.

 888 {
 889  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
 890  SlabBlock *block;
 891  SlabContext *slab;
 892 
 893  /* Allow access to the chunk header. */
 894  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 895 
 896  block = MemoryChunkGetBlock(chunk);
 897 
 898  /* Disallow access to the chunk header. */
 899  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
 900 
 901  Assert(SlabBlockIsValid(block));
 902  slab = block->slab;
 903 
 904  return slab->fullChunkSize;
 905 }

References Assert, chunk, SlabContext::fullChunkSize, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabIsEmpty()

bool SlabIsEmpty ( MemoryContext  context)

Definition at line 912 of file slab.c.

 913 {
 914  Assert(SlabIsValid((SlabContext *) context));
 915 
916  return (context->mem_allocated == 0);
917 }

References Assert, context, and SlabIsValid.

◆ SlabRealloc()

void* SlabRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 826 of file slab.c.

 827 {
 828  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
 829  SlabBlock *block;
830  SlabContext *slab;
831 
 832  /* Allow access to the chunk header. */
 833  VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
 834 
835  block = MemoryChunkGetBlock(chunk);
836 
 837  /* Disallow access to the chunk header. */
 838  VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
 839 
840  /*
841  * Try to verify that we have a sane block pointer: the block header
842  * should reference a slab context. (We use a test-and-elog, not just
843  * Assert, because it seems highly likely that we're here in error in the
844  * first place.)
845  */
846  if (!SlabBlockIsValid(block))
847  elog(ERROR, "could not find block containing chunk %p", chunk);
848  slab = block->slab;
849 
850  /* can't do actual realloc with slab, but let's try to be gentle */
851  if (size == slab->chunkSize)
852  return pointer;
853 
854  elog(ERROR, "slab allocator does not support realloc()");
855  return NULL; /* keep compiler quiet */
856 }

References chunk, SlabContext::chunkSize, elog(), ERROR, MemoryChunkGetBlock(), PointerGetMemoryChunk, size, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.
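
Continuing the hypothetical slabcxt/MyEntry sketch from the SlabAlloc() section: repalloc() on a slab chunk is only accepted as a no-op at the slab's fixed chunk size; any other size raises an error.

MyEntry    *e = (MyEntry *) MemoryContextAlloc(slabcxt, sizeof(MyEntry));

e = repalloc(e, sizeof(MyEntry));       /* same size: the original pointer is returned */

/* repalloc(e, sizeof(MyEntry) + 8) would fail with
 *   ERROR: slab allocator does not support realloc()
 */
pfree(e);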

◆ SlabReset()

void SlabReset ( MemoryContext  context)

Definition at line 431 of file slab.c.

432 {
433  SlabContext *slab = (SlabContext *) context;
434  dlist_mutable_iter miter;
435  int i;
436 
437  Assert(SlabIsValid(slab));
438 
439 #ifdef MEMORY_CONTEXT_CHECKING
440  /* Check for corruption and leaks before freeing */
441  SlabCheck(context);
442 #endif
443 
444  /* release any retained empty blocks */
445  dclist_foreach_modify(miter, &slab->emptyblocks)
446  {
447  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
448 
449  dclist_delete_from(&slab->emptyblocks, miter.cur);
450 
451 #ifdef CLOBBER_FREED_MEMORY
452  wipe_mem(block, slab->blockSize);
453 #endif
454  free(block);
455  context->mem_allocated -= slab->blockSize;
456  }
457 
458  /* walk over blocklist and free the blocks */
459  for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
460  {
461  dlist_foreach_modify(miter, &slab->blocklist[i])
462  {
463  SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
464 
465  dlist_delete(miter.cur);
466 
467 #ifdef CLOBBER_FREED_MEMORY
468  wipe_mem(block, slab->blockSize);
469 #endif
470  free(block);
471  context->mem_allocated -= slab->blockSize;
472  }
473  }
474 
475  slab->curBlocklistIndex = 0;
476 
477  Assert(context->mem_allocated == 0);
478 }

References Assert, SlabContext::blocklist, SlabContext::blockSize, context, dlist_mutable_iter::cur, SlabContext::curBlocklistIndex, dclist_delete_from(), dclist_foreach_modify, dlist_container, dlist_delete(), dlist_foreach_modify, SlabContext::emptyblocks, free, i, SLAB_BLOCKLIST_COUNT, and SlabIsValid.

Referenced by SlabDelete().

◆ SlabStats()

void SlabStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)

Definition at line 929 of file slab.c.

933 {
934  SlabContext *slab = (SlabContext *) context;
935  Size nblocks = 0;
936  Size freechunks = 0;
937  Size totalspace;
938  Size freespace = 0;
939  int i;
940 
941  Assert(SlabIsValid(slab));
942 
943  /* Include context header in totalspace */
944  totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
945 
946  /* Add the space consumed by blocks in the emptyblocks list */
947  totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
948 
949  for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
950  {
951  dlist_iter iter;
952 
953  dlist_foreach(iter, &slab->blocklist[i])
954  {
955  SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
956 
957  nblocks++;
958  totalspace += slab->blockSize;
959  freespace += slab->fullChunkSize * block->nfree;
960  freechunks += block->nfree;
961  }
962  }
963 
964  if (printfunc)
965  {
966  char stats_string[200];
967 
968  /* XXX should we include free chunks on empty blocks? */
969  snprintf(stats_string, sizeof(stats_string),
970  "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
971  totalspace, nblocks, dclist_count(&slab->emptyblocks),
972  freespace, freechunks, totalspace - freespace);
973  printfunc(context, passthru, stats_string, print_to_stderr);
974  }
975 
976  if (totals)
977  {
978  totals->nblocks += nblocks;
979  totals->freechunks += freechunks;
980  totals->totalspace += totalspace;
981  totals->freespace += freespace;
982  }
983 }

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunksPerBlock, context, dlist_iter::cur, dclist_count(), dlist_container, dlist_foreach, SlabContext::emptyblocks, MemoryContextCounters::freechunks, MemoryContextCounters::freespace, SlabContext::fullChunkSize, i, MemoryContextCounters::nblocks, SlabBlock::nfree, SLAB_BLOCKLIST_COUNT, Slab_CONTEXT_HDRSZ, SlabIsValid, snprintf, and MemoryContextCounters::totalspace.