PostgreSQL Source Code git master
Loading...
Searching...
No Matches
memutils_internal.h File Reference
#include "utils/memutils.h"
Include dependency graph for memutils_internal.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define PallocAlignedExtraBytes(alignto)    ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))
 
#define MEMORY_CONTEXT_METHODID_BITS   4
 
#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)
 

Typedefs

typedef enum MemoryContextMethodID MemoryContextMethodID
 

Enumerations

enum  MemoryContextMethodID {
  MCTX_0_RESERVED_UNUSEDMEM_ID , MCTX_1_RESERVED_GLIBC_ID , MCTX_2_RESERVED_GLIBC_ID , MCTX_ASET_ID ,
  MCTX_GENERATION_ID , MCTX_SLAB_ID , MCTX_ALIGNED_REDIRECT_ID , MCTX_BUMP_ID ,
  MCTX_8_UNUSED_ID , MCTX_9_UNUSED_ID , MCTX_10_UNUSED_ID , MCTX_11_UNUSED_ID ,
  MCTX_12_UNUSED_ID , MCTX_13_UNUSED_ID , MCTX_14_UNUSED_ID , MCTX_15_RESERVED_WIPEDMEM_ID
}
 

Functions

void * AllocSetAlloc (MemoryContext context, Size size, int flags)
 
void AllocSetFree (void *pointer)
 
void * AllocSetRealloc (void *pointer, Size size, int flags)
 
void AllocSetReset (MemoryContext context)
 
void AllocSetDelete (MemoryContext context)
 
MemoryContext AllocSetGetChunkContext (void *pointer)
 
Size AllocSetGetChunkSpace (void *pointer)
 
bool AllocSetIsEmpty (MemoryContext context)
 
void AllocSetStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * GenerationAlloc (MemoryContext context, Size size, int flags)
 
void GenerationFree (void *pointer)
 
void * GenerationRealloc (void *pointer, Size size, int flags)
 
void GenerationReset (MemoryContext context)
 
void GenerationDelete (MemoryContext context)
 
MemoryContext GenerationGetChunkContext (void *pointer)
 
Size GenerationGetChunkSpace (void *pointer)
 
bool GenerationIsEmpty (MemoryContext context)
 
void GenerationStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void * SlabAlloc (MemoryContext context, Size size, int flags)
 
void SlabFree (void *pointer)
 
void * SlabRealloc (void *pointer, Size size, int flags)
 
void SlabReset (MemoryContext context)
 
void SlabDelete (MemoryContext context)
 
MemoryContext SlabGetChunkContext (void *pointer)
 
Size SlabGetChunkSpace (void *pointer)
 
bool SlabIsEmpty (MemoryContext context)
 
void SlabStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void AlignedAllocFree (void *pointer)
 
void * AlignedAllocRealloc (void *pointer, Size size, int flags)
 
MemoryContext AlignedAllocGetChunkContext (void *pointer)
 
Size AlignedAllocGetChunkSpace (void *pointer)
 
void * BumpAlloc (MemoryContext context, Size size, int flags)
 
void BumpFree (void *pointer)
 
void * BumpRealloc (void *pointer, Size size, int flags)
 
void BumpReset (MemoryContext context)
 
void BumpDelete (MemoryContext context)
 
MemoryContext BumpGetChunkContext (void *pointer)
 
Size BumpGetChunkSpace (void *pointer)
 
bool BumpIsEmpty (MemoryContext context)
 
void BumpStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 
void MemoryContextCreate (MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
 
void * MemoryContextAllocationFailure (MemoryContext context, Size size, int flags)
 
pg_noreturn void MemoryContextSizeFailure (MemoryContext context, Size size, int flags)
 
static void MemoryContextCheckSize (MemoryContext context, Size size, int flags)
 

Macro Definition Documentation

◆ MEMORY_CONTEXT_METHODID_BITS

#define MEMORY_CONTEXT_METHODID_BITS   4

Definition at line 145 of file memutils_internal.h.

◆ MEMORY_CONTEXT_METHODID_MASK

#define MEMORY_CONTEXT_METHODID_MASK    ((((uint64) 1) << MEMORY_CONTEXT_METHODID_BITS) - 1)

Definition at line 146 of file memutils_internal.h.

167{
168 if (unlikely(!AllocSizeIsValid(size)))
169 {
170 if (!(flags & MCXT_ALLOC_HUGE) || !AllocHugeSizeIsValid(size))
171 MemoryContextSizeFailure(context, size, flags);
172 }
173}
174
175#endif /* MEMUTILS_INTERNAL_H */
#define unlikely(x)
Definition c.h:424
#define MCXT_ALLOC_HUGE
Definition fe_memutils.h:28
#define AllocHugeSizeIsValid(size)
Definition memutils.h:49
#define AllocSizeIsValid(size)
Definition memutils.h:42
pg_noreturn void MemoryContextSizeFailure(MemoryContext context, Size size, int flags)
Definition mcxt.c:1219

◆ PallocAlignedExtraBytes

#define PallocAlignedExtraBytes (   alignto)     ((alignto) + (sizeof(MemoryChunk) - MAXIMUM_ALIGNOF))

Definition at line 104 of file memutils_internal.h.

Typedef Documentation

◆ MemoryContextMethodID

Enumeration Type Documentation

◆ MemoryContextMethodID

Enumerator
MCTX_0_RESERVED_UNUSEDMEM_ID 
MCTX_1_RESERVED_GLIBC_ID 
MCTX_2_RESERVED_GLIBC_ID 
MCTX_ASET_ID 
MCTX_GENERATION_ID 
MCTX_SLAB_ID 
MCTX_ALIGNED_REDIRECT_ID 
MCTX_BUMP_ID 
MCTX_8_UNUSED_ID 
MCTX_9_UNUSED_ID 
MCTX_10_UNUSED_ID 
MCTX_11_UNUSED_ID 
MCTX_12_UNUSED_ID 
MCTX_13_UNUSED_ID 
MCTX_14_UNUSED_ID 
MCTX_15_RESERVED_WIPEDMEM_ID 

Definition at line 121 of file memutils_internal.h.

122{
123 MCTX_0_RESERVED_UNUSEDMEM_ID, /* 0000 occurs in never-used memory */
124 MCTX_1_RESERVED_GLIBC_ID, /* glibc malloc'd chunks usually match 0001 */
125 MCTX_2_RESERVED_GLIBC_ID, /* glibc malloc'd chunks > 128kB match 0010 */
138 MCTX_15_RESERVED_WIPEDMEM_ID /* 1111 occurs in wipe_mem'd memory */
MemoryContextMethodID
@ MCTX_15_RESERVED_WIPEDMEM_ID
@ MCTX_GENERATION_ID
@ MCTX_14_UNUSED_ID
@ MCTX_12_UNUSED_ID
@ MCTX_10_UNUSED_ID
@ MCTX_BUMP_ID
@ MCTX_11_UNUSED_ID
@ MCTX_8_UNUSED_ID
@ MCTX_1_RESERVED_GLIBC_ID
@ MCTX_SLAB_ID
@ MCTX_9_UNUSED_ID
@ MCTX_0_RESERVED_UNUSEDMEM_ID
@ MCTX_ASET_ID
@ MCTX_2_RESERVED_GLIBC_ID
@ MCTX_ALIGNED_REDIRECT_ID
@ MCTX_13_UNUSED_ID

Function Documentation

◆ AlignedAllocFree()

void AlignedAllocFree ( void * pointer)
extern

Definition at line 29 of file alignedalloc.c.

30{
32 void *unaligned;
33
35
37
38 /* obtain the original (unaligned) allocated pointer */
40
41#ifdef MEMORY_CONTEXT_CHECKING
42 /* Test for someone scribbling on unused space in chunk */
43 if (!sentinel_ok(pointer, chunk->requested_size))
44 elog(WARNING, "detected write past chunk end in %s %p",
46#endif
47
48 /*
49 * Create a dummy vchunk covering the start of the unaligned chunk, but
50 * not overlapping the aligned chunk. This will be freed while pfree'ing
51 * the unaligned chunk, keeping Valgrind happy. Then when we return to
52 * the outer pfree, that will clean up the vchunk for the aligned chunk.
53 */
55 (char *) pointer - (char *) unaligned);
56
57 /* Recursively pfree the unaligned chunk */
59}
#define Assert(condition)
Definition c.h:906
#define WARNING
Definition elog.h:36
#define elog(elevel,...)
Definition elog.h:226
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext GetMemoryChunkContext(void *pointer)
Definition mcxt.c:756
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition memdebug.h:26
#define VALGRIND_MEMPOOL_ALLOC(context, addr, size)
Definition memdebug.h:29
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
static int fb(int x)
const char * name

References Assert, elog, fb(), GetMemoryChunkContext(), MemoryChunkGetBlock(), MemoryChunkIsExternal(), name, pfree(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MEMPOOL_ALLOC, and WARNING.

◆ AlignedAllocGetChunkContext()

◆ AlignedAllocGetChunkSpace()

Size AlignedAllocGetChunkSpace ( void * pointer)
extern

Definition at line 176 of file alignedalloc.c.

177{
179 void *unaligned;
180 Size space;
181
183
186
188
189 return space;
190}
size_t Size
Definition c.h:652
Size GetMemoryChunkSpace(void *pointer)
Definition mcxt.c:770

References fb(), GetMemoryChunkSpace(), MemoryChunkGetBlock(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AlignedAllocRealloc()

void * AlignedAllocRealloc ( void * pointer,
Size  size,
int  flags 
)
extern

Definition at line 70 of file alignedalloc.c.

71{
74 void *unaligned;
75 MemoryContext ctx;
77 void *newptr;
78
80
83
84 /* sanity check this is a power of 2 value */
85 Assert((alignto & (alignto - 1)) == 0);
86
87 /*
88 * Determine the size of the original allocation. We can't determine this
89 * exactly as GetMemoryChunkSpace() returns the total space used for the
90 * allocation, which for contexts like aset includes rounding up to the
91 * next power of 2. However, this value is just used to memcpy() the old
92 * data into the new allocation, so we only need to concern ourselves with
93 * not reading beyond the end of the original allocation's memory. The
94 * drawback here is that we may copy more bytes than we need to, which
95 * only amounts to wasted effort. We can safely subtract the extra bytes
96 * that we requested to allow us to align the pointer. We must also
97 * subtract the space for the unaligned pointer's MemoryChunk since
98 * GetMemoryChunkSpace should have included that. This does assume that
99 * all context types use MemoryChunk as a chunk header.
100 */
103
104#ifdef MEMORY_CONTEXT_CHECKING
105 /* check that GetMemoryChunkSpace returned something realistic */
106 Assert(old_size >= redirchunk->requested_size);
107#endif
108
109 /*
110 * To keep things simple, we always allocate a new aligned chunk and copy
111 * data into it. Because of the above inaccuracy, this may end in copying
112 * more data than was in the original allocation request size, but that
113 * should be OK.
114 */
116 newptr = MemoryContextAllocAligned(ctx, size, alignto, flags);
117
118 /* Cope cleanly with OOM */
119 if (unlikely(newptr == NULL))
120 {
122 return MemoryContextAllocationFailure(ctx, size, flags);
123 }
124
125 /*
126 * We may memcpy more than the original allocation request size, which
127 * would result in trying to copy trailing bytes that the original
128 * MemoryContextAllocAligned call marked NOACCESS. So we must mark the
129 * entire old_size as defined. That's slightly annoying, but probably not
130 * worth improving.
131 */
133 memcpy(newptr, pointer, Min(size, old_size));
134
135 /*
136 * Create a dummy vchunk covering the start of the old unaligned chunk,
137 * but not overlapping the aligned chunk. This will be freed while
138 * pfree'ing the old unaligned chunk, keeping Valgrind happy. Then when
139 * we return to repalloc, it will move the vchunk for the aligned chunk.
140 */
142 (char *) pointer - (char *) unaligned);
143
145
146 return newptr;
147}
#define Min(x, y)
Definition c.h:1040
void * MemoryContextAllocAligned(MemoryContext context, Size size, Size alignto, int flags)
Definition mcxt.c:1482
void * MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
Definition mcxt.c:1198
#define PallocAlignedExtraBytes(alignto)
static Size MemoryChunkGetValue(MemoryChunk *chunk)

References Assert, fb(), GetMemoryChunkContext(), GetMemoryChunkSpace(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryContextAllocAligned(), MemoryContextAllocationFailure(), Min, PallocAlignedExtraBytes, pfree(), PointerGetMemoryChunk, unlikely, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MEMPOOL_ALLOC.

◆ AllocSetAlloc()

void * AllocSetAlloc ( MemoryContext  context,
Size  size,
int  flags 
)
extern

Definition at line 1012 of file aset.c.

1013{
1014 AllocSet set = (AllocSet) context;
1015 AllocBlock block;
1017 int fidx;
1018 Size chunk_size;
1020
1021 Assert(AllocSetIsValid(set));
1022
1023 /* due to the keeper block set->blocks should never be NULL */
1024 Assert(set->blocks != NULL);
1025
1026 /*
1027 * If requested size exceeds maximum for chunks we hand the request off to
1028 * AllocSetAllocLarge().
1029 */
1030 if (size > set->allocChunkLimit)
1031 return AllocSetAllocLarge(context, size, flags);
1032
1033 /*
1034 * Request is small enough to be treated as a chunk. Look in the
1035 * corresponding free list to see if there is a free chunk we could reuse.
1036 * If one is found, remove it from the free list, make it again a member
1037 * of the alloc set and return its data address.
1038 *
1039 * Note that we don't attempt to ensure there's space for the sentinel
1040 * byte here. We expect a large proportion of allocations to be for sizes
1041 * which are already a power of 2. If we were to always make space for a
1042 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1043 * doubling the memory requirements for such allocations.
1044 */
1045 fidx = AllocSetFreeIndex(size);
1046 chunk = set->freelist[fidx];
1047 if (chunk != NULL)
1048 {
1050
1051 /* Allow access to the chunk header. */
1053
1055
1056 /* pop this chunk off the freelist */
1058 set->freelist[fidx] = link->next;
1060
1061#ifdef MEMORY_CONTEXT_CHECKING
1062 chunk->requested_size = size;
1063 /* set mark to catch clobber of "unused" space */
1066#endif
1067#ifdef RANDOMIZE_ALLOCATED_MEMORY
1068 /* fill the allocated space with junk */
1069 randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1070#endif
1071
1072 /* Ensure any padding bytes are marked NOACCESS. */
1075
1076 /* Disallow access to the chunk header. */
1078
1080 }
1081
1082 /*
1083 * Choose the actual chunk size to allocate.
1084 */
1085 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1086 Assert(chunk_size >= size);
1087
1088 block = set->blocks;
1089 availspace = block->endptr - block->freeptr;
1090
1091 /*
1092 * If there is enough room in the active allocation block, we will put the
1093 * chunk into that block. Else must start a new one.
1094 */
1095 if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1096 return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1097
1098 /* There's enough space on the current block, so allocate from that */
1099 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1100}
static pg_noinline void * AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags, int fidx)
Definition aset.c:861
#define AllocSetIsValid(set)
Definition aset.c:200
#define GetFreeListLink(chkptr)
Definition aset.c:138
#define ALLOC_CHUNKHDRSZ
Definition aset.c:109
#define GetChunkSizeFromFreeListIdx(fidx)
Definition aset.c:146
static int AllocSetFreeIndex(Size size)
Definition aset.c:277
static void * AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block, Size size, Size chunk_size, int fidx)
Definition aset.c:816
static pg_noinline void * AllocSetAllocLarge(MemoryContext context, Size size, int flags)
Definition aset.c:735
AllocSetContext * AllocSet
Definition aset.c:173
#define MemoryChunkGetPointer(c)
char * freeptr
Definition aset.c:192
char * endptr
Definition aset.c:193
uint32 allocChunkLimit
Definition aset.c:168
AllocBlock blocks
Definition aset.c:162
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition aset.c:163

References ALLOC_CHUNKHDRSZ, AllocSetContext::allocChunkLimit, AllocSetAllocChunkFromBlock(), AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetFreeIndex(), AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MemoryChunkGetPointer, MemoryChunkGetValue(), unlikely, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

Referenced by AllocSetRealloc().

◆ AllocSetDelete()

void AllocSetDelete ( MemoryContext  context)
extern

Definition at line 632 of file aset.c.

633{
634 AllocSet set = (AllocSet) context;
635 AllocBlock block = set->blocks;
637
639
640#ifdef MEMORY_CONTEXT_CHECKING
641 /* Check for corruption and leaks before freeing */
642 AllocSetCheck(context);
643#endif
644
645 /* Remember keeper block size for Assert below */
646 keepersize = KeeperBlock(set)->endptr - ((char *) set);
647
648 /*
649 * If the context is a candidate for a freelist, put it into that freelist
650 * instead of destroying it.
651 */
652 if (set->freeListIndex >= 0)
653 {
655
656 /*
657 * Reset the context, if it needs it, so that we aren't hanging on to
658 * more than the initial malloc chunk.
659 */
660 if (!context->isReset)
661 MemoryContextResetOnly(context);
662
663 /*
664 * If the freelist is full, just discard what's already in it. See
665 * comments with context_freelists[].
666 */
667 if (freelist->num_free >= MAX_FREE_CONTEXTS)
668 {
669 while (freelist->first_free != NULL)
670 {
671 AllocSetContext *oldset = freelist->first_free;
672
674 freelist->num_free--;
675
676 /* Destroy the context's vpool --- see notes below */
678
679 /* All that remains is to free the header/initial block */
680 free(oldset);
681 }
682 Assert(freelist->num_free == 0);
683 }
684
685 /* Now add the just-deleted context to the freelist. */
686 set->header.nextchild = (MemoryContext) freelist->first_free;
687 freelist->first_free = set;
688 freelist->num_free++;
689
690 return;
691 }
692
693 /* Free all blocks, except the keeper which is part of context header */
694 while (block != NULL)
695 {
696 AllocBlock next = block->next;
697
698 if (!IsKeeperBlock(set, block))
699 context->mem_allocated -= block->endptr - ((char *) block);
700
701#ifdef CLOBBER_FREED_MEMORY
702 wipe_mem(block, block->freeptr - ((char *) block));
703#endif
704
705 if (!IsKeeperBlock(set, block))
706 {
707 /* As in AllocSetReset, free block-header vchunks explicitly */
708 VALGRIND_MEMPOOL_FREE(set, block);
709 free(block);
710 }
711
712 block = next;
713 }
714
715 Assert(context->mem_allocated == keepersize);
716
717 /*
718 * Destroy the vpool. We don't seem to need to explicitly free the
719 * initial block's header vchunk, nor any user-data vchunks that Valgrind
720 * still knows about; they'll all go away automatically.
721 */
723
724 /* Finally, free the context header, including the keeper block */
725 free(set);
726}
#define IsKeeperBlock(set, block)
Definition aset.c:248
#define KeeperBlock(set)
Definition aset.c:244
#define MAX_FREE_CONTEXTS
Definition aset.c:241
static AllocSetFreeList context_freelists[2]
Definition aset.c:257
static int32 next
Definition blutils.c:225
#define PG_USED_FOR_ASSERTS_ONLY
Definition c.h:235
void MemoryContextResetOnly(MemoryContext context)
Definition mcxt.c:422
#define VALGRIND_DESTROY_MEMPOOL(context)
Definition memdebug.h:25
#define VALGRIND_MEMPOOL_FREE(context, addr)
Definition memdebug.h:30
struct MemoryContextData * MemoryContext
Definition palloc.h:36
#define free(a)
AllocBlock next
Definition aset.c:191
MemoryContextData header
Definition aset.c:160
int freeListIndex
Definition aset.c:170
AllocSetContext * first_free
Definition aset.c:253
MemoryContext nextchild
Definition memnodes.h:130

References AllocSetIsValid, Assert, AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, fb(), AllocSetFreeList::first_free, free, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, IsKeeperBlock, MemoryContextData::isReset, KeeperBlock, MAX_FREE_CONTEXTS, MemoryContextData::mem_allocated, MemoryContextResetOnly(), next, AllocBlockData::next, MemoryContextData::nextchild, AllocSetFreeList::num_free, PG_USED_FOR_ASSERTS_ONLY, VALGRIND_DESTROY_MEMPOOL, and VALGRIND_MEMPOOL_FREE.

◆ AllocSetFree()

void AllocSetFree ( void * pointer)
extern

Definition at line 1107 of file aset.c.

1108{
1109 AllocSet set;
1111
1112 /* Allow access to the chunk header. */
1114
1116 {
1117 /* Release single-chunk block. */
1119
1120 /*
1121 * Try to verify that we have a sane block pointer: the block header
1122 * should reference an aset and the freeptr should match the endptr.
1123 */
1124 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1125 elog(ERROR, "could not find block containing chunk %p", chunk);
1126
1127 set = block->aset;
1128
1129#ifdef MEMORY_CONTEXT_CHECKING
1130 {
1131 /* Test for someone scribbling on unused space in chunk */
1132 Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1133 if (!sentinel_ok(pointer, chunk->requested_size))
1134 elog(WARNING, "detected write past chunk end in %s %p",
1135 set->header.name, chunk);
1136 }
1137#endif
1138
1139 /* OK, remove block from aset's list and free it */
1140 if (block->prev)
1141 block->prev->next = block->next;
1142 else
1143 set->blocks = block->next;
1144 if (block->next)
1145 block->next->prev = block->prev;
1146
1147 set->header.mem_allocated -= block->endptr - ((char *) block);
1148
1149#ifdef CLOBBER_FREED_MEMORY
1150 wipe_mem(block, block->freeptr - ((char *) block));
1151#endif
1152
1153 /* As in AllocSetReset, free block-header vchunks explicitly */
1154 VALGRIND_MEMPOOL_FREE(set, block);
1155
1156 free(block);
1157 }
1158 else
1159 {
1161 int fidx;
1163
1164 /*
1165 * In this path, for speed reasons we just Assert that the referenced
1166 * block is good. We can also Assert that the value field is sane.
1167 * Future field experience may show that these Asserts had better
1168 * become regular runtime test-and-elog checks.
1169 */
1170 Assert(AllocBlockIsValid(block));
1171 set = block->aset;
1172
1176
1177#ifdef MEMORY_CONTEXT_CHECKING
1178 /* Test for someone scribbling on unused space in chunk */
1179 if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1180 if (!sentinel_ok(pointer, chunk->requested_size))
1181 elog(WARNING, "detected write past chunk end in %s %p",
1182 set->header.name, chunk);
1183#endif
1184
1185#ifdef CLOBBER_FREED_MEMORY
1187#endif
1188 /* push this chunk onto the top of the free list */
1190 link->next = set->freelist[fidx];
1192 set->freelist[fidx] = chunk;
1193
1194#ifdef MEMORY_CONTEXT_CHECKING
1195
1196 /*
1197 * Reset requested_size to InvalidAllocSize in chunks that are on free
1198 * list.
1199 */
1200 chunk->requested_size = InvalidAllocSize;
1201#endif
1202 }
1203}
#define AllocBlockIsValid(block)
Definition aset.c:207
#define FreeListIdxIsValid(fidx)
Definition aset.c:142
#define ExternalChunkGetBlock(chunk)
Definition aset.c:215
#define ERROR
Definition elog.h:39
#define InvalidAllocSize
Definition memutils.h:47
AllocBlock prev
Definition aset.c:190
AllocSet aset
Definition aset.c:189
const char * name
Definition memnodes.h:131

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, AllocSetContext::blocks, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, fb(), free, AllocSetContext::freelist, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, AllocSetContext::header, InvalidAllocSize, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and WARNING.

Referenced by AllocSetRealloc().

◆ AllocSetGetChunkContext()

MemoryContext AllocSetGetChunkContext ( void * pointer)
extern

Definition at line 1490 of file aset.c.

1491{
1493 AllocBlock block;
1494 AllocSet set;
1495
1496 /* Allow access to the chunk header. */
1498
1501 else
1503
1504 /* Disallow access to the chunk header. */
1506
1507 Assert(AllocBlockIsValid(block));
1508 set = block->aset;
1509
1510 return &set->header;
1511}
struct AllocBlockData * AllocBlock
Definition aset.c:113

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, ExternalChunkGetBlock, fb(), AllocSetContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetGetChunkSpace()

Size AllocSetGetChunkSpace ( void * pointer)
extern

Definition at line 1519 of file aset.c.

1520{
1522 int fidx;
1523
1524 /* Allow access to the chunk header. */
1526
1528 {
1530
1531 /* Disallow access to the chunk header. */
1533
1534 Assert(AllocBlockIsValid(block));
1535
1536 return block->endptr - (char *) chunk;
1537 }
1538
1541
1542 /* Disallow access to the chunk header. */
1544
1546}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, Assert, AllocBlockData::endptr, ExternalChunkGetBlock, fb(), FreeListIdxIsValid, GetChunkSizeFromFreeListIdx, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetIsEmpty()

bool AllocSetIsEmpty ( MemoryContext  context)
extern

Definition at line 1553 of file aset.c.

1554{
1555 Assert(AllocSetIsValid(context));
1556
1557 /*
1558 * For now, we say "empty" only if the context is new or just reset. We
1559 * could examine the freelists to determine if all space has been freed,
1560 * but it's not really worth the trouble for present uses of this
1561 * functionality.
1562 */
1563 if (context->isReset)
1564 return true;
1565 return false;
1566}

References AllocSetIsValid, Assert, and MemoryContextData::isReset.

◆ AllocSetRealloc()

void * AllocSetRealloc ( void * pointer,
Size  size,
int  flags 
)
extern

Definition at line 1218 of file aset.c.

1219{
1220 AllocBlock block;
1221 AllocSet set;
1224 int fidx;
1225
1226 /* Allow access to the chunk header. */
1228
1230 {
1231 /*
1232 * The chunk must have been allocated as a single-chunk block. Use
1233 * realloc() to make the containing block bigger, or smaller, with
1234 * minimum space wastage.
1235 */
1237 Size chksize;
1238 Size blksize;
1240
1242
1243 /*
1244 * Try to verify that we have a sane block pointer: the block header
1245 * should reference an aset and the freeptr should match the endptr.
1246 */
1247 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1248 elog(ERROR, "could not find block containing chunk %p", chunk);
1249
1250 set = block->aset;
1251
1252 /* only check size in paths where the limits could be hit */
1253 MemoryContextCheckSize((MemoryContext) set, size, flags);
1254
1255 oldchksize = block->endptr - (char *) pointer;
1256
1257#ifdef MEMORY_CONTEXT_CHECKING
1258 /* Test for someone scribbling on unused space in chunk */
1259 Assert(chunk->requested_size < oldchksize);
1260 if (!sentinel_ok(pointer, chunk->requested_size))
1261 elog(WARNING, "detected write past chunk end in %s %p",
1262 set->header.name, chunk);
1263#endif
1264
1265#ifdef MEMORY_CONTEXT_CHECKING
1266 /* ensure there's always space for the sentinel byte */
1267 chksize = MAXALIGN(size + 1);
1268#else
1269 chksize = MAXALIGN(size);
1270#endif
1271
1272 /* Do the realloc */
1274 oldblksize = block->endptr - ((char *) block);
1275
1276 newblock = (AllocBlock) realloc(block, blksize);
1277 if (newblock == NULL)
1278 {
1279 /* Disallow access to the chunk header. */
1281 return MemoryContextAllocationFailure(&set->header, size, flags);
1282 }
1283
1284 /*
1285 * Move the block-header vchunk explicitly. (mcxt.c will take care of
1286 * moving the vchunk for the user data.)
1287 */
1289 block = newblock;
1290
1291 /* updated separately, not to underflow when (oldblksize > blksize) */
1293 set->header.mem_allocated += blksize;
1294
1295 block->freeptr = block->endptr = ((char *) block) + blksize;
1296
1297 /* Update pointers since block has likely been moved */
1298 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1299 pointer = MemoryChunkGetPointer(chunk);
1300 if (block->prev)
1301 block->prev->next = block;
1302 else
1303 set->blocks = block;
1304 if (block->next)
1305 block->next->prev = block;
1306
1307#ifdef MEMORY_CONTEXT_CHECKING
1308#ifdef RANDOMIZE_ALLOCATED_MEMORY
1309
1310 /*
1311 * We can only randomize the extra space if we know the prior request.
1312 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1313 */
1314 if (size > chunk->requested_size)
1315 randomize_mem((char *) pointer + chunk->requested_size,
1316 size - chunk->requested_size);
1317#else
1318
1319 /*
1320 * If this is an increase, realloc() will have marked any
1321 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1322 * also need to adjust trailing bytes from the old allocation (from
1323 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1324 * Make sure not to mark too many bytes in case chunk->requested_size
1325 * < size < oldchksize.
1326 */
1327#ifdef USE_VALGRIND
1328 if (Min(size, oldchksize) > chunk->requested_size)
1329 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1330 Min(size, oldchksize) - chunk->requested_size);
1331#endif
1332#endif
1333
1334 chunk->requested_size = size;
1335 /* set mark to catch clobber of "unused" space */
1336 Assert(size < chksize);
1337 set_sentinel(pointer, size);
1338#else /* !MEMORY_CONTEXT_CHECKING */
1339
1340 /*
1341 * We may need to adjust marking of bytes from the old allocation as
1342 * some of them may be marked NOACCESS. We don't know how much of the
1343 * old chunk size was the requested size; it could have been as small
1344 * as one byte. We have to be conservative and just mark the entire
1345 * old portion DEFINED. Make sure not to mark memory beyond the new
1346 * allocation in case it's smaller than the old one.
1347 */
1348 VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1349#endif
1350
1351 /* Ensure any padding bytes are marked NOACCESS. */
1352 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1353
1354 /* Disallow access to the chunk header. */
1356
1357 return pointer;
1358 }
1359
1360 block = MemoryChunkGetBlock(chunk);
1361
1362 /*
1363 * In this path, for speed reasons we just Assert that the referenced
1364 * block is good. We can also Assert that the value field is sane. Future
1365 * field experience may show that these Asserts had better become regular
1366 * runtime test-and-elog checks.
1367 */
1368 Assert(AllocBlockIsValid(block));
1369 set = block->aset;
1370
1374
1375#ifdef MEMORY_CONTEXT_CHECKING
1376 /* Test for someone scribbling on unused space in chunk */
1377 if (chunk->requested_size < oldchksize)
1378 if (!sentinel_ok(pointer, chunk->requested_size))
1379 elog(WARNING, "detected write past chunk end in %s %p",
1380 set->header.name, chunk);
1381#endif
1382
1383 /*
1384 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1385 * allocated area already is >= the new size. (In particular, we will
1386 * fall out here if the requested size is a decrease.)
1387 */
1388 if (oldchksize >= size)
1389 {
1390#ifdef MEMORY_CONTEXT_CHECKING
1391 Size oldrequest = chunk->requested_size;
1392
1393#ifdef RANDOMIZE_ALLOCATED_MEMORY
1394 /* We can only fill the extra space if we know the prior request */
1395 if (size > oldrequest)
1396 randomize_mem((char *) pointer + oldrequest,
1397 size - oldrequest);
1398#endif
1399
1400 chunk->requested_size = size;
1401
1402 /*
1403 * If this is an increase, mark any newly-available part UNDEFINED.
1404 * Otherwise, mark the obsolete part NOACCESS.
1405 */
1406 if (size > oldrequest)
1407 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1408 size - oldrequest);
1409 else
1410 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1411 oldchksize - size);
1412
1413 /* set mark to catch clobber of "unused" space */
1414 if (size < oldchksize)
1415 set_sentinel(pointer, size);
1416#else /* !MEMORY_CONTEXT_CHECKING */
1417
1418 /*
1419 * We don't have the information to determine whether we're growing
1420 * the old request or shrinking it, so we conservatively mark the
1421 * entire new allocation DEFINED.
1422 */
1424 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1425#endif
1426
1427 /* Disallow access to the chunk header. */
1429
1430 return pointer;
1431 }
1432 else
1433 {
1434 /*
1435 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1436 * allocate a new chunk and copy the data. Since we know the existing
1437 * data isn't huge, this won't involve any great memcpy expense, so
1438 * it's not worth being smarter. (At one time we tried to avoid
1439 * memcpy when it was possible to enlarge the chunk in-place, but that
1440 * turns out to misbehave unpleasantly for repeated cycles of
1441 * palloc/repalloc/pfree: the eventually freed chunks go into the
1442 * wrong freelist for the next initial palloc request, and so we leak
1443 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1444 */
1446 Size oldsize;
1447
1448 /* allocate new chunk (this also checks size is valid) */
1449 newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1450
1451 /* leave immediately if request was not completed */
1452 if (newPointer == NULL)
1453 {
1454 /* Disallow access to the chunk header. */
1456 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1457 }
1458
1459 /*
1460 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1461 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1462 * definedness from the old allocation to the new. If we know the old
1463 * allocation, copy just that much. Otherwise, make the entire old
1464 * chunk defined to avoid errors as we copy the currently-NOACCESS
1465 * trailing bytes.
1466 */
1468#ifdef MEMORY_CONTEXT_CHECKING
1469 oldsize = chunk->requested_size;
1470#else
1473#endif
1474
1475 /* transfer existing data (certain to fit) */
1476 memcpy(newPointer, pointer, oldsize);
1477
1478 /* free old chunk */
1479 AllocSetFree(pointer);
1480
1481 return newPointer;
1482 }
1483}
#define ALLOC_BLOCKHDRSZ
Definition aset.c:108
void * AllocSetAlloc(MemoryContext context, Size size, int flags)
Definition aset.c:1012
void * AllocPointer
Definition aset.c:119
void AllocSetFree(void *pointer)
Definition aset.c:1107
#define MAXALIGN(LEN)
Definition c.h:859
#define VALGRIND_MEMPOOL_CHANGE(context, optr, nptr, size)
Definition memdebug.h:31
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition memdebug.h:28
static void MemoryContextCheckSize(MemoryContext context, Size size, int flags)
#define realloc(a, b)

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocSetAlloc(), AllocSetFree(), AllocBlockData::aset, Assert, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, fb(), FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, AllocSetContext::header, MAXALIGN, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), MemoryContextCheckSize(), Min, MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, realloc, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, VALGRIND_MEMPOOL_CHANGE, and WARNING.

◆ AllocSetReset()

void AllocSetReset ( MemoryContext  context)
extern

Definition at line 546 of file aset.c.

547{
548 AllocSet set = (AllocSet) context;
549 AllocBlock block;
551
553
554#ifdef MEMORY_CONTEXT_CHECKING
555 /* Check for corruption and leaks before freeing */
556 AllocSetCheck(context);
557#endif
558
559 /* Remember keeper block size for Assert below */
560 keepersize = KeeperBlock(set)->endptr - ((char *) set);
561
562 /* Clear chunk freelists */
563 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
564
565 block = set->blocks;
566
567 /* New blocks list will be just the keeper block */
568 set->blocks = KeeperBlock(set);
569
570 while (block != NULL)
571 {
572 AllocBlock next = block->next;
573
574 if (IsKeeperBlock(set, block))
575 {
576 /* Reset the block, but don't return it to malloc */
577 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
578
579#ifdef CLOBBER_FREED_MEMORY
581#else
582 /* wipe_mem() would have done this */
584#endif
585 block->freeptr = datastart;
586 block->prev = NULL;
587 block->next = NULL;
588 }
589 else
590 {
591 /* Normal case, release the block */
592 context->mem_allocated -= block->endptr - ((char *) block);
593
594#ifdef CLOBBER_FREED_MEMORY
595 wipe_mem(block, block->freeptr - ((char *) block));
596#endif
597
598 /*
599 * We need to free the block header's vchunk explicitly, although
600 * the user-data vchunks within will go away in the TRIM below.
601 * Otherwise Valgrind complains about leaked allocations.
602 */
603 VALGRIND_MEMPOOL_FREE(set, block);
604
605 free(block);
606 }
607 block = next;
608 }
609
610 Assert(context->mem_allocated == keepersize);
611
612 /*
613 * Instruct Valgrind to throw away all the vchunks associated with this
614 * context, except for the one covering the AllocSetContext and
615 * keeper-block header. This gets rid of the vchunks for whatever user
616 * data is getting discarded by the context reset.
617 */
619
620 /* Reset block size allocation sequence, too */
621 set->nextBlockSize = set->initBlockSize;
622}
#define FIRST_BLOCKHDRSZ
Definition aset.c:110
#define MemSetAligned(start, val, len)
Definition c.h:1086
#define VALGRIND_MEMPOOL_TRIM(context, addr, size)
Definition memdebug.h:32
uint32 initBlockSize
Definition aset.c:165
uint32 nextBlockSize
Definition aset.c:167

References ALLOC_BLOCKHDRSZ, AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), FIRST_BLOCKHDRSZ, free, AllocSetContext::freelist, AllocBlockData::freeptr, AllocSetContext::initBlockSize, IsKeeperBlock, KeeperBlock, MemoryContextData::mem_allocated, MemSetAligned, next, AllocBlockData::next, AllocSetContext::nextBlockSize, PG_USED_FOR_ASSERTS_ONLY, AllocBlockData::prev, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and VALGRIND_MEMPOOL_TRIM.

◆ AllocSetStats()

void AllocSetStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void * passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)
extern

Definition at line 1578 of file aset.c.

1581{
1582 AllocSet set = (AllocSet) context;
1583 Size nblocks = 0;
1584 Size freechunks = 0;
1585 Size totalspace;
1586 Size freespace = 0;
1587 AllocBlock block;
1588 int fidx;
1589
1590 Assert(AllocSetIsValid(set));
1591
1592 /* Include context header in totalspace */
1593 totalspace = MAXALIGN(sizeof(AllocSetContext));
1594
1595 for (block = set->blocks; block != NULL; block = block->next)
1596 {
1597 nblocks++;
1598 totalspace += block->endptr - ((char *) block);
1599 freespace += block->endptr - block->freeptr;
1600 }
1601 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1602 {
1604 MemoryChunk *chunk = set->freelist[fidx];
1605
1606 while (chunk != NULL)
1607 {
1609
1610 /* Allow access to the chunk header. */
1614
1615 freechunks++;
1616 freespace += chksz + ALLOC_CHUNKHDRSZ;
1617
1619 chunk = link->next;
1621 }
1622 }
1623
1624 if (printfunc)
1625 {
1626 char stats_string[200];
1627
1629 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1630 totalspace, nblocks, freespace, freechunks,
1631 totalspace - freespace);
1633 }
1634
1635 if (totals)
1636 {
1637 totals->nblocks += nblocks;
1638 totals->freechunks += freechunks;
1639 totals->totalspace += totalspace;
1640 totals->freespace += freespace;
1641 }
1642}
#define ALLOCSET_NUM_FREELISTS
Definition aset.c:84
#define snprintf
Definition port.h:260

References ALLOC_CHUNKHDRSZ, ALLOCSET_NUM_FREELISTS, AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MAXALIGN, MemoryChunkGetValue(), AllocBlockData::next, snprintf, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ BumpAlloc()

void * BumpAlloc ( MemoryContext  context,
Size  size,
int  flags 
)
extern

Definition at line 517 of file bump.c.

518{
519 BumpContext *set = (BumpContext *) context;
520 BumpBlock *block;
521 Size chunk_size;
523
524 Assert(BumpIsValid(set));
525
526#ifdef MEMORY_CONTEXT_CHECKING
527 /* ensure there's always space for the sentinel byte */
528 chunk_size = MAXALIGN(size + 1);
529#else
530 chunk_size = MAXALIGN(size);
531#endif
532
533 /*
534 * If requested size exceeds maximum for chunks we hand the request off to
535 * BumpAllocLarge().
536 */
537 if (chunk_size > set->allocChunkLimit)
538 return BumpAllocLarge(context, size, flags);
539
540 required_size = chunk_size + Bump_CHUNKHDRSZ;
541
542 /*
543 * Not an oversized chunk. We try to first make use of the latest block,
544 * but if there's not enough space in it we must allocate a new block.
545 */
546 block = dlist_container(BumpBlock, node, dlist_head_node(&set->blocks));
547
549 return BumpAllocFromNewBlock(context, size, flags, chunk_size);
550
551 /* The current block has space, so just allocate chunk there. */
552 return BumpAllocChunkFromBlock(context, block, size, chunk_size);
553}
#define Bump_CHUNKHDRSZ
Definition bump.c:56
static pg_noinline void * BumpAllocLarge(MemoryContext context, Size size, int flags)
Definition bump.c:313
#define BumpIsValid(set)
Definition bump.c:102
static pg_noinline void * BumpAllocFromNewBlock(MemoryContext context, Size size, int flags, Size chunk_size)
Definition bump.c:453
static void * BumpAllocChunkFromBlock(MemoryContext context, BumpBlock *block, Size size, Size chunk_size)
Definition bump.c:394
static Size BumpBlockFreeBytes(BumpBlock *block)
Definition bump.c:611
static dlist_node * dlist_head_node(dlist_head *head)
Definition ilist.h:565
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
dlist_head blocks
Definition bump.c:78
uint32 allocChunkLimit
Definition bump.c:76

References BumpContext::allocChunkLimit, Assert, BumpContext::blocks, Bump_CHUNKHDRSZ, BumpAllocChunkFromBlock(), BumpAllocFromNewBlock(), BumpAllocLarge(), BumpBlockFreeBytes(), BumpIsValid, dlist_container, dlist_head_node(), fb(), and MAXALIGN.

◆ BumpDelete()

void BumpDelete ( MemoryContext  context)
extern

Definition at line 294 of file bump.c.

295{
296 /* Reset to release all releasable BumpBlocks */
297 BumpReset(context);
298
299 /* Destroy the vpool -- see notes in aset.c */
301
302 /* And free the context header and keeper block */
303 free(context);
304}
void BumpReset(MemoryContext context)
Definition bump.c:251

References BumpReset(), free, and VALGRIND_DESTROY_MEMPOOL.

◆ BumpFree()

void BumpFree ( void * pointer)
extern

Definition at line 646 of file bump.c.

647{
648 elog(ERROR, "%s is not supported by the bump memory allocator", "pfree");
649}

References elog, and ERROR.

◆ BumpGetChunkContext()

MemoryContext BumpGetChunkContext ( void * pointer)
extern

Definition at line 667 of file bump.c.

668{
669 elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkContext");
670 return NULL; /* keep compiler quiet */
671}

References elog, ERROR, and fb().

◆ BumpGetChunkSpace()

Size BumpGetChunkSpace ( void * pointer)
extern

Definition at line 678 of file bump.c.

679{
680 elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkSpace");
681 return 0; /* keep compiler quiet */
682}

References elog, and ERROR.

◆ BumpIsEmpty()

bool BumpIsEmpty ( MemoryContext  context)
extern

Definition at line 689 of file bump.c.

690{
691 BumpContext *set = (BumpContext *) context;
692 dlist_iter iter;
693
694 Assert(BumpIsValid(set));
695
696 dlist_foreach(iter, &set->blocks)
697 {
698 BumpBlock *block = dlist_container(BumpBlock, node, iter.cur);
699
700 if (!BumpBlockIsEmpty(block))
701 return false;
702 }
703
704 return true;
705}
static bool BumpBlockIsEmpty(BumpBlock *block)
Definition bump.c:578
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
dlist_node * cur
Definition ilist.h:179

References Assert, BumpContext::blocks, BumpBlockIsEmpty(), BumpIsValid, dlist_iter::cur, dlist_container, and dlist_foreach.

◆ BumpRealloc()

void * BumpRealloc ( void * pointer,
Size  size,
int  flags 
)
extern

Definition at line 656 of file bump.c.

657{
658 elog(ERROR, "%s is not supported by the bump memory allocator", "realloc");
659 return NULL; /* keep compiler quiet */
660}

References elog, ERROR, and fb().

◆ BumpReset()

void BumpReset ( MemoryContext  context)
extern

Definition at line 251 of file bump.c.

252{
253 BumpContext *set = (BumpContext *) context;
255
256 Assert(BumpIsValid(set));
257
258#ifdef MEMORY_CONTEXT_CHECKING
259 /* Check for corruption and leaks before freeing */
260 BumpCheck(context);
261#endif
262
264 {
265 BumpBlock *block = dlist_container(BumpBlock, node, miter.cur);
266
267 if (IsKeeperBlock(set, block))
268 BumpBlockMarkEmpty(block);
269 else
270 BumpBlockFree(set, block);
271 }
272
273 /*
274 * Instruct Valgrind to throw away all the vchunks associated with this
275 * context, except for the one covering the BumpContext and keeper-block
276 * header. This gets rid of the vchunks for whatever user data is getting
277 * discarded by the context reset.
278 */
280
281 /* Reset block size allocation sequence, too */
282 set->nextBlockSize = set->initBlockSize;
283
284 /* Ensure there is only 1 item in the dlist */
287}
static void BumpBlockFree(BumpContext *set, BumpBlock *block)
Definition bump.c:621
static void BumpBlockMarkEmpty(BumpBlock *block)
Definition bump.c:589
#define IsKeeperBlock(set, blk)
Definition bump.c:64
#define FIRST_BLOCKHDRSZ
Definition bump.c:49
static bool dlist_has_next(const dlist_head *head, const dlist_node *node)
Definition ilist.h:503
#define dlist_foreach_modify(iter, lhead)
Definition ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
uint32 initBlockSize
Definition bump.c:73
uint32 nextBlockSize
Definition bump.c:75

References Assert, BumpContext::blocks, BumpBlockFree(), BumpBlockMarkEmpty(), BumpIsValid, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), fb(), FIRST_BLOCKHDRSZ, BumpContext::initBlockSize, IsKeeperBlock, BumpContext::nextBlockSize, and VALGRIND_MEMPOOL_TRIM.

Referenced by BumpDelete().

◆ BumpStats()

void BumpStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void * passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)
extern

Definition at line 717 of file bump.c.

719{
720 BumpContext *set = (BumpContext *) context;
721 Size nblocks = 0;
722 Size totalspace = 0;
723 Size freespace = 0;
724 dlist_iter iter;
725
726 Assert(BumpIsValid(set));
727
728 dlist_foreach(iter, &set->blocks)
729 {
730 BumpBlock *block = dlist_container(BumpBlock, node, iter.cur);
731
732 nblocks++;
733 totalspace += (block->endptr - (char *) block);
734 freespace += (block->endptr - block->freeptr);
735 }
736
737 if (printfunc)
738 {
739 char stats_string[200];
740
742 "%zu total in %zu blocks; %zu free; %zu used",
743 totalspace, nblocks, freespace, totalspace - freespace);
745 }
746
747 if (totals)
748 {
749 totals->nblocks += nblocks;
750 totals->totalspace += totalspace;
751 totals->freespace += freespace;
752 }
753}
char * endptr
Definition bump.c:95
char * freeptr
Definition bump.c:94

References Assert, BumpContext::blocks, BumpIsValid, dlist_iter::cur, dlist_container, dlist_foreach, BumpBlock::endptr, fb(), BumpBlock::freeptr, and snprintf.

◆ GenerationAlloc()

void * GenerationAlloc ( MemoryContext  context,
Size  size,
int  flags 
)
extern

Definition at line 553 of file generation.c.

554{
555 GenerationContext *set = (GenerationContext *) context;
556 GenerationBlock *block;
557 Size chunk_size;
559
561
562#ifdef MEMORY_CONTEXT_CHECKING
563 /* ensure there's always space for the sentinel byte */
564 chunk_size = MAXALIGN(size + 1);
565#else
566 chunk_size = MAXALIGN(size);
567#endif
568
569 /*
570 * If requested size exceeds maximum for chunks we hand the request off to
571 * GenerationAllocLarge().
572 */
573 if (chunk_size > set->allocChunkLimit)
574 return GenerationAllocLarge(context, size, flags);
575
577
578 /*
579 * Not an oversized chunk. We try to first make use of the current block,
580 * but if there's not enough space in it, instead of allocating a new
581 * block, we look to see if the empty freeblock has enough space. We
582 * don't try reusing the keeper block. If it's become empty we'll reuse
583 * that again only if the context is reset.
584 *
585 * We only try reusing the freeblock if we've no space for this allocation
586 * on the current block. When a freeblock exists, we'll switch to it once
587 * the first time we can't fit an allocation in the current block. We
588 * avoid ping-ponging between the two as we need to be careful not to
589 * fragment differently sized consecutive allocations between several
590 * blocks. Going between the two could cause fragmentation for FIFO
591 * workloads, which generation is meant to be good at.
592 */
593 block = set->block;
594
596 {
597 GenerationBlock *freeblock = set->freeblock;
598
599 /* freeblock, if set, must be empty */
600 Assert(freeblock == NULL || GenerationBlockIsEmpty(freeblock));
601
602 /* check if we have a freeblock and if it's big enough */
603 if (freeblock != NULL &&
605 {
606 /* make the freeblock the current block */
607 set->freeblock = NULL;
608 set->block = freeblock;
609
610 return GenerationAllocChunkFromBlock(context,
611 freeblock,
612 size,
613 chunk_size);
614 }
615 else
616 {
617 /*
618 * No freeblock, or it's not big enough for this allocation. Make
619 * a new block.
620 */
621 return GenerationAllocFromNewBlock(context, size, flags, chunk_size);
622 }
623 }
624
625 /* The current block has space, so just allocate chunk there. */
626 return GenerationAllocChunkFromBlock(context, block, size, chunk_size);
627}
static pg_noinline void * GenerationAllocLarge(MemoryContext context, Size size, int flags)
Definition generation.c:363
static Size GenerationBlockFreeBytes(GenerationBlock *block)
Definition generation.c:680
#define Generation_CHUNKHDRSZ
Definition generation.c:47
static void * GenerationAllocChunkFromBlock(MemoryContext context, GenerationBlock *block, Size size, Size chunk_size)
Definition generation.c:436
#define GenerationBlockIsEmpty(b)
Definition generation.c:118
static pg_noinline void * GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags, Size chunk_size)
Definition generation.c:484
#define GenerationIsValid(set)
Definition generation.c:104
GenerationBlock * freeblock
Definition generation.c:72
GenerationBlock * block
Definition generation.c:71
uint32 allocChunkLimit
Definition generation.c:69

References GenerationContext::allocChunkLimit, Assert, GenerationContext::block, fb(), GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationAllocChunkFromBlock(), GenerationAllocFromNewBlock(), GenerationAllocLarge(), GenerationBlockFreeBytes(), GenerationBlockIsEmpty, GenerationIsValid, MAXALIGN, and unlikely.

Referenced by GenerationRealloc().

◆ GenerationDelete()

void GenerationDelete ( MemoryContext  context)
extern

Definition at line 344 of file generation.c.

345{
346 /* Reset to release all releasable GenerationBlocks */
347 GenerationReset(context);
348
349 /* Destroy the vpool -- see notes in aset.c */
351
352 /* And free the context header and keeper block */
353 free(context);
354}
void GenerationReset(MemoryContext context)
Definition generation.c:291

References free, GenerationReset(), and VALGRIND_DESTROY_MEMPOOL.

◆ GenerationFree()

void GenerationFree ( void * pointer)
extern

Definition at line 718 of file generation.c.

719{
721 GenerationBlock *block;
723#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
724 || defined(CLOBBER_FREED_MEMORY)
726#endif
727
728 /* Allow access to the chunk header. */
730
732 {
734
735 /*
736 * Try to verify that we have a sane block pointer: the block header
737 * should reference a generation context.
738 */
739 if (!GenerationBlockIsValid(block))
740 elog(ERROR, "could not find block containing chunk %p", chunk);
741
742#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
743 || defined(CLOBBER_FREED_MEMORY)
744 chunksize = block->endptr - (char *) pointer;
745#endif
746 }
747 else
748 {
749 block = MemoryChunkGetBlock(chunk);
750
751 /*
752 * In this path, for speed reasons we just Assert that the referenced
753 * block is good. Future field experience may show that this Assert
754 * had better become a regular runtime test-and-elog check.
755 */
757
758#if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \
759 || defined(CLOBBER_FREED_MEMORY)
761#endif
762 }
763
764#ifdef MEMORY_CONTEXT_CHECKING
765 /* Test for someone scribbling on unused space in chunk */
766 Assert(chunk->requested_size < chunksize);
767 if (!sentinel_ok(pointer, chunk->requested_size))
768 elog(WARNING, "detected write past chunk end in %s %p",
769 ((MemoryContext) block->context)->name, chunk);
770#endif
771
772#ifdef CLOBBER_FREED_MEMORY
773 wipe_mem(pointer, chunksize);
774#endif
775
776#ifdef MEMORY_CONTEXT_CHECKING
777 /* Reset requested_size to InvalidAllocSize in freed chunks */
778 chunk->requested_size = InvalidAllocSize;
779#endif
780
781 block->nfree += 1;
782
783 Assert(block->nchunks > 0);
784 Assert(block->nfree <= block->nchunks);
785 Assert(block != block->context->freeblock);
786
787 /* If there are still allocated chunks in the block, we're done. */
788 if (likely(block->nfree < block->nchunks))
789 return;
790
791 set = block->context;
792
793 /*-----------------------
794 * The block this allocation was on has now become completely empty of
795 * chunks. In the general case, we can now return the memory for this
796 * block back to malloc. However, there are cases where we don't want to
797 * do that:
798 *
799 * 1) If it's the keeper block. This block was malloc'd in the same
800 * allocation as the context itself and can't be free'd without
801 * freeing the context.
802 * 2) If it's the current block. We could free this, but doing so would
803 * leave us nothing to set the current block to, so we just mark the
804 * block as empty so new allocations can reuse it again.
805 * 3) If we have no "freeblock" set, then we save a single block for
806 * future allocations to avoid having to malloc a new block again.
807 * This is useful for FIFO workloads as it avoids continual
808 * free/malloc cycles.
809 */
810 if (IsKeeperBlock(set, block) || set->block == block)
811 GenerationBlockMarkEmpty(block); /* case 1 and 2 */
812 else if (set->freeblock == NULL)
813 {
814 /* case 3 */
816 set->freeblock = block;
817 }
818 else
819 GenerationBlockFree(set, block); /* Otherwise, free it */
820}
#define likely(x)
Definition c.h:423
#define IsKeeperBlock(set, block)
Definition generation.c:134
static void GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
Definition generation.c:690
static void GenerationBlockMarkEmpty(GenerationBlock *block)
Definition generation.c:656
#define GenerationBlockIsValid(block)
Definition generation.c:111
#define ExternalChunkGetBlock(chunk)
Definition generation.c:125
GenerationContext * context
Definition generation.c:92

References Assert, GenerationContext::block, GenerationBlock::context, elog, GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, fb(), GenerationContext::freeblock, Generation_CHUNKHDRSZ, GenerationBlockFree(), GenerationBlockIsValid, GenerationBlockMarkEmpty(), InvalidAllocSize, IsKeeperBlock, likely, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), GenerationBlock::nchunks, GenerationBlock::nfree, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and WARNING.

Referenced by GenerationRealloc().

◆ GenerationGetChunkContext()

MemoryContext GenerationGetChunkContext ( void * pointer)
extern

◆ GenerationGetChunkSpace()

Size GenerationGetChunkSpace ( void * pointer)
extern

Definition at line 1002 of file generation.c.

1003{
1006
1007 /* Allow access to the chunk header. */
1009
1011 {
1013
1015 chunksize = block->endptr - (char *) pointer;
1016 }
1017 else
1019
1020 /* Disallow access to the chunk header. */
1022
1024}

References Assert, GenerationBlock::endptr, ExternalChunkGetBlock, fb(), Generation_CHUNKHDRSZ, GenerationBlockIsValid, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ GenerationIsEmpty()

bool GenerationIsEmpty ( MemoryContext  context)
extern

Definition at line 1031 of file generation.c.

1032{
1033 GenerationContext *set = (GenerationContext *) context;
1034 dlist_iter iter;
1035
1037
1038 dlist_foreach(iter, &set->blocks)
1039 {
1040 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1041
1042 if (block->nchunks > 0)
1043 return false;
1044 }
1045
1046 return true;
1047}
dlist_head blocks
Definition generation.c:74

References Assert, GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationIsValid, and GenerationBlock::nchunks.

◆ GenerationRealloc()

void * GenerationRealloc ( void * pointer,
Size  size,
int  flags 
)
extern

Definition at line 829 of file generation.c.

830{
833 GenerationBlock *block;
836
837 /* Allow access to the chunk header. */
839
841 {
843
844 /*
845 * Try to verify that we have a sane block pointer: the block header
846 * should reference a generation context.
847 */
848 if (!GenerationBlockIsValid(block))
849 elog(ERROR, "could not find block containing chunk %p", chunk);
850
851 oldsize = block->endptr - (char *) pointer;
852 }
853 else
854 {
855 block = MemoryChunkGetBlock(chunk);
856
857 /*
858 * In this path, for speed reasons we just Assert that the referenced
859 * block is good. Future field experience may show that this Assert
860 * had better become a regular runtime test-and-elog check.
861 */
863
865 }
866
867 set = block->context;
868
869#ifdef MEMORY_CONTEXT_CHECKING
870 /* Test for someone scribbling on unused space in chunk */
871 Assert(chunk->requested_size < oldsize);
872 if (!sentinel_ok(pointer, chunk->requested_size))
873 elog(WARNING, "detected write past chunk end in %s %p",
874 ((MemoryContext) set)->name, chunk);
875#endif
876
877 /*
 878 * Maybe the allocated area already is big enough. (In particular, we always
879 * fall out here if the requested size is a decrease.)
880 *
881 * This memory context does not use power-of-2 chunk sizing and instead
882 * carves the chunks to be as small as possible, so most repalloc() calls
883 * will end up in the palloc/memcpy/pfree branch.
884 *
885 * XXX Perhaps we should annotate this condition with unlikely()?
886 */
887#ifdef MEMORY_CONTEXT_CHECKING
888 /* With MEMORY_CONTEXT_CHECKING, we need an extra byte for the sentinel */
889 if (oldsize > size)
890#else
891 if (oldsize >= size)
892#endif
893 {
894#ifdef MEMORY_CONTEXT_CHECKING
895 Size oldrequest = chunk->requested_size;
896
897#ifdef RANDOMIZE_ALLOCATED_MEMORY
898 /* We can only fill the extra space if we know the prior request */
899 if (size > oldrequest)
900 randomize_mem((char *) pointer + oldrequest,
901 size - oldrequest);
902#endif
903
904 chunk->requested_size = size;
905
906 /*
907 * If this is an increase, mark any newly-available part UNDEFINED.
908 * Otherwise, mark the obsolete part NOACCESS.
909 */
910 if (size > oldrequest)
911 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
912 size - oldrequest);
913 else
914 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
915 oldsize - size);
916
917 /* set mark to catch clobber of "unused" space */
918 set_sentinel(pointer, size);
919#else /* !MEMORY_CONTEXT_CHECKING */
920
921 /*
922 * We don't have the information to determine whether we're growing
923 * the old request or shrinking it, so we conservatively mark the
924 * entire new allocation DEFINED.
925 */
927 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
928#endif
929
930 /* Disallow access to the chunk header. */
932
933 return pointer;
934 }
935
936 /* allocate new chunk (this also checks size is valid) */
937 newPointer = GenerationAlloc((MemoryContext) set, size, flags);
938
939 /* leave immediately if request was not completed */
940 if (newPointer == NULL)
941 {
942 /* Disallow access to the chunk header. */
944 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
945 }
946
947 /*
948 * GenerationAlloc() may have returned a region that is still NOACCESS.
949 * Change it to UNDEFINED for the moment; memcpy() will then transfer
950 * definedness from the old allocation to the new. If we know the old
951 * allocation, copy just that much. Otherwise, make the entire old chunk
952 * defined to avoid errors as we copy the currently-NOACCESS trailing
953 * bytes.
954 */
956#ifdef MEMORY_CONTEXT_CHECKING
957 oldsize = chunk->requested_size;
958#else
960#endif
961
962 /* transfer existing data (certain to fit) */
963 memcpy(newPointer, pointer, oldsize);
964
965 /* free old chunk */
966 GenerationFree(pointer);
967
968 return newPointer;
969}
void GenerationFree(void *pointer)
Definition generation.c:718
void * GenerationPointer
Definition generation.c:55
void * GenerationAlloc(MemoryContext context, Size size, int flags)
Definition generation.c:553

References Assert, GenerationBlock::context, elog, GenerationBlock::endptr, ERROR, ExternalChunkGetBlock, fb(), Generation_CHUNKHDRSZ, GenerationAlloc(), GenerationBlockIsValid, GenerationFree(), MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), name, PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and WARNING.

◆ GenerationReset()

void GenerationReset ( MemoryContext  context)
extern

Definition at line 291 of file generation.c.

292{
293 GenerationContext *set = (GenerationContext *) context;
295
297
298#ifdef MEMORY_CONTEXT_CHECKING
299 /* Check for corruption and leaks before freeing */
300 GenerationCheck(context);
301#endif
302
303 /*
304 * NULLify the free block pointer. We must do this before calling
305 * GenerationBlockFree as that function never expects to free the
306 * freeblock.
307 */
308 set->freeblock = NULL;
309
311 {
313
314 if (IsKeeperBlock(set, block))
316 else
317 GenerationBlockFree(set, block);
318 }
319
320 /*
321 * Instruct Valgrind to throw away all the vchunks associated with this
322 * context, except for the one covering the GenerationContext and
323 * keeper-block header. This gets rid of the vchunks for whatever user
324 * data is getting discarded by the context reset.
325 */
327
328 /* set it so new allocations to make use of the keeper block */
329 set->block = KeeperBlock(set);
330
331 /* Reset block size allocation sequence, too */
332 set->nextBlockSize = set->initBlockSize;
333
334 /* Ensure there is only 1 item in the dlist */
337}
#define KeeperBlock(set)
Definition generation.c:129
#define FIRST_BLOCKHDRSZ
Definition generation.c:48

References Assert, GenerationContext::block, GenerationContext::blocks, dlist_container, dlist_foreach_modify, dlist_has_next(), dlist_head_node(), dlist_is_empty(), fb(), FIRST_BLOCKHDRSZ, GenerationContext::freeblock, GenerationBlockFree(), GenerationBlockMarkEmpty(), GenerationIsValid, GenerationContext::initBlockSize, IsKeeperBlock, KeeperBlock, GenerationContext::nextBlockSize, and VALGRIND_MEMPOOL_TRIM.

Referenced by GenerationDelete().

◆ GenerationStats()

void GenerationStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void * passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)
extern

Definition at line 1062 of file generation.c.

1065{
1066 GenerationContext *set = (GenerationContext *) context;
1067 Size nblocks = 0;
1068 Size nchunks = 0;
1069 Size nfreechunks = 0;
1070 Size totalspace;
1071 Size freespace = 0;
1072 dlist_iter iter;
1073
1075
1076 /* Include context header in totalspace */
1077 totalspace = MAXALIGN(sizeof(GenerationContext));
1078
1079 dlist_foreach(iter, &set->blocks)
1080 {
1081 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
1082
1083 nblocks++;
1084 nchunks += block->nchunks;
1085 nfreechunks += block->nfree;
1086 totalspace += block->blksize;
1087 freespace += (block->endptr - block->freeptr);
1088 }
1089
1090 if (printfunc)
1091 {
1092 char stats_string[200];
1093
1095 "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used",
1096 totalspace, nblocks, nchunks, freespace,
1097 nfreechunks, totalspace - freespace);
1099 }
1100
1101 if (totals)
1102 {
1103 totals->nblocks += nblocks;
1104 totals->freechunks += nfreechunks;
1105 totals->totalspace += totalspace;
1106 totals->freespace += freespace;
1107 }
1108}

References Assert, GenerationBlock::blksize, GenerationContext::blocks, dlist_iter::cur, dlist_container, dlist_foreach, GenerationBlock::endptr, fb(), GenerationBlock::freeptr, GenerationIsValid, MAXALIGN, GenerationBlock::nchunks, GenerationBlock::nfree, and snprintf.

◆ MemoryContextAllocationFailure()

void * MemoryContextAllocationFailure ( MemoryContext  context,
Size  size,
int  flags 
)
extern

Definition at line 1198 of file mcxt.c.

1199{
1200 if ((flags & MCXT_ALLOC_NO_OOM) == 0)
1201 {
1202 if (TopMemoryContext)
1204 ereport(ERROR,
1206 errmsg("out of memory"),
1207 errdetail("Failed on request of size %zu in memory context \"%s\".",
1208 size, context->name)));
1209 }
1210 return NULL;
1211}
int errcode(int sqlerrcode)
Definition elog.c:874
int errmsg(const char *fmt,...)
Definition elog.c:1093
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define ereport(elevel,...)
Definition elog.h:150
#define MCXT_ALLOC_NO_OOM
Definition fe_memutils.h:29
MemoryContext TopMemoryContext
Definition mcxt.c:166
void MemoryContextStats(MemoryContext context)
Definition mcxt.c:863

References ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), MCXT_ALLOC_NO_OOM, MemoryContextStats(), MemoryContextData::name, and TopMemoryContext.

Referenced by AlignedAllocRealloc(), AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetRealloc(), BumpAllocFromNewBlock(), BumpAllocLarge(), GenerationAllocFromNewBlock(), GenerationAllocLarge(), GenerationRealloc(), and SlabAllocFromNewBlock().

◆ MemoryContextCheckSize()

static void MemoryContextCheckSize ( MemoryContext  context,
Size  size,
int  flags 
)
inlinestatic

Definition at line 167 of file memutils_internal.h.

/*
 * MemoryContextCheckSize
 *		Reject an invalid allocation request size up front.
 *
 * Sizes within the normal allocation limit (AllocSizeIsValid) always pass.
 * Larger sizes are tolerated only when the caller passed MCXT_ALLOC_HUGE
 * and the size still satisfies AllocHugeSizeIsValid; any other oversize
 * request goes to MemoryContextSizeFailure(), which does not return.
 */
168{
169 if (unlikely(!AllocSizeIsValid(size)))
170 {
171 if (!(flags & MCXT_ALLOC_HUGE) || !AllocHugeSizeIsValid(size))
172 MemoryContextSizeFailure(context, size, flags);
173 }
174}

References AllocHugeSizeIsValid, AllocSizeIsValid, MCXT_ALLOC_HUGE, MemoryContextSizeFailure(), and unlikely.

Referenced by AllocSetAllocLarge(), AllocSetRealloc(), BumpAllocLarge(), and GenerationAllocLarge().

◆ MemoryContextCreate()

void MemoryContextCreate ( MemoryContext  node,
NodeTag  tag,
MemoryContextMethodID  method_id,
MemoryContext  parent,
const char name 
)
extern

Definition at line 1149 of file mcxt.c.

/*
 * MemoryContextCreate
 *		Initialize the standard fields of a freshly allocated context header
 *		and link the node into the context tree under 'parent'.
 *
 * A NULL parent makes the node a tree root (no siblings, critical-section
 * allocation disallowed); otherwise the node is pushed onto the front of
 * the parent's child list and inherits allowInCritSection.
 *
 * NOTE(review): source line 1156 is elided here (numbering jumps
 * 1155->1157).  Given the preceding comment and the CritSectionCount
 * reference below, it is presumably Assert(CritSectionCount == 0) —
 * TODO confirm in mcxt.c.
 */
1154{
1155 /* Creating new memory contexts is not allowed in a critical section */
1157
1158 /* Validate parent, to help prevent crazy context linkages */
1159 Assert(parent == NULL || MemoryContextIsValid(parent));
1160 Assert(node != parent);
1161
1162 /* Initialize all standard fields of memory context header */
1163 node->type = tag;
1164 node->isReset = true;
1165 node->methods = &mcxt_methods[method_id];
1166 node->parent = parent;
1167 node->firstchild = NULL;
1168 node->mem_allocated = 0;
1169 node->prevchild = NULL;
1170 node->name = name;
1171 node->ident = NULL;
1172 node->reset_cbs = NULL;
1173
1174 /* OK to link node into context tree */
1175 if (parent)
1176 {
1177 node->nextchild = parent->firstchild;
1178 if (parent->firstchild != NULL)
1179 parent->firstchild->prevchild = node;
1180 parent->firstchild = node;
1181 /* inherit allowInCritSection flag from parent */
1182 node->allowInCritSection = parent->allowInCritSection;
1183 }
1184 else
1185 {
1186 node->nextchild = NULL;
1187 node->allowInCritSection = false;
1188 }
1189}
volatile uint32 CritSectionCount
Definition globals.c:45
static const MemoryContextMethods mcxt_methods[]
Definition mcxt.c:63
#define MemoryContextIsValid(context)
Definition memnodes.h:145
MemoryContext prevchild
Definition memnodes.h:129
MemoryContext firstchild
Definition memnodes.h:128
bool allowInCritSection
Definition memnodes.h:124
const char * ident
Definition memnodes.h:132
MemoryContext parent
Definition memnodes.h:127
MemoryContextCallback * reset_cbs
Definition memnodes.h:133
const MemoryContextMethods * methods
Definition memnodes.h:126

References MemoryContextData::allowInCritSection, Assert, CritSectionCount, fb(), MemoryContextData::firstchild, MemoryContextData::ident, MemoryContextData::isReset, mcxt_methods, MemoryContextData::mem_allocated, MemoryContextIsValid, MemoryContextData::methods, name, MemoryContextData::name, MemoryContextData::nextchild, MemoryContextData::parent, MemoryContextData::prevchild, and MemoryContextData::reset_cbs.

Referenced by AllocSetContextCreateInternal(), BumpContextCreate(), GenerationContextCreate(), and SlabContextCreate().

◆ MemoryContextSizeFailure()

pg_noreturn void MemoryContextSizeFailure ( MemoryContext  context,
Size  size,
int  flags 
)
extern

Definition at line 1219 of file mcxt.c.

/*
 * MemoryContextSizeFailure
 *		Report an invalid allocation request size.
 * elog(ERROR) does not return, matching the function's pg_noreturn marker.
 */
1220{
1221 elog(ERROR, "invalid memory alloc request size %zu", size);
1222}

References elog, and ERROR.

Referenced by MemoryContextCheckSize().

◆ SlabAlloc()

void * SlabAlloc ( MemoryContext  context,
Size  size,
int  flags 
)
extern

Definition at line 658 of file slab.c.

/*
 * SlabAlloc
 *		Return a chunk from the slab context.
 *
 * Every chunk in a slab context has the same size; any other request size
 * is rejected via SlabAllocInvalidSize().  curBlocklistIndex == 0 means no
 * partially filled block exists, so a chunk must come from a new (or
 * recycled empty) block via SlabAllocFromNewBlock(); otherwise we take the
 * next free chunk from the head block of the current blocklist and move the
 * block between blocklists when its free count changes bucket.
 *
 * NOTE(review): this listing elides source lines 662, 668, 689, 705, 711,
 * 714 and 717.  From the surrounding code and the "References" list these
 * are presumably the MemoryChunk *chunk declaration, an Assert on
 * curBlocklistIndex, the post-allocation blocklist-index computation via
 * SlabFindNextBlockListIndex()/SlabBlocklistIndex(), the index-changed test,
 * and the dlist_push_head()/curBlocklistIndex update — TODO confirm in slab.c.
 */
659{
660 SlabContext *slab = (SlabContext *) context;
661 SlabBlock *block;
663
664 Assert(SlabIsValid(slab));
665
666 /* sanity check that this is pointing to a valid blocklist */
667 Assert(slab->curBlocklistIndex >= 0);
669
670 /*
671 * Make sure we only allow correct request size. This doubles as the
672 * MemoryContextCheckSize check.
673 */
674 if (unlikely(size != slab->chunkSize))
675 SlabAllocInvalidSize(context, size);
676
677 if (unlikely(slab->curBlocklistIndex == 0))
678 {
679 /*
680 * Handle the case when there are no partially filled blocks
681 * available. This happens either when the last allocation took the
682 * last chunk in the block, or when SlabFree() free'd the final block.
683 */
684 return SlabAllocFromNewBlock(context, size, flags);
685 }
686 else
687 {
688 dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
690
691 Assert(!dlist_is_empty(blocklist));
692
693 /* grab the block from the blocklist */
694 block = dlist_head_element(SlabBlock, node, blocklist);
695
696 /* make sure we actually got a valid block, with matching nfree */
697 Assert(block != NULL);
698 Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
699 Assert(block->nfree > 0);
700
701 /* fetch the next chunk from this block */
702 chunk = SlabGetNextFreeChunk(slab, block);
703
704 /* get the new blocklist index based on the new free chunk count */
706
707 /*
708 * Handle the case where the blocklist index changes. This also deals
709 * with blocks becoming full as only full blocks go at index 0.
710 */
712 {
713 dlist_delete_from(blocklist, &block->node);
715
716 if (dlist_is_empty(blocklist))
718 }
719 }
720
721 return SlabAllocSetupNewChunk(context, block, chunk, size);
722}
static void dlist_delete_from(dlist_head *head, dlist_node *node)
Definition ilist.h:429
#define dlist_head_element(type, membername, lhead)
Definition ilist.h:603
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition ilist.h:347
static pg_noinline void * SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
Definition slab.c:564
#define SlabIsValid(set)
Definition slab.c:196
static MemoryChunk * SlabGetNextFreeChunk(SlabContext *slab, SlabBlock *block)
Definition slab.c:271
static int32 SlabBlocklistIndex(SlabContext *slab, int nfree)
Definition slab.c:211
static void * SlabAllocSetupNewChunk(MemoryContext context, SlabBlock *block, MemoryChunk *chunk, Size size)
Definition slab.c:523
pg_noinline static pg_noreturn void SlabAllocInvalidSize(MemoryContext context, Size size)
Definition slab.c:634
static int32 SlabFindNextBlockListIndex(SlabContext *slab)
Definition slab.c:251
int32 nfree
Definition slab.c:149
dlist_node node
Definition slab.c:153
dlist_head blocklist[SLAB_BLOCKLIST_COUNT]
Definition slab.c:129
int32 chunksPerBlock
Definition slab.c:110
int32 curBlocklistIndex
Definition slab.c:111
uint32 chunkSize
Definition slab.c:107

References Assert, SlabContext::blocklist, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dlist_delete_from(), dlist_head_element, dlist_is_empty(), dlist_push_head(), fb(), SlabBlock::nfree, SlabBlock::node, SlabAllocFromNewBlock(), SlabAllocInvalidSize(), SlabAllocSetupNewChunk(), SlabBlocklistIndex(), SlabFindNextBlockListIndex(), SlabGetNextFreeChunk(), SlabIsValid, and unlikely.

◆ SlabDelete()

void SlabDelete ( MemoryContext  context)
extern

Definition at line 506 of file slab.c.

/*
 * SlabDelete
 *		Free all memory owned by the context, then the header itself.
 *
 * SlabReset() releases every block; the header is malloc'd storage and is
 * released with free().
 *
 * NOTE(review): source line 512 is elided (numbering jumps 511->513).
 * Given the comment at 511 and the "References" list it is presumably
 * VALGRIND_DESTROY_MEMPOOL(context) — TODO confirm in slab.c.
 */
507{
508 /* Reset to release all the SlabBlocks */
509 SlabReset(context);
510
511 /* Destroy the vpool -- see notes in aset.c */
513
514 /* And free the context header */
515 free(context);
516}
void SlabReset(MemoryContext context)
Definition slab.c:436

References free, SlabReset(), and VALGRIND_DESTROY_MEMPOOL.

◆ SlabFree()

void SlabFree ( void pointer)
extern

Definition at line 729 of file slab.c.

/*
 * SlabFree
 *		Return a chunk to its slab context.
 *
 * The chunk is pushed onto its block's free list and the block's nfree is
 * bumped.  The block may then have to migrate to a different blocklist
 * bucket; a block that becomes completely empty is either kept on the
 * emptyblocks list for reuse or, past SLAB_MAXIMUM_EMPTY_BLOCKS, actually
 * free()d with mem_allocated adjusted.  curBlocklistIndex is re-derived
 * whenever the relevant blocklists changed, so it keeps pointing at the
 * list holding the fullest block(s).
 *
 * NOTE(review): this listing elides source lines 731, 738, 752, 774, 780,
 * 783-784, 793, 795, 809, 815 and 839-840.  From context and the
 * "References" list these are presumably: the chunk declaration via
 * PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED on the chunk header,
 * a MEMORY_CONTEXT_CHECKING VALGRIND call, the newBlocklistIdx assignment,
 * the curBlocklistIdx != newBlocklistIdx test, the
 * dlist_delete_from()/dlist_push_head() move, the blocklist-emptied test,
 * the SlabFindNextBlockListIndex() recomputation, the dlist_delete_from()
 * removal of the empty block, the dclist_count() < SLAB_MAXIMUM_EMPTY_BLOCKS
 * test, and the final curBlocklistIndex reset condition/assignment —
 * TODO confirm against slab.c.
 */
730{
732 SlabBlock *block;
733 SlabContext *slab;
734 int curBlocklistIdx;
735 int newBlocklistIdx;
736
737 /* Allow access to the chunk header. */
739
740 block = MemoryChunkGetBlock(chunk);
741
742 /*
743 * For speed reasons we just Assert that the referenced block is good.
744 * Future field experience may show that this Assert had better become a
745 * regular runtime test-and-elog check.
746 */
747 Assert(SlabBlockIsValid(block));
748 slab = block->slab;
749
750#ifdef MEMORY_CONTEXT_CHECKING
751 /* Test for someone scribbling on unused space in chunk */
753 if (!sentinel_ok(pointer, slab->chunkSize))
754 elog(WARNING, "detected write past chunk end in %s %p",
755 slab->header.name, chunk);
756#endif
757
758 /* push this chunk onto the head of the block's free list */
759 *(MemoryChunk **) pointer = block->freehead;
760 block->freehead = chunk;
761
762 block->nfree++;
763
764 Assert(block->nfree > 0);
765 Assert(block->nfree <= slab->chunksPerBlock);
766
767#ifdef CLOBBER_FREED_MEMORY
768 /* don't wipe the free list MemoryChunk pointer stored in the chunk */
769 wipe_mem((char *) pointer + sizeof(MemoryChunk *),
770 slab->chunkSize - sizeof(MemoryChunk *));
771#endif
772
773 curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
775
776 /*
777 * Check if the block needs to be moved to another element on the
778 * blocklist based on it now having 1 more free chunk.
779 */
781 {
782 /* do the move */
785
786 /*
787 * The blocklist[curBlocklistIdx] may now be empty or we may now be
788 * able to use a lower-element blocklist. We'll need to redetermine
789 * what the slab->curBlocklistIndex is if the current blocklist was
790 * changed or if a lower element one was changed. We must ensure we
791 * use the list with the fullest block(s).
792 */
794 {
796
797 /*
798 * We know there must be a block with at least 1 unused chunk as
799 * we just pfree'd one. Ensure curBlocklistIndex reflects this.
800 */
801 Assert(slab->curBlocklistIndex > 0);
802 }
803 }
804
805 /* Handle when a block becomes completely empty */
806 if (unlikely(block->nfree == slab->chunksPerBlock))
807 {
808 /* remove the block */
810
811 /*
812 * To avoid thrashing malloc/free, we keep a list of empty blocks that
813 * we can reuse again instead of having to malloc a new one.
814 */
816 dclist_push_head(&slab->emptyblocks, &block->node);
817 else
818 {
819 /*
820 * When we have enough empty blocks stored already, we actually
821 * free the block.
822 */
823#ifdef CLOBBER_FREED_MEMORY
824 wipe_mem(block, slab->blockSize);
825#endif
826
827 /* As in aset.c, free block-header vchunks explicitly */
828 VALGRIND_MEMPOOL_FREE(slab, block);
829
830 free(block);
831 slab->header.mem_allocated -= slab->blockSize;
832 }
833
834 /*
835 * Check if we need to reset the blocklist index. This is required
836 * when the blocklist this block is on has become completely empty.
837 */
838 if (slab->curBlocklistIndex == newBlocklistIdx &&
841 }
842}
static uint32 dclist_count(const dclist_head *head)
Definition ilist.h:932
static void dclist_push_head(dclist_head *head, dlist_node *node)
Definition ilist.h:693
#define Slab_CHUNKHDRSZ
Definition slab.c:157
#define SlabBlockIsValid(block)
Definition slab.c:202
#define SLAB_MAXIMUM_EMPTY_BLOCKS
Definition slab.c:98
MemoryChunk * freehead
Definition slab.c:151
SlabContext * slab
Definition slab.c:148
uint32 fullChunkSize
Definition slab.c:108
MemoryContextData header
Definition slab.c:105
uint32 blockSize
Definition slab.c:109
dclist_head emptyblocks
Definition slab.c:120

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunkSize, SlabContext::chunksPerBlock, SlabContext::curBlocklistIndex, dclist_count(), dclist_push_head(), dlist_delete_from(), dlist_is_empty(), dlist_push_head(), elog, SlabContext::emptyblocks, fb(), free, SlabBlock::freehead, SlabContext::fullChunkSize, SlabContext::header, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryContextData::name, SlabBlock::nfree, SlabBlock::node, PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SLAB_MAXIMUM_EMPTY_BLOCKS, SlabBlockIsValid, SlabBlocklistIndex(), SlabFindNextBlockListIndex(), unlikely, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MEMPOOL_FREE, and WARNING.

◆ SlabGetChunkContext()

MemoryContext SlabGetChunkContext ( void pointer)
extern

Definition at line 895 of file slab.c.

/*
 * SlabGetChunkContext
 *		Return the MemoryContext that owns the given allocated chunk.
 *
 * The chunk header leads to its SlabBlock, whose ->slab back-pointer gives
 * the owning SlabContext; we return that context's embedded header.
 *
 * NOTE(review): source lines 897, 901 and 906 are elided.  From the
 * comments and "References" list they are presumably the chunk declaration
 * via PointerGetMemoryChunk(pointer) and the paired
 * VALGRIND_MAKE_MEM_DEFINED / VALGRIND_MAKE_MEM_NOACCESS calls on the
 * chunk header — TODO confirm in slab.c.
 */
896{
898 SlabBlock *block;
899
900 /* Allow access to the chunk header. */
902
903 block = MemoryChunkGetBlock(chunk);
904
905 /* Disallow access to the chunk header. */
907
908 Assert(SlabBlockIsValid(block));
909
910 return &block->slab->header;
911}

References Assert, fb(), SlabContext::header, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabGetChunkSpace()

Size SlabGetChunkSpace ( void pointer)
extern

Definition at line 919 of file slab.c.

/*
 * SlabGetChunkSpace
 *		Return the total space occupied by a chunk, including overhead.
 *
 * All chunks in a slab context occupy fullChunkSize bytes, so the answer
 * comes straight from the owning context found via the chunk's block.
 *
 * NOTE(review): source lines 921, 926 and 931 are elided — presumably the
 * chunk declaration via PointerGetMemoryChunk(pointer) and the paired
 * VALGRIND_MAKE_MEM_DEFINED / VALGRIND_MAKE_MEM_NOACCESS calls on the
 * chunk header — TODO confirm in slab.c.
 */
920{
922 SlabBlock *block;
923 SlabContext *slab;
924
925 /* Allow access to the chunk header. */
927
928 block = MemoryChunkGetBlock(chunk);
929
930 /* Disallow access to the chunk header. */
932
933 Assert(SlabBlockIsValid(block));
934 slab = block->slab;
935
936 return slab->fullChunkSize;
937}

References Assert, fb(), SlabContext::fullChunkSize, MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabIsEmpty()

bool SlabIsEmpty ( MemoryContext  context)
extern

Definition at line 944 of file slab.c.

/*
 * SlabIsEmpty
 *		Report whether the context records zero allocated block space.
 * True iff the header's mem_allocated counter is zero.
 */
945{
946 Assert(SlabIsValid((SlabContext *) context));
947
948 return (context->mem_allocated == 0);
949}

References Assert, MemoryContextData::mem_allocated, and SlabIsValid.

◆ SlabRealloc()

void * SlabRealloc ( void pointer,
Size  size,
int  flags 
)
extern

Definition at line 858 of file slab.c.

/*
 * SlabRealloc
 *		Slab contexts cannot resize chunks; only a same-size "realloc" is
 *		tolerated (returns the original pointer unchanged).  Any other size
 *		raises an ERROR, as does a chunk whose block header does not pass
 *		SlabBlockIsValid (test-and-elog rather than Assert, since arriving
 *		here at all usually indicates caller error).
 *
 * NOTE(review): source lines 860, 865 and 870 are elided — presumably the
 * chunk declaration via PointerGetMemoryChunk(pointer) and the paired
 * VALGRIND_MAKE_MEM_DEFINED / VALGRIND_MAKE_MEM_NOACCESS calls on the
 * chunk header — TODO confirm in slab.c.
 */
859{
861 SlabBlock *block;
862 SlabContext *slab;
863
864 /* Allow access to the chunk header. */
866
867 block = MemoryChunkGetBlock(chunk);
868
869 /* Disallow access to the chunk header. */
871
872 /*
873 * Try to verify that we have a sane block pointer: the block header
874 * should reference a slab context. (We use a test-and-elog, not just
875 * Assert, because it seems highly likely that we're here in error in the
876 * first place.)
877 */
878 if (!SlabBlockIsValid(block))
879 elog(ERROR, "could not find block containing chunk %p", chunk);
880 slab = block->slab;
881
882 /* can't do actual realloc with slab, but let's try to be gentle */
883 if (size == slab->chunkSize)
884 return pointer;
885
886 elog(ERROR, "slab allocator does not support realloc()");
887 return NULL; /* keep compiler quiet */
888}

References SlabContext::chunkSize, elog, ERROR, fb(), MemoryChunkGetBlock(), PointerGetMemoryChunk, SlabBlock::slab, Slab_CHUNKHDRSZ, SlabBlockIsValid, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ SlabReset()

void SlabReset ( MemoryContext  context)
extern

Definition at line 436 of file slab.c.

/*
 * SlabReset
 *		Release all memory held by the context, keeping only the header.
 *
 * Frees the retained empty blocks first, then every block on each of the
 * SLAB_BLOCKLIST_COUNT blocklists, decrementing mem_allocated as it goes;
 * the final Assert checks that the count reaches exactly zero.  Under
 * MEMORY_CONTEXT_CHECKING, SlabCheck() runs first to catch corruption and
 * leaks while the blocks still exist.
 *
 * NOTE(review): source lines 439, 450, 454 and 470 are elided.  From
 * context and the "References" list they are presumably the iterator
 * declaration(s), the dclist_foreach_modify loop head over emptyblocks,
 * the dclist_delete_from() call, and the dlist_foreach_modify loop head
 * over blocklist[i] — TODO confirm in slab.c.
 */
437{
438 SlabContext *slab = (SlabContext *) context;
440 int i;
441
442 Assert(SlabIsValid(slab));
443
444#ifdef MEMORY_CONTEXT_CHECKING
445 /* Check for corruption and leaks before freeing */
446 SlabCheck(context);
447#endif
448
449 /* release any retained empty blocks */
451 {
452 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
453
455
456#ifdef CLOBBER_FREED_MEMORY
457 wipe_mem(block, slab->blockSize);
458#endif
459
460 /* As in aset.c, free block-header vchunks explicitly */
461 VALGRIND_MEMPOOL_FREE(slab, block);
462
463 free(block);
464 context->mem_allocated -= slab->blockSize;
465 }
466
467 /* walk over blocklist and free the blocks */
468 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
469 {
471 {
472 SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
473
474 dlist_delete(miter.cur);
475
476#ifdef CLOBBER_FREED_MEMORY
477 wipe_mem(block, slab->blockSize);
478#endif
479
480 /* As in aset.c, free block-header vchunks explicitly */
481 VALGRIND_MEMPOOL_FREE(slab, block);
482
483 free(block);
484 context->mem_allocated -= slab->blockSize;
485 }
486 }
487
488 /*
489 * Instruct Valgrind to throw away all the vchunks associated with this
490 * context, except for the one covering the SlabContext. This gets rid of
491 * the vchunks for whatever user data is getting discarded by the context
492 * reset.
493 */
494 VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext));
495
496 slab->curBlocklistIndex = 0;
497
498 Assert(context->mem_allocated == 0);
499}
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static void dclist_delete_from(dclist_head *head, dlist_node *node)
Definition ilist.h:763
#define dclist_foreach_modify(iter, lhead)
Definition ilist.h:973
int i
Definition isn.c:77
#define SLAB_BLOCKLIST_COUNT
Definition slab.c:95

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::curBlocklistIndex, dclist_delete_from(), dclist_foreach_modify, dlist_container, dlist_delete(), dlist_foreach_modify, SlabContext::emptyblocks, fb(), free, i, MemoryContextData::mem_allocated, SLAB_BLOCKLIST_COUNT, SlabIsValid, VALGRIND_MEMPOOL_FREE, and VALGRIND_MEMPOOL_TRIM.

Referenced by SlabDelete().

◆ SlabStats()

void SlabStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void passthru,
MemoryContextCounters totals,
bool  print_to_stderr 
)
extern

Definition at line 961 of file slab.c.

/*
 * SlabStats
 *		Compute and report memory usage statistics for a slab context.
 *
 * Totals cover the context header, the retained empty blocks, and every
 * block on the blocklists; free space counts only the free chunks on
 * non-empty blocks (see the XXX below).  Results go to 'printfunc' if one
 * was supplied, and are accumulated into '*totals' if non-NULL.
 *
 * NOTE(review): source lines 1001 and 1005 are elided — presumably the
 * snprintf(stats_string, sizeof(stats_string), ...) opening of the format
 * call and the printfunc(context, passthru, stats_string, print_to_stderr)
 * invocation, matching the snprintf reference below — TODO confirm in
 * slab.c.
 */
965{
966 SlabContext *slab = (SlabContext *) context;
967 Size nblocks = 0;
968 Size freechunks = 0;
969 Size totalspace;
970 Size freespace = 0;
971 int i;
972
973 Assert(SlabIsValid(slab));
974
975 /* Include context header in totalspace */
976 totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
977
978 /* Add the space consumed by blocks in the emptyblocks list */
979 totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
980
981 for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
982 {
983 dlist_iter iter;
984
985 dlist_foreach(iter, &slab->blocklist[i])
986 {
987 SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
988
989 nblocks++;
990 totalspace += slab->blockSize;
991 freespace += slab->fullChunkSize * block->nfree;
992 freechunks += block->nfree;
993 }
994 }
995
996 if (printfunc)
997 {
998 char stats_string[200];
999
1000 /* XXX should we include free chunks on empty blocks? */
1002 "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
1003 totalspace, nblocks, dclist_count(&slab->emptyblocks),
1004 freespace, freechunks, totalspace - freespace);
1006 }
1007
1008 if (totals)
1009 {
1010 totals->nblocks += nblocks;
1011 totals->freechunks += freechunks;
1012 totals->totalspace += totalspace;
1013 totals->freespace += freespace;
1014 }
1015}
#define Slab_CONTEXT_HDRSZ(chunksPerBlock)
Definition slab.c:88

References Assert, SlabContext::blocklist, SlabContext::blockSize, SlabContext::chunksPerBlock, dlist_iter::cur, dclist_count(), dlist_container, dlist_foreach, SlabContext::emptyblocks, fb(), SlabContext::fullChunkSize, i, SlabBlock::nfree, SLAB_BLOCKLIST_COUNT, Slab_CONTEXT_HDRSZ, SlabIsValid, and snprintf.