PostgreSQL Source Code git master
Loading...
Searching...
No Matches
aset.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * aset.c
4 * Allocation set definitions.
5 *
6 * AllocSet is our standard implementation of the abstract MemoryContext
7 * type.
8 *
9 *
10 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
11 * Portions Copyright (c) 1994, Regents of the University of California
12 *
13 * IDENTIFICATION
14 * src/backend/utils/mmgr/aset.c
15 *
16 * NOTE:
17 * This is a new (Feb. 05, 1999) implementation of the allocation set
18 * routines. AllocSet...() does not use OrderedSet...() any more.
19 * Instead it manages allocations in a block pool by itself, combining
20 * many small allocations in a few bigger blocks. AllocSetFree() normally
21 * doesn't free() memory really. It just add's the free'd area to some
22 * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 * at once on AllocSetReset(), which happens when the memory context gets
24 * destroyed.
25 * Jan Wieck
26 *
27 * Performance improvement from Tom Lane, 8/99: for extremely large request
28 * sizes, we do want to be able to give the memory back to free() as soon
29 * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 * freelist entries that might never be usable. This is specially needed
31 * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 * the previous instances of the block were guaranteed to be wasted until
33 * AllocSetReset() under the old way.
34 *
35 * Further improvement 12/00: as the code stood, request sizes in the
36 * midrange between "small" and "large" were handled very inefficiently,
37 * because any sufficiently large free chunk would be used to satisfy a
38 * request, even if it was much larger than necessary. This led to more
39 * and more wasted space in allocated chunks over time. To fix, get rid
40 * of the midrange behavior: we now handle only "small" power-of-2-size
41 * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 * the number of freelists to change the small/large boundary.
43 *
44 *-------------------------------------------------------------------------
45 */
46
47#include "postgres.h"
48
49#include "port/pg_bitutils.h"
50#include "utils/memdebug.h"
51#include "utils/memutils.h"
54
55/*--------------------
56 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 *
59 * Note that all chunks in the freelists have power-of-2 sizes. This
60 * improves recyclability: we may waste some space, but the wasted space
61 * should stay pretty constant as requests are made and released.
62 *
63 * A request too large for the last freelist is handled by allocating a
64 * dedicated block from malloc(). The block still has a block header and
65 * chunk header, but when the chunk is freed we'll return the whole block
66 * to malloc(), not put it on our freelists.
67 *
68 * CAUTION: ALLOC_MINBITS must be large enough so that
69 * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 * or we may fail to align the smallest chunks adequately.
71 * 8-byte alignment is enough on all currently known machines. This 8-byte
72 * minimum also allows us to store a pointer to the next freelist item within
73 * the chunk of memory itself.
74 *
75 * With the current parameters, request sizes up to 8K are treated as chunks,
76 * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78 * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80 *--------------------
81 */
82
83#define ALLOC_MINBITS 3 /* smallest chunk size is 8 bytes */
84#define ALLOCSET_NUM_FREELISTS 11
85#define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
86/* Size of largest chunk that we use a fixed size for */
87#define ALLOC_CHUNK_FRACTION 4
88/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
89
90/* ALLOC_CHUNK_LIMIT must be equal to ALLOCSET_SEPARATE_THRESHOLD */
92 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
93
94/*--------------------
95 * The first block allocated for an allocset has size initBlockSize.
96 * Each time we have to allocate another block, we double the block size
97 * (if possible, and without exceeding maxBlockSize), so as to reduce
98 * the bookkeeping load on malloc().
99 *
100 * Blocks allocated to hold oversize chunks do not follow this rule, however;
101 * they are just however big they need to be to hold that single chunk.
102 *
103 * Also, if a minContextSize is specified, the first block has that size,
104 * and then initBlockSize is used for the next one.
105 *--------------------
106 */
107
108#define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
109#define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk)
110#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(AllocSetContext)) + \
111 ALLOC_BLOCKHDRSZ)
112
113typedef struct AllocBlockData *AllocBlock; /* forward reference */
114
115/*
116 * AllocPointer
117 * Aligned pointer which may be a member of an allocation set.
118 */
119typedef void *AllocPointer;
120
121/*
122 * AllocFreeListLink
123 * When pfreeing memory, if we maintain a freelist for the given chunk's
124 * size then we use a AllocFreeListLink to point to the current item in
125 * the AllocSetContext's freelist and then set the given freelist element
126 * to point to the chunk being freed.
127 */
132
133/*
134 * Obtain a AllocFreeListLink for the given chunk. Allocation sizes are
135 * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
136 * itself to store the freelist link.
137 */
138#define GetFreeListLink(chkptr) \
139 (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
140
141/* Validate a freelist index retrieved from a chunk header */
142#define FreeListIdxIsValid(fidx) \
143 ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
144
145/* Determine the size of the chunk based on the freelist index */
146#define GetChunkSizeFromFreeListIdx(fidx) \
147 ((((Size) 1) << ALLOC_MINBITS) << (fidx))
148
149/*
150 * AllocSetContext is our standard implementation of MemoryContext.
151 *
152 * Note: header.isReset means there is nothing for AllocSetReset to do.
153 * This is different from the aset being physically empty (empty blocks list)
154 * because we will still have a keeper block. It's also different from the set
155 * being logically empty, because we don't attempt to detect pfree'ing the
156 * last active chunk.
157 */
158typedef struct AllocSetContext
159{
160 MemoryContextData header; /* Standard memory-context fields */
161 /* Info about storage allocated in this context: */
162 AllocBlock blocks; /* head of list of blocks in this set */
164 /* Allocation parameters for this context: */
165 uint32 initBlockSize; /* initial block size */
166 uint32 maxBlockSize; /* maximum block size */
167 uint32 nextBlockSize; /* next block size to allocate */
168 uint32 allocChunkLimit; /* effective chunk size limit */
169 /* freelist this context could be put in, or -1 if not a candidate: */
170 int freeListIndex; /* index in context_freelists[], or -1 */
172
174
175/*
176 * AllocBlock
177 * An AllocBlock is the unit of memory that is obtained by aset.c
178 * from malloc(). It contains one or more MemoryChunks, which are
179 * the units requested by palloc() and freed by pfree(). MemoryChunks
180 * cannot be returned to malloc() individually, instead they are put
181 * on freelists by pfree() and re-used by the next palloc() that has
182 * a matching request size.
183 *
184 * AllocBlockData is the header data for a block --- the usable space
185 * within the block begins at the next alignment boundary.
186 */
187typedef struct AllocBlockData
188{
189 AllocSet aset; /* aset that owns this block */
190 AllocBlock prev; /* prev block in aset's blocks list, if any */
191 AllocBlock next; /* next block in aset's blocks list, if any */
192 char *freeptr; /* start of free space in this block */
193 char *endptr; /* end of space in this block */
195
196/*
197 * AllocSetIsValid
198 * True iff set is valid allocation set.
199 */
200#define AllocSetIsValid(set) \
201 ((set) && IsA(set, AllocSetContext))
202
203/*
204 * AllocBlockIsValid
205 * True iff block is valid block of allocation set.
206 */
207#define AllocBlockIsValid(block) \
208 ((block) && AllocSetIsValid((block)->aset))
209
210/*
211 * We always store external chunks on a dedicated block. This makes fetching
212 * the block from an external chunk easy since it's always the first and only
213 * chunk on the block.
214 */
215#define ExternalChunkGetBlock(chunk) \
216 (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
217
218/*
219 * Rather than repeatedly creating and deleting memory contexts, we keep some
220 * freed contexts in freelists so that we can hand them out again with little
221 * work. Before putting a context in a freelist, we reset it so that it has
222 * only its initial malloc chunk and no others. To be a candidate for a
223 * freelist, a context must have the same minContextSize/initBlockSize as
224 * other contexts in the list; but its maxBlockSize is irrelevant since that
225 * doesn't affect the size of the initial chunk.
226 *
227 * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
228 * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
229 * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
230 *
231 * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
232 * hopes of improving locality of reference. But if there get to be too
233 * many contexts in the list, we'd prefer to drop the most-recently-created
234 * contexts in hopes of keeping the process memory map compact.
235 * We approximate that by simply deleting all existing entries when the list
236 * overflows, on the assumption that queries that allocate a lot of contexts
237 * will probably free them in more or less reverse order of allocation.
238 *
239 * Contexts in a freelist are chained via their nextchild pointers.
240 */
241#define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
242
243/* Obtain the keeper block for an allocation set */
244#define KeeperBlock(set) \
245 ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))
246
247/* Check if the block is the keeper block of the given allocation set */
248#define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
249
250typedef struct AllocSetFreeList
251{
252 int num_free; /* current list length */
253 AllocSetContext *first_free; /* list header */
255
256/* context_freelists[0] is for default params, [1] for small params */
258{
259 {
260 0, NULL
261 },
262 {
263 0, NULL
264 }
265};
266
267
268/* ----------
269 * AllocSetFreeIndex -
270 *
271 * Depending on the size of an allocation compute which freechunk
272 * list of the alloc set it belongs to. Caller must have verified
273 * that size <= ALLOC_CHUNK_LIMIT.
274 * ----------
275 */
276static inline int
278{
279 int idx;
280
281 if (size > (1 << ALLOC_MINBITS))
282 {
283 /*----------
284 * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
285 * This is the same as
286 * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
287 * or equivalently
288 * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
289 *
290 * However, for platforms without intrinsic support, we duplicate the
291 * logic here, allowing an additional optimization. It's reasonable
292 * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
293 * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
294 * the last two bytes.
295 *
296 * Yes, this function is enough of a hot-spot to make it worth this
297 * much trouble.
298 *----------
299 */
300#ifdef HAVE_BITSCAN_REVERSE
301 idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
302#else
303 uint32 t,
304 tsize;
305
306 /* Statically assert that we only have a 16-bit input value. */
308 "ALLOC_CHUNK_LIMIT must be less than 64kB");
309
310 tsize = size - 1;
311 t = tsize >> 8;
313 idx -= ALLOC_MINBITS - 1;
314#endif
315
317 }
318 else
319 idx = 0;
320
321 return idx;
322}
323
324
325/*
326 * Public routines
327 */
328
329
330/*
331 * AllocSetContextCreateInternal
332 * Create a new AllocSet context.
333 *
334 * parent: parent context, or NULL if top-level context
335 * name: name of context (must be statically allocated)
336 * minContextSize: minimum context size
337 * initBlockSize: initial allocation block size
338 * maxBlockSize: maximum allocation block size
339 *
340 * Most callers should abstract the context size parameters using a macro
341 * such as ALLOCSET_DEFAULT_SIZES.
342 *
343 * Note: don't call this directly; go through the wrapper macro
344 * AllocSetContextCreate.
345 */
348 const char *name,
350 Size initBlockSize,
351 Size maxBlockSize)
352{
353 int freeListIndex;
355 AllocSet set;
356 AllocBlock block;
357
358 /* ensure MemoryChunk's size is properly maxaligned */
360 "sizeof(MemoryChunk) is not maxaligned");
361 /* check we have enough space to store the freelist link */
363 "sizeof(AllocFreeListLink) larger than minimum allocation size");
364
365 /*
366 * First, validate allocation parameters. Once these were regular runtime
367 * tests and elog's, but in practice Asserts seem sufficient because
368 * nobody varies their parameters at runtime. We somewhat arbitrarily
369 * enforce a minimum 1K block size. We restrict the maximum block size to
370 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
371 * regards to addressing the offset between the chunk and the block that
372 * the chunk is stored on. We would be unable to store the offset between
373 * the chunk and block for any chunks that were beyond
374 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
375 * larger than this.
376 */
377 Assert(initBlockSize == MAXALIGN(initBlockSize) &&
378 initBlockSize >= 1024);
379 Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
380 maxBlockSize >= initBlockSize &&
381 AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
382 Assert(minContextSize == 0 ||
384 minContextSize >= 1024 &&
385 minContextSize <= maxBlockSize));
386 Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
387
388 /*
389 * Check whether the parameters match either available freelist. We do
390 * not need to demand a match of maxBlockSize.
391 */
393 initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
394 freeListIndex = 0;
396 initBlockSize == ALLOCSET_SMALL_INITSIZE)
397 freeListIndex = 1;
398 else
399 freeListIndex = -1;
400
401 /*
402 * If a suitable freelist entry exists, just recycle that context.
403 */
404 if (freeListIndex >= 0)
405 {
406 AllocSetFreeList *freelist = &context_freelists[freeListIndex];
407
408 if (freelist->first_free != NULL)
409 {
410 /* Remove entry from freelist */
411 set = freelist->first_free;
412 freelist->first_free = (AllocSet) set->header.nextchild;
413 freelist->num_free--;
414
415 /* Update its maxBlockSize; everything else should be OK */
416 set->maxBlockSize = maxBlockSize;
417
418 /* Reinitialize its header, installing correct name and parent */
422 parent,
423 name);
424
425 ((MemoryContext) set)->mem_allocated =
426 KeeperBlock(set)->endptr - ((char *) set);
427
428 return (MemoryContext) set;
429 }
430 }
431
432 /* Determine size of initial block */
435 if (minContextSize != 0)
437 else
438 firstBlockSize = Max(firstBlockSize, initBlockSize);
439
440 /*
441 * Allocate the initial block. Unlike other aset.c blocks, it starts with
442 * the context header and its block header follows that.
443 */
445 if (set == NULL)
446 {
451 errmsg("out of memory"),
452 errdetail("Failed while creating memory context \"%s\".",
453 name)));
454 }
455
456 /*
457 * Avoid writing code that can fail between here and MemoryContextCreate;
458 * we'd leak the header/initial block if we ereport in this stretch.
459 */
460
461 /* Create a vpool associated with the context */
462 VALGRIND_CREATE_MEMPOOL(set, 0, false);
463
464 /*
465 * Create a vchunk covering both the AllocSetContext struct and the keeper
466 * block's header. (Perhaps it would be more sensible for these to be two
467 * separate vchunks, but doing that seems to tickle bugs in some versions
468 * of Valgrind.) We must have these vchunks, and also a vchunk for each
469 * subsequently-added block header, so that Valgrind considers the
470 * pointers within them while checking for leaked memory. Note that
471 * Valgrind doesn't distinguish between these vchunks and those created by
472 * mcxt.c for the user-accessible-data chunks we allocate.
473 */
475
476 /* Fill in the initial block's block header */
477 block = KeeperBlock(set);
478 block->aset = set;
479 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
480 block->endptr = ((char *) set) + firstBlockSize;
481 block->prev = NULL;
482 block->next = NULL;
483
484 /* Mark unallocated space NOACCESS; leave the block header alone. */
485 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
486
487 /* Remember block as part of block list */
488 set->blocks = block;
489
490 /* Finish filling in aset-specific parts of the context header */
491 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
492
493 set->initBlockSize = (uint32) initBlockSize;
494 set->maxBlockSize = (uint32) maxBlockSize;
495 set->nextBlockSize = (uint32) initBlockSize;
496 set->freeListIndex = freeListIndex;
497
498 /*
499 * Compute the allocation chunk size limit for this context. It can't be
500 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
501 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
502 * even a significant fraction of it, should be treated as large chunks
503 * too. For the typical case of maxBlockSize a power of 2, the chunk size
504 * limit will be at most 1/8th maxBlockSize, so that given a stream of
505 * requests that are all the maximum chunk size we will waste at most
506 * 1/8th of the allocated space.
507 *
508 * Determine the maximum size that a chunk can be before we allocate an
509 * entire AllocBlock dedicated for that chunk. We set the absolute limit
510 * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
511 * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
512 * sized block. (We opt to keep allocChunkLimit a power-of-2 value
513 * primarily for legacy reasons rather than calculating it so that exactly
514 * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
515 */
517 while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
518 (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
519 set->allocChunkLimit >>= 1;
520
521 /* Finally, do the type-independent part of context creation */
525 parent,
526 name);
527
528 ((MemoryContext) set)->mem_allocated = firstBlockSize;
529
530 return (MemoryContext) set;
531}
532
533/*
534 * AllocSetReset
535 * Frees all memory which is allocated in the given set.
536 *
537 * Actually, this routine has some discretion about what to do.
538 * It should mark all allocated chunks freed, but it need not necessarily
539 * give back all the resources the set owns. Our actual implementation is
540 * that we give back all but the "keeper" block (which we must keep, since
541 * it shares a malloc chunk with the context header). In this way, we don't
542 * thrash malloc() when a context is repeatedly reset after small allocations,
543 * which is typical behavior for per-tuple contexts.
544 */
545void
547{
548 AllocSet set = (AllocSet) context;
549 AllocBlock block;
551
553
554#ifdef MEMORY_CONTEXT_CHECKING
555 /* Check for corruption and leaks before freeing */
556 AllocSetCheck(context);
557#endif
558
559 /* Remember keeper block size for Assert below */
560 keepersize = KeeperBlock(set)->endptr - ((char *) set);
561
562 /* Clear chunk freelists */
563 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
564
565 block = set->blocks;
566
567 /* New blocks list will be just the keeper block */
568 set->blocks = KeeperBlock(set);
569
570 while (block != NULL)
571 {
572 AllocBlock next = block->next;
573
574 if (IsKeeperBlock(set, block))
575 {
576 /* Reset the block, but don't return it to malloc */
577 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
578
579#ifdef CLOBBER_FREED_MEMORY
581#else
582 /* wipe_mem() would have done this */
584#endif
585 block->freeptr = datastart;
586 block->prev = NULL;
587 block->next = NULL;
588 }
589 else
590 {
591 /* Normal case, release the block */
592 context->mem_allocated -= block->endptr - ((char *) block);
593
594#ifdef CLOBBER_FREED_MEMORY
595 wipe_mem(block, block->freeptr - ((char *) block));
596#endif
597
598 /*
599 * We need to free the block header's vchunk explicitly, although
600 * the user-data vchunks within will go away in the TRIM below.
601 * Otherwise Valgrind complains about leaked allocations.
602 */
603 VALGRIND_MEMPOOL_FREE(set, block);
604
605 free(block);
606 }
607 block = next;
608 }
609
610 Assert(context->mem_allocated == keepersize);
611
612 /*
613 * Instruct Valgrind to throw away all the vchunks associated with this
614 * context, except for the one covering the AllocSetContext and
615 * keeper-block header. This gets rid of the vchunks for whatever user
616 * data is getting discarded by the context reset.
617 */
619
620 /* Reset block size allocation sequence, too */
621 set->nextBlockSize = set->initBlockSize;
622}
623
624/*
625 * AllocSetDelete
626 * Frees all memory which is allocated in the given set,
627 * in preparation for deletion of the set.
628 *
629 * Unlike AllocSetReset, this *must* free all resources of the set.
630 */
631void
633{
634 AllocSet set = (AllocSet) context;
635 AllocBlock block = set->blocks;
637
639
640#ifdef MEMORY_CONTEXT_CHECKING
641 /* Check for corruption and leaks before freeing */
642 AllocSetCheck(context);
643#endif
644
645 /* Remember keeper block size for Assert below */
646 keepersize = KeeperBlock(set)->endptr - ((char *) set);
647
648 /*
649 * If the context is a candidate for a freelist, put it into that freelist
650 * instead of destroying it.
651 */
652 if (set->freeListIndex >= 0)
653 {
655
656 /*
657 * Reset the context, if it needs it, so that we aren't hanging on to
658 * more than the initial malloc chunk.
659 */
660 if (!context->isReset)
661 MemoryContextResetOnly(context);
662
663 /*
664 * If the freelist is full, just discard what's already in it. See
665 * comments with context_freelists[].
666 */
667 if (freelist->num_free >= MAX_FREE_CONTEXTS)
668 {
669 while (freelist->first_free != NULL)
670 {
671 AllocSetContext *oldset = freelist->first_free;
672
674 freelist->num_free--;
675
676 /* Destroy the context's vpool --- see notes below */
678
679 /* All that remains is to free the header/initial block */
680 free(oldset);
681 }
682 Assert(freelist->num_free == 0);
683 }
684
685 /* Now add the just-deleted context to the freelist. */
686 set->header.nextchild = (MemoryContext) freelist->first_free;
687 freelist->first_free = set;
688 freelist->num_free++;
689
690 return;
691 }
692
693 /* Free all blocks, except the keeper which is part of context header */
694 while (block != NULL)
695 {
696 AllocBlock next = block->next;
697
698 if (!IsKeeperBlock(set, block))
699 context->mem_allocated -= block->endptr - ((char *) block);
700
701#ifdef CLOBBER_FREED_MEMORY
702 wipe_mem(block, block->freeptr - ((char *) block));
703#endif
704
705 if (!IsKeeperBlock(set, block))
706 {
707 /* As in AllocSetReset, free block-header vchunks explicitly */
708 VALGRIND_MEMPOOL_FREE(set, block);
709 free(block);
710 }
711
712 block = next;
713 }
714
715 Assert(context->mem_allocated == keepersize);
716
717 /*
718 * Destroy the vpool. We don't seem to need to explicitly free the
719 * initial block's header vchunk, nor any user-data vchunks that Valgrind
720 * still knows about; they'll all go away automatically.
721 */
723
724 /* Finally, free the context header, including the keeper block */
725 free(set);
726}
727
728/*
729 * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
730 *
731 * AllocSetAlloc()'s comment explains why this is separate.
732 */
734static void *
735AllocSetAllocLarge(MemoryContext context, Size size, int flags)
736{
737 AllocSet set = (AllocSet) context;
738 AllocBlock block;
740 Size chunk_size;
741 Size blksize;
742
743 /* validate 'size' is within the limits for the given 'flags' */
744 MemoryContextCheckSize(context, size, flags);
745
746#ifdef MEMORY_CONTEXT_CHECKING
747 /* ensure there's always space for the sentinel byte */
748 chunk_size = MAXALIGN(size + 1);
749#else
750 chunk_size = MAXALIGN(size);
751#endif
752
753 blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
754 block = (AllocBlock) malloc(blksize);
755 if (block == NULL)
756 return MemoryContextAllocationFailure(context, size, flags);
757
758 /* Make a vchunk covering the new block's header */
760
761 context->mem_allocated += blksize;
762
763 block->aset = set;
764 block->freeptr = block->endptr = ((char *) block) + blksize;
765
766 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
767
768 /* mark the MemoryChunk as externally managed */
770
771#ifdef MEMORY_CONTEXT_CHECKING
772 chunk->requested_size = size;
773 /* set mark to catch clobber of "unused" space */
774 Assert(size < chunk_size);
776#endif
777#ifdef RANDOMIZE_ALLOCATED_MEMORY
778 /* fill the allocated space with junk */
780#endif
781
782 /*
783 * Stick the new block underneath the active allocation block, if any, so
784 * that we don't lose the use of the space remaining therein.
785 */
786 if (set->blocks != NULL)
787 {
788 block->prev = set->blocks;
789 block->next = set->blocks->next;
790 if (block->next)
791 block->next->prev = block;
792 set->blocks->next = block;
793 }
794 else
795 {
796 block->prev = NULL;
797 block->next = NULL;
798 set->blocks = block;
799 }
800
801 /* Ensure any padding bytes are marked NOACCESS. */
803 chunk_size - size);
804
805 /* Disallow access to the chunk header. */
807
809}
810
811/*
812 * Small helper for allocating a new chunk from a chunk, to avoid duplicating
813 * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
814 */
815static inline void *
817 Size size, Size chunk_size, int fidx)
818{
820
821 chunk = (MemoryChunk *) (block->freeptr);
822
823 /* Prepare to initialize the chunk header. */
825
826 block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
827 Assert(block->freeptr <= block->endptr);
828
829 /* store the free list index in the value field */
831
832#ifdef MEMORY_CONTEXT_CHECKING
833 chunk->requested_size = size;
834 /* set mark to catch clobber of "unused" space */
835 if (size < chunk_size)
837#endif
838#ifdef RANDOMIZE_ALLOCATED_MEMORY
839 /* fill the allocated space with junk */
841#endif
842
843 /* Ensure any padding bytes are marked NOACCESS. */
845 chunk_size - size);
846
847 /* Disallow access to the chunk header. */
849
851}
852
853/*
854 * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
855 * allocated from it.
856 *
857 * AllocSetAlloc()'s comment explains why this is separate.
858 */
860static void *
862 int fidx)
863{
864 AllocSet set = (AllocSet) context;
865 AllocBlock block;
867 Size blksize;
869 Size chunk_size;
870
871 /* due to the keeper block set->blocks should always be valid */
872 Assert(set->blocks != NULL);
873 block = set->blocks;
874 availspace = block->endptr - block->freeptr;
875
876 /*
877 * The existing active (top) block does not have enough room for the
878 * requested allocation, but it might still have a useful amount of space
879 * in it. Once we push it down in the block list, we'll never try to
880 * allocate more space from it. So, before we do that, carve up its free
881 * space into chunks that we can put on the set's freelists.
882 *
883 * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
884 * left in the block, this loop cannot iterate more than
885 * ALLOCSET_NUM_FREELISTS-1 times.
886 */
887 while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
888 {
893
894 /*
895 * In most cases, we'll get back the index of the next larger freelist
896 * than the one we need to put this chunk on. The exception is when
897 * availchunk is exactly a power of 2.
898 */
900 {
901 a_fidx--;
902 Assert(a_fidx >= 0);
904 }
905
906 chunk = (MemoryChunk *) (block->freeptr);
907
908 /* Prepare to initialize the chunk header. */
910 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
912
913 /* store the freelist index in the value field */
915#ifdef MEMORY_CONTEXT_CHECKING
916 chunk->requested_size = InvalidAllocSize; /* mark it free */
917#endif
918 /* push this chunk onto the free list */
920
922 link->next = set->freelist[a_fidx];
924
925 set->freelist[a_fidx] = chunk;
926 }
927
928 /*
929 * The first such block has size initBlockSize, and we double the space in
930 * each succeeding block, but not more than maxBlockSize.
931 */
932 blksize = set->nextBlockSize;
933 set->nextBlockSize <<= 1;
934 if (set->nextBlockSize > set->maxBlockSize)
935 set->nextBlockSize = set->maxBlockSize;
936
937 /* Choose the actual chunk size to allocate */
938 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
939 Assert(chunk_size >= size);
940
941 /*
942 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
943 * space... but try to keep it a power of 2.
944 */
946 while (blksize < required_size)
947 blksize <<= 1;
948
949 /* Try to allocate it */
950 block = (AllocBlock) malloc(blksize);
951
952 /*
953 * We could be asking for pretty big blocks here, so cope if malloc fails.
954 * But give up if there's less than 1 MB or so available...
955 */
956 while (block == NULL && blksize > 1024 * 1024)
957 {
958 blksize >>= 1;
959 if (blksize < required_size)
960 break;
961 block = (AllocBlock) malloc(blksize);
962 }
963
964 if (block == NULL)
965 return MemoryContextAllocationFailure(context, size, flags);
966
967 /* Make a vchunk covering the new block's header */
969
970 context->mem_allocated += blksize;
971
972 block->aset = set;
973 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
974 block->endptr = ((char *) block) + blksize;
975
976 /* Mark unallocated space NOACCESS. */
978 blksize - ALLOC_BLOCKHDRSZ);
979
980 block->prev = NULL;
981 block->next = set->blocks;
982 if (block->next)
983 block->next->prev = block;
984 set->blocks = block;
985
986 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
987}
988
989/*
990 * AllocSetAlloc
991 * Returns a pointer to allocated memory of given size or raises an ERROR
992 * on allocation failure, or returns NULL when flags contains
993 * MCXT_ALLOC_NO_OOM.
994 *
995 * No request may exceed:
996 * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
997 * All callers use a much-lower limit.
998 *
999 * Note: when using valgrind, it doesn't matter how the returned allocation
1000 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
1001 * return space that is marked NOACCESS - AllocSetRealloc has to beware!
1002 *
1003 * This function should only contain the most common code paths. Everything
1004 * else should be in pg_noinline helper functions, thus avoiding the overhead
1005 * of creating a stack frame for the common cases. Allocating memory is often
1006 * a bottleneck in many workloads, so avoiding stack frame setup is
1007 * worthwhile. Helper functions should always directly return the newly
1008 * allocated memory so that we can just return that address directly as a tail
1009 * call.
1010 */
1011void *
1012AllocSetAlloc(MemoryContext context, Size size, int flags)
1013{
1014 AllocSet set = (AllocSet) context;
1015 AllocBlock block;
1017 int fidx;
1018 Size chunk_size;
1020
1021 Assert(AllocSetIsValid(set));
1022
1023 /* due to the keeper block set->blocks should never be NULL */
1024 Assert(set->blocks != NULL);
1025
1026 /*
1027 * If requested size exceeds maximum for chunks we hand the request off to
1028 * AllocSetAllocLarge().
1029 */
1030 if (size > set->allocChunkLimit)
1031 return AllocSetAllocLarge(context, size, flags);
1032
1033 /*
1034 * Request is small enough to be treated as a chunk. Look in the
1035 * corresponding free list to see if there is a free chunk we could reuse.
1036 * If one is found, remove it from the free list, make it again a member
1037 * of the alloc set and return its data address.
1038 *
1039 * Note that we don't attempt to ensure there's space for the sentinel
1040 * byte here. We expect a large proportion of allocations to be for sizes
1041 * which are already a power of 2. If we were to always make space for a
1042 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1043 * doubling the memory requirements for such allocations.
1044 */
1045 fidx = AllocSetFreeIndex(size);
1046 chunk = set->freelist[fidx];
1047 if (chunk != NULL)
1048 {
1050
1051 /* Allow access to the chunk header. */
1053
1055
1056 /* pop this chunk off the freelist */
1058 set->freelist[fidx] = link->next;
1060
1061#ifdef MEMORY_CONTEXT_CHECKING
1062 chunk->requested_size = size;
1063 /* set mark to catch clobber of "unused" space */
1066#endif
1067#ifdef RANDOMIZE_ALLOCATED_MEMORY
1068 /* fill the allocated space with junk */
1069 randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1070#endif
1071
1072 /* Ensure any padding bytes are marked NOACCESS. */
1075
1076 /* Disallow access to the chunk header. */
1078
1080 }
1081
1082 /*
1083 * Choose the actual chunk size to allocate.
1084 */
1085 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1086 Assert(chunk_size >= size);
1087
1088 block = set->blocks;
1089 availspace = block->endptr - block->freeptr;
1090
1091 /*
1092 * If there is enough room in the active allocation block, we will put the
1093 * chunk into that block. Else must start a new one.
1094 */
1095 if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1096 return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1097
1098 /* There's enough space on the current block, so allocate from that */
1099 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1100}
1101
1102/*
1103 * AllocSetFree
1104 * Frees allocated memory; memory is removed from the set.
1105 */
1106void
1107AllocSetFree(void *pointer)
1108{
1109 AllocSet set;
1111
1112 /* Allow access to the chunk header. */
1114
1116 {
1117 /* Release single-chunk block. */
1119
1120 /*
1121 * Try to verify that we have a sane block pointer: the block header
1122 * should reference an aset and the freeptr should match the endptr.
1123 */
1124 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1125 elog(ERROR, "could not find block containing chunk %p", chunk);
1126
1127 set = block->aset;
1128
1129#ifdef MEMORY_CONTEXT_CHECKING
1130 {
1131 /* Test for someone scribbling on unused space in chunk */
1132 Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1133 if (!sentinel_ok(pointer, chunk->requested_size))
1134 elog(WARNING, "detected write past chunk end in %s %p",
1135 set->header.name, chunk);
1136 }
1137#endif
1138
1139 /* OK, remove block from aset's list and free it */
1140 if (block->prev)
1141 block->prev->next = block->next;
1142 else
1143 set->blocks = block->next;
1144 if (block->next)
1145 block->next->prev = block->prev;
1146
1147 set->header.mem_allocated -= block->endptr - ((char *) block);
1148
1149#ifdef CLOBBER_FREED_MEMORY
1150 wipe_mem(block, block->freeptr - ((char *) block));
1151#endif
1152
1153 /* As in AllocSetReset, free block-header vchunks explicitly */
1154 VALGRIND_MEMPOOL_FREE(set, block);
1155
1156 free(block);
1157 }
1158 else
1159 {
1161 int fidx;
1163
1164 /*
1165 * In this path, for speed reasons we just Assert that the referenced
1166 * block is good. We can also Assert that the value field is sane.
1167 * Future field experience may show that these Asserts had better
1168 * become regular runtime test-and-elog checks.
1169 */
1170 Assert(AllocBlockIsValid(block));
1171 set = block->aset;
1172
1176
1177#ifdef MEMORY_CONTEXT_CHECKING
1178 /* Test for someone scribbling on unused space in chunk */
1179 if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1180 if (!sentinel_ok(pointer, chunk->requested_size))
1181 elog(WARNING, "detected write past chunk end in %s %p",
1182 set->header.name, chunk);
1183#endif
1184
1185#ifdef CLOBBER_FREED_MEMORY
1187#endif
1188 /* push this chunk onto the top of the free list */
1190 link->next = set->freelist[fidx];
1192 set->freelist[fidx] = chunk;
1193
1194#ifdef MEMORY_CONTEXT_CHECKING
1195
1196 /*
1197 * Reset requested_size to InvalidAllocSize in chunks that are on free
1198 * list.
1199 */
1200 chunk->requested_size = InvalidAllocSize;
1201#endif
1202 }
1203}
1204
1205/*
1206 * AllocSetRealloc
1207 * Returns new pointer to allocated memory of given size or NULL if
1208 * request could not be completed; this memory is added to the set.
1209 * Memory associated with given pointer is copied into the new memory,
1210 * and the old memory is freed.
1211 *
1212 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1213 * makes our Valgrind client requests less-precise, hazarding false negatives.
1214 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1215 * request size.)
1216 */
1217void *
1218AllocSetRealloc(void *pointer, Size size, int flags)
1219{
1220 AllocBlock block;
1221 AllocSet set;
1224 int fidx;
1225
1226 /* Allow access to the chunk header. */
1228
1230 {
1231 /*
1232 * The chunk must have been allocated as a single-chunk block. Use
1233 * realloc() to make the containing block bigger, or smaller, with
1234 * minimum space wastage.
1235 */
1237 Size chksize;
1238 Size blksize;
1240
1242
1243 /*
1244 * Try to verify that we have a sane block pointer: the block header
1245 * should reference an aset and the freeptr should match the endptr.
1246 */
1247 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1248 elog(ERROR, "could not find block containing chunk %p", chunk);
1249
1250 set = block->aset;
1251
1252 /* only check size in paths where the limits could be hit */
1253 MemoryContextCheckSize((MemoryContext) set, size, flags);
1254
1255 oldchksize = block->endptr - (char *) pointer;
1256
1257#ifdef MEMORY_CONTEXT_CHECKING
1258 /* Test for someone scribbling on unused space in chunk */
1259 Assert(chunk->requested_size < oldchksize);
1260 if (!sentinel_ok(pointer, chunk->requested_size))
1261 elog(WARNING, "detected write past chunk end in %s %p",
1262 set->header.name, chunk);
1263#endif
1264
1265#ifdef MEMORY_CONTEXT_CHECKING
1266 /* ensure there's always space for the sentinel byte */
1267 chksize = MAXALIGN(size + 1);
1268#else
1269 chksize = MAXALIGN(size);
1270#endif
1271
1272 /* Do the realloc */
1274 oldblksize = block->endptr - ((char *) block);
1275
1276 newblock = (AllocBlock) realloc(block, blksize);
1277 if (newblock == NULL)
1278 {
1279 /* Disallow access to the chunk header. */
1281 return MemoryContextAllocationFailure(&set->header, size, flags);
1282 }
1283
1284 /*
1285 * Move the block-header vchunk explicitly. (mcxt.c will take care of
1286 * moving the vchunk for the user data.)
1287 */
1289 block = newblock;
1290
1291 /* updated separately, not to underflow when (oldblksize > blksize) */
1292 set->header.mem_allocated -= oldblksize;
1293 set->header.mem_allocated += blksize;
1294
1295 block->freeptr = block->endptr = ((char *) block) + blksize;
1296
1297 /* Update pointers since block has likely been moved */
1298 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1299 pointer = MemoryChunkGetPointer(chunk);
1300 if (block->prev)
1301 block->prev->next = block;
1302 else
1303 set->blocks = block;
1304 if (block->next)
1305 block->next->prev = block;
1306
1307#ifdef MEMORY_CONTEXT_CHECKING
1308#ifdef RANDOMIZE_ALLOCATED_MEMORY
1309
1310 /*
1311 * We can only randomize the extra space if we know the prior request.
1312 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1313 */
1314 if (size > chunk->requested_size)
1315 randomize_mem((char *) pointer + chunk->requested_size,
1316 size - chunk->requested_size);
1317#else
1318
1319 /*
1320 * If this is an increase, realloc() will have marked any
1321 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1322 * also need to adjust trailing bytes from the old allocation (from
1323 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1324 * Make sure not to mark too many bytes in case chunk->requested_size
1325 * < size < oldchksize.
1326 */
1327#ifdef USE_VALGRIND
1328 if (Min(size, oldchksize) > chunk->requested_size)
1329 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1330 Min(size, oldchksize) - chunk->requested_size);
1331#endif
1332#endif
1333
1334 chunk->requested_size = size;
1335 /* set mark to catch clobber of "unused" space */
1336 Assert(size < chksize);
1337 set_sentinel(pointer, size);
1338#else /* !MEMORY_CONTEXT_CHECKING */
1339
1340 /*
1341 * We may need to adjust marking of bytes from the old allocation as
1342 * some of them may be marked NOACCESS. We don't know how much of the
1343 * old chunk size was the requested size; it could have been as small
1344 * as one byte. We have to be conservative and just mark the entire
1345 * old portion DEFINED. Make sure not to mark memory beyond the new
1346 * allocation in case it's smaller than the old one.
1347 */
1348 VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1349#endif
1350
1351 /* Ensure any padding bytes are marked NOACCESS. */
1352 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1353
1354 /* Disallow access to the chunk header. */
1356
1357 return pointer;
1358 }
1359
1360 block = MemoryChunkGetBlock(chunk);
1361
1362 /*
1363 * In this path, for speed reasons we just Assert that the referenced
1364 * block is good. We can also Assert that the value field is sane. Future
1365 * field experience may show that these Asserts had better become regular
1366 * runtime test-and-elog checks.
1367 */
1368 Assert(AllocBlockIsValid(block));
1369 set = block->aset;
1370
1374
1375#ifdef MEMORY_CONTEXT_CHECKING
1376 /* Test for someone scribbling on unused space in chunk */
1377 if (chunk->requested_size < oldchksize)
1378 if (!sentinel_ok(pointer, chunk->requested_size))
1379 elog(WARNING, "detected write past chunk end in %s %p",
1380 set->header.name, chunk);
1381#endif
1382
1383 /*
1384 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1385 * allocated area already is >= the new size. (In particular, we will
1386 * fall out here if the requested size is a decrease.)
1387 */
1388 if (oldchksize >= size)
1389 {
1390#ifdef MEMORY_CONTEXT_CHECKING
1391 Size oldrequest = chunk->requested_size;
1392
1393#ifdef RANDOMIZE_ALLOCATED_MEMORY
1394 /* We can only fill the extra space if we know the prior request */
1395 if (size > oldrequest)
1396 randomize_mem((char *) pointer + oldrequest,
1397 size - oldrequest);
1398#endif
1399
1400 chunk->requested_size = size;
1401
1402 /*
1403 * If this is an increase, mark any newly-available part UNDEFINED.
1404 * Otherwise, mark the obsolete part NOACCESS.
1405 */
1406 if (size > oldrequest)
1407 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1408 size - oldrequest);
1409 else
1410 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1411 oldchksize - size);
1412
1413 /* set mark to catch clobber of "unused" space */
1414 if (size < oldchksize)
1415 set_sentinel(pointer, size);
1416#else /* !MEMORY_CONTEXT_CHECKING */
1417
1418 /*
1419 * We don't have the information to determine whether we're growing
1420 * the old request or shrinking it, so we conservatively mark the
1421 * entire new allocation DEFINED.
1422 */
1424 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1425#endif
1426
1427 /* Disallow access to the chunk header. */
1429
1430 return pointer;
1431 }
1432 else
1433 {
1434 /*
1435 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1436 * allocate a new chunk and copy the data. Since we know the existing
1437 * data isn't huge, this won't involve any great memcpy expense, so
1438 * it's not worth being smarter. (At one time we tried to avoid
1439 * memcpy when it was possible to enlarge the chunk in-place, but that
1440 * turns out to misbehave unpleasantly for repeated cycles of
1441 * palloc/repalloc/pfree: the eventually freed chunks go into the
1442 * wrong freelist for the next initial palloc request, and so we leak
1443 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1444 */
1446 Size oldsize;
1447
1448 /* allocate new chunk (this also checks size is valid) */
1449 newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1450
1451 /* leave immediately if request was not completed */
1452 if (newPointer == NULL)
1453 {
1454 /* Disallow access to the chunk header. */
1456 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1457 }
1458
1459 /*
1460 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1461 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1462 * definedness from the old allocation to the new. If we know the old
1463 * allocation, copy just that much. Otherwise, make the entire old
1464 * chunk defined to avoid errors as we copy the currently-NOACCESS
1465 * trailing bytes.
1466 */
1468#ifdef MEMORY_CONTEXT_CHECKING
1469 oldsize = chunk->requested_size;
1470#else
1473#endif
1474
1475 /* transfer existing data (certain to fit) */
1476 memcpy(newPointer, pointer, oldsize);
1477
1478 /* free old chunk */
1479 AllocSetFree(pointer);
1480
1481 return newPointer;
1482 }
1483}
1484
1485/*
1486 * AllocSetGetChunkContext
1487 * Return the MemoryContext that 'pointer' belongs to.
1488 */
1491{
1493 AllocBlock block;
1494 AllocSet set;
1495
1496 /* Allow access to the chunk header. */
1498
1501 else
1503
1504 /* Disallow access to the chunk header. */
1506
1507 Assert(AllocBlockIsValid(block));
1508 set = block->aset;
1509
1510 return &set->header;
1511}
1512
1513/*
1514 * AllocSetGetChunkSpace
1515 * Given a currently-allocated chunk, determine the total space
1516 * it occupies (including all memory-allocation overhead).
1517 */
1518Size
1520{
1522 int fidx;
1523
1524 /* Allow access to the chunk header. */
1526
1528 {
1530
1531 /* Disallow access to the chunk header. */
1533
1534 Assert(AllocBlockIsValid(block));
1535
1536 return block->endptr - (char *) chunk;
1537 }
1538
1541
1542 /* Disallow access to the chunk header. */
1544
1546}
1547
1548/*
1549 * AllocSetIsEmpty
1550 * Is an allocset empty of any allocated space?
1551 */
1552bool
1554{
1555 Assert(AllocSetIsValid(context));
1556
1557 /*
1558 * For now, we say "empty" only if the context is new or just reset. We
1559 * could examine the freelists to determine if all space has been freed,
1560 * but it's not really worth the trouble for present uses of this
1561 * functionality.
1562 */
1563 if (context->isReset)
1564 return true;
1565 return false;
1566}
1567
1568/*
1569 * AllocSetStats
1570 * Compute stats about memory consumption of an allocset.
1571 *
1572 * printfunc: if not NULL, pass a human-readable stats string to this.
1573 * passthru: pass this pointer through to printfunc.
1574 * totals: if not NULL, add stats about this context into *totals.
1575 * print_to_stderr: print stats to stderr if true, elog otherwise.
1576 */
1577void
1581{
1582 AllocSet set = (AllocSet) context;
1583 Size nblocks = 0;
1584 Size freechunks = 0;
1585 Size totalspace;
1586 Size freespace = 0;
1587 AllocBlock block;
1588 int fidx;
1589
1590 Assert(AllocSetIsValid(set));
1591
1592 /* Include context header in totalspace */
1593 totalspace = MAXALIGN(sizeof(AllocSetContext));
1594
1595 for (block = set->blocks; block != NULL; block = block->next)
1596 {
1597 nblocks++;
1598 totalspace += block->endptr - ((char *) block);
1599 freespace += block->endptr - block->freeptr;
1600 }
1601 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1602 {
1604 MemoryChunk *chunk = set->freelist[fidx];
1605
1606 while (chunk != NULL)
1607 {
1609
1610 /* Allow access to the chunk header. */
1614
1615 freechunks++;
1616 freespace += chksz + ALLOC_CHUNKHDRSZ;
1617
1619 chunk = link->next;
1621 }
1622 }
1623
1624 if (printfunc)
1625 {
1626 char stats_string[200];
1627
1629 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1630 totalspace, nblocks, freespace, freechunks,
1631 totalspace - freespace);
1633 }
1634
1635 if (totals)
1636 {
1637 totals->nblocks += nblocks;
1638 totals->freechunks += freechunks;
1639 totals->totalspace += totalspace;
1640 totals->freespace += freespace;
1641 }
1642}
1643
1644
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	const char *name = set->header.name;
	AllocBlock	prevblock;
	AllocBlock	block;
	Size		total_allocated = 0;

	for (prevblock = NULL, block = set->blocks;
		 block != NULL;
		 prevblock = block, block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		Size		blk_used = block->freeptr - bpoz;
		Size		blk_data = 0;
		Size		nchunks = 0;
		bool		has_external_chunk = false;

		if (IsKeeperBlock(set, block))
			total_allocated += block->endptr - ((char *) set);
		else
			total_allocated += block->endptr - ((char *) block);

		/*
		 * Empty block - empty can be keeper-block only
		 */
		if (!blk_used)
		{
			if (!IsKeeperBlock(set, block))
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Check block header fields
		 */
		if (block->aset != set ||
			block->prev != prevblock ||
			block->freeptr < bpoz ||
			block->freeptr > block->endptr)
			elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
				 name, block);

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			MemoryChunk *chunk = (MemoryChunk *) bpoz;
			Size		chsize,
						dsize;

			/* Allow access to the chunk header. */
			VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

			if (MemoryChunkIsExternal(chunk))
			{
				chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
				has_external_chunk = true;

				/* make sure this chunk consumes the entire block */
				if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
					elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
						 name, chunk, block);
			}
			else
			{
				int			fidx = MemoryChunkGetValue(chunk);

				if (!FreeListIdxIsValid(fidx))
					elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
						 name, chunk, block);

				chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */

				/*
				 * Check the stored block offset correctly references this
				 * block.
				 */
				if (block != MemoryChunkGetBlock(chunk))
					elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
						 name, chunk, block);
			}
			dsize = chunk->requested_size;	/* real data */

			/* an allocated chunk's requested size must be <= the chsize */
			if (dsize != InvalidAllocSize && dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);

			/* chsize must not be smaller than the first freelist's size */
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/*
			 * Check for overwrite of padding space in an allocated chunk.
			 */
			if (dsize != InvalidAllocSize && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			/* if chunk is allocated, disallow access to the chunk header */
			if (dsize != InvalidAllocSize)
				VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);

		if (has_external_chunk && nchunks > 1)
			elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
				 name, block);
	}

	Assert(total_allocated == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */
Datum idx(PG_FUNCTION_ARGS)
Definition _int_op.c:262
void AllocSetReset(MemoryContext context)
Definition aset.c:546
static pg_noinline void * AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags, int fidx)
Definition aset.c:861
#define AllocSetIsValid(set)
Definition aset.c:200
#define AllocBlockIsValid(block)
Definition aset.c:207
void * AllocSetRealloc(void *pointer, Size size, int flags)
Definition aset.c:1218
#define IsKeeperBlock(set, block)
Definition aset.c:248
#define GetFreeListLink(chkptr)
Definition aset.c:138
#define FreeListIdxIsValid(fidx)
Definition aset.c:142
Size AllocSetGetChunkSpace(void *pointer)
Definition aset.c:1519
#define ALLOC_CHUNKHDRSZ
Definition aset.c:109
MemoryContext AllocSetGetChunkContext(void *pointer)
Definition aset.c:1490
void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition aset.c:1578
#define KeeperBlock(set)
Definition aset.c:244
#define GetChunkSizeFromFreeListIdx(fidx)
Definition aset.c:146
#define ALLOC_MINBITS
Definition aset.c:83
struct AllocBlockData * AllocBlock
Definition aset.c:113
#define MAX_FREE_CONTEXTS
Definition aset.c:241
static int AllocSetFreeIndex(Size size)
Definition aset.c:277
bool AllocSetIsEmpty(MemoryContext context)
Definition aset.c:1553
#define ALLOC_BLOCKHDRSZ
Definition aset.c:108
void * AllocSetAlloc(MemoryContext context, Size size, int flags)
Definition aset.c:1012
void * AllocPointer
Definition aset.c:119
#define ALLOCSET_NUM_FREELISTS
Definition aset.c:84
#define ALLOC_CHUNK_FRACTION
Definition aset.c:87
void AllocSetFree(void *pointer)
Definition aset.c:1107
#define FIRST_BLOCKHDRSZ
Definition aset.c:110
void AllocSetDelete(MemoryContext context)
Definition aset.c:632
static void * AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block, Size size, Size chunk_size, int fidx)
Definition aset.c:816
#define ALLOC_CHUNK_LIMIT
Definition aset.c:85
static AllocSetFreeList context_freelists[2]
Definition aset.c:257
static pg_noinline void * AllocSetAllocLarge(MemoryContext context, Size size, int flags)
Definition aset.c:735
#define ExternalChunkGetBlock(chunk)
Definition aset.c:215
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition aset.c:347
AllocSetContext * AllocSet
Definition aset.c:173
static int32 next
Definition blutils.c:225
#define pg_noinline
Definition c.h:307
#define Min(x, y)
Definition c.h:1040
#define MAXALIGN(LEN)
Definition c.h:859
#define PG_USED_FOR_ASSERTS_ONLY
Definition c.h:235
#define Max(x, y)
Definition c.h:1034
#define Assert(condition)
Definition c.h:906
#define MemSetAligned(start, val, len)
Definition c.h:1086
#define unlikely(x)
Definition c.h:424
uint32_t uint32
Definition c.h:579
#define StaticAssertDecl(condition, errmessage)
Definition c.h:971
size_t Size
Definition c.h:652
int errcode(int sqlerrcode)
Definition elog.c:874
int errmsg(const char *fmt,...)
Definition elog.c:1093
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:36
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
void MemoryContextCreate(MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
Definition mcxt.c:1149
MemoryContext TopMemoryContext
Definition mcxt.c:166
void MemoryContextStats(MemoryContext context)
Definition mcxt.c:863
void * MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
Definition mcxt.c:1198
void MemoryContextResetOnly(MemoryContext context)
Definition mcxt.c:422
#define VALGRIND_DESTROY_MEMPOOL(context)
Definition memdebug.h:25
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition memdebug.h:26
#define VALGRIND_MEMPOOL_CHANGE(context, optr, nptr, size)
Definition memdebug.h:31
#define VALGRIND_CREATE_MEMPOOL(context, redzones, zeroed)
Definition memdebug.h:24
#define VALGRIND_MEMPOOL_ALLOC(context, addr, size)
Definition memdebug.h:29
#define VALGRIND_MEMPOOL_TRIM(context, addr, size)
Definition memdebug.h:32
#define VALGRIND_MEMPOOL_FREE(context, addr)
Definition memdebug.h:30
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition memdebug.h:27
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition memdebug.h:28
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition memnodes.h:54
#define ALLOCSET_SMALL_MINSIZE
Definition memutils.h:167
#define ALLOCSET_DEFAULT_MINSIZE
Definition memutils.h:157
#define AllocHugeSizeIsValid(size)
Definition memutils.h:49
#define InvalidAllocSize
Definition memutils.h:47
#define ALLOCSET_SEPARATE_THRESHOLD
Definition memutils.h:187
#define ALLOCSET_SMALL_INITSIZE
Definition memutils.h:168
#define ALLOCSET_DEFAULT_INITSIZE
Definition memutils.h:158
static void MemoryContextCheckSize(MemoryContext context, Size size, int flags)
@ MCTX_ASET_ID
#define MEMORYCHUNK_MAX_BLOCKOFFSET
static Size MemoryChunkGetValue(MemoryChunk *chunk)
#define MemoryChunkGetPointer(c)
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
static void MemoryChunkSetHdrMaskExternal(MemoryChunk *chunk, MemoryContextMethodID methodid)
#define PointerGetMemoryChunk(p)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
struct MemoryContextData * MemoryContext
Definition palloc.h:36
static int pg_leftmost_one_pos32(uint32 word)
Definition pg_bitutils.h:41
PGDLLIMPORT const uint8 pg_leftmost_one_pos[256]
Definition pg_bitutils.c:27
#define snprintf
Definition port.h:260
static int fb(int x)
#define realloc(a, b)
#define free(a)
#define malloc(a)
AllocBlock prev
Definition aset.c:190
AllocSet aset
Definition aset.c:189
char * freeptr
Definition aset.c:192
AllocBlock next
Definition aset.c:191
char * endptr
Definition aset.c:193
uint32 initBlockSize
Definition aset.c:165
uint32 maxBlockSize
Definition aset.c:166
uint32 allocChunkLimit
Definition aset.c:168
MemoryContextData header
Definition aset.c:160
int freeListIndex
Definition aset.c:170
AllocBlock blocks
Definition aset.c:162
uint32 nextBlockSize
Definition aset.c:167
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition aset.c:163
AllocSetContext * first_free
Definition aset.c:253
MemoryContext nextchild
Definition memnodes.h:130
const char * name
Definition memnodes.h:131
const char * name