PostgreSQL Source Code git master
aset.c File Reference
Include dependency graph for aset.c:

Go to the source code of this file.

Data Structures

struct  AllocFreeListLink
 
struct  AllocSetContext
 
struct  AllocBlockData
 
struct  AllocSetFreeList
 

Macros

#define ALLOC_MINBITS   3 /* smallest chunk size is 8 bytes */
 
#define ALLOCSET_NUM_FREELISTS   11
 
#define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
 
#define ALLOC_CHUNK_FRACTION   4
 
#define ALLOC_BLOCKHDRSZ   MAXALIGN(sizeof(AllocBlockData))
 
#define ALLOC_CHUNKHDRSZ   sizeof(MemoryChunk)
 
#define FIRST_BLOCKHDRSZ
 
#define GetFreeListLink(chkptr)    (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
 
#define FreeListIdxIsValid(fidx)    ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
 
#define GetChunkSizeFromFreeListIdx(fidx)    ((((Size) 1) << ALLOC_MINBITS) << (fidx))
 
#define AllocSetIsValid(set)    ((set) && IsA(set, AllocSetContext))
 
#define AllocBlockIsValid(block)    ((block) && AllocSetIsValid((block)->aset))
 
#define ExternalChunkGetBlock(chunk)    (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
 
#define MAX_FREE_CONTEXTS   100 /* arbitrary limit on freelist length */
 
#define KeeperBlock(set)    ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))
 
#define IsKeeperBlock(set, block)   ((block) == (KeeperBlock(set)))
 

Typedefs

typedef struct AllocBlockData * AllocBlock
 
typedef void * AllocPointer
 
typedef struct AllocFreeListLink AllocFreeListLink
 
typedef struct AllocSetContext AllocSetContext
 
typedef AllocSetContext * AllocSet
 
typedef struct AllocBlockData AllocBlockData
 
typedef struct AllocSetFreeList AllocSetFreeList
 

Functions

static int AllocSetFreeIndex (Size size)
 
MemoryContext AllocSetContextCreateInternal (MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
 
void AllocSetReset (MemoryContext context)
 
void AllocSetDelete (MemoryContext context)
 
static pg_noinline void * AllocSetAllocLarge (MemoryContext context, Size size, int flags)
 
static void * AllocSetAllocChunkFromBlock (MemoryContext context, AllocBlock block, Size size, Size chunk_size, int fidx)
 
static pg_noinline void * AllocSetAllocFromNewBlock (MemoryContext context, Size size, int flags, int fidx)
 
void * AllocSetAlloc (MemoryContext context, Size size, int flags)
 
void AllocSetFree (void *pointer)
 
void * AllocSetRealloc (void *pointer, Size size, int flags)
 
MemoryContext AllocSetGetChunkContext (void *pointer)
 
Size AllocSetGetChunkSpace (void *pointer)
 
bool AllocSetIsEmpty (MemoryContext context)
 
void AllocSetStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 

Variables

static AllocSetFreeList context_freelists [2]
 

Macro Definition Documentation

◆ ALLOC_BLOCKHDRSZ

#define ALLOC_BLOCKHDRSZ   MAXALIGN(sizeof(AllocBlockData))

Definition at line 104 of file aset.c.

◆ ALLOC_CHUNK_FRACTION

#define ALLOC_CHUNK_FRACTION   4

Definition at line 87 of file aset.c.

◆ ALLOC_CHUNK_LIMIT

#define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))

Definition at line 85 of file aset.c.

◆ ALLOC_CHUNKHDRSZ

#define ALLOC_CHUNKHDRSZ   sizeof(MemoryChunk)

Definition at line 105 of file aset.c.

◆ ALLOC_MINBITS

#define ALLOC_MINBITS   3 /* smallest chunk size is 8 bytes */

Definition at line 83 of file aset.c.

◆ AllocBlockIsValid

#define AllocBlockIsValid (   block)     ((block) && AllocSetIsValid((block)->aset))

Definition at line 203 of file aset.c.

245typedef struct AllocSetFreeList
246{
 247 int num_free; /* current list length */
 248 AllocSetContext *first_free; /* list header */
249} AllocSetFreeList;
 250
251/* context_freelists[0] is for default params, [1] for small params */
252static AllocSetFreeList context_freelists[2] =
253{
 254 {
 255 0, NULL
 256 },
 257 {
 258 0, NULL
 259 }
260};
261
262
263/* ----------
264 * AllocSetFreeIndex -
265 *
266 * Depending on the size of an allocation compute which freechunk
267 * list of the alloc set it belongs to. Caller must have verified
268 * that size <= ALLOC_CHUNK_LIMIT.
269 * ----------
270 */
271static inline int
272AllocSetFreeIndex(Size size)
273{
274 int idx;
275
276 if (size > (1 << ALLOC_MINBITS))
277 {
278 /*----------
279 * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
280 * This is the same as
281 * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
282 * or equivalently
283 * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
284 *
285 * However, for platforms without intrinsic support, we duplicate the
286 * logic here, allowing an additional optimization. It's reasonable
287 * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
288 * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
289 * the last two bytes.
290 *
291 * Yes, this function is enough of a hot-spot to make it worth this
292 * much trouble.
293 *----------
294 */
295#ifdef HAVE_BITSCAN_REVERSE
296 idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
297#else
298 uint32 t,
299 tsize;
300
301 /* Statically assert that we only have a 16-bit input value. */
303 "ALLOC_CHUNK_LIMIT must be less than 64kB");
304
305 tsize = size - 1;
306 t = tsize >> 8;
308 idx -= ALLOC_MINBITS - 1;
309#endif
310
312 }
313 else
314 idx = 0;
315
316 return idx;
317}
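/*
 * Illustrative sketch (not part of aset.c): with ALLOC_MINBITS = 3 and
 * ALLOCSET_NUM_FREELISTS = 11 the freelists cover chunk sizes of
 * 8, 16, 32, ..., 8192 bytes, so AllocSetFreeIndex() maps a request to the
 * smallest power-of-2 chunk that can hold it.  The hypothetical, normally
 * compiled-out checks below just spell out that mapping.
 */
#ifdef ALLOCSET_FREEINDEX_EXAMPLE	/* hypothetical guard, never defined */
static void
example_alloc_set_free_index(void)
{
	Assert(AllocSetFreeIndex(1) == 0);	/* 1..8 bytes    -> 8-byte chunk */
	Assert(AllocSetFreeIndex(8) == 0);
	Assert(AllocSetFreeIndex(9) == 1);	/* 9..16 bytes   -> 16-byte chunk */
	Assert(AllocSetFreeIndex(64) == 3);	/* exact power of 2 stays put */
	Assert(AllocSetFreeIndex(65) == 4);	/* rounds up to a 128-byte chunk */
	Assert(AllocSetFreeIndex(8192) == 10);	/* largest non-"large" request */
}
#endif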
318
319
320/*
321 * Public routines
322 */
323
324
325/*
326 * AllocSetContextCreateInternal
327 * Create a new AllocSet context.
328 *
329 * parent: parent context, or NULL if top-level context
330 * name: name of context (must be statically allocated)
331 * minContextSize: minimum context size
332 * initBlockSize: initial allocation block size
333 * maxBlockSize: maximum allocation block size
334 *
335 * Most callers should abstract the context size parameters using a macro
336 * such as ALLOCSET_DEFAULT_SIZES.
337 *
338 * Note: don't call this directly; go through the wrapper macro
339 * AllocSetContextCreate.
340 */
343 const char *name,
345 Size initBlockSize,
346 Size maxBlockSize)
347{
348 int freeListIndex;
350 AllocSet set;
351 AllocBlock block;
352
353 /* ensure MemoryChunk's size is properly maxaligned */
355 "sizeof(MemoryChunk) is not maxaligned");
356 /* check we have enough space to store the freelist link */
358 "sizeof(AllocFreeListLink) larger than minimum allocation size");
359
360 /*
361 * First, validate allocation parameters. Once these were regular runtime
362 * tests and elog's, but in practice Asserts seem sufficient because
363 * nobody varies their parameters at runtime. We somewhat arbitrarily
364 * enforce a minimum 1K block size. We restrict the maximum block size to
365 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
366 * regards to addressing the offset between the chunk and the block that
367 * the chunk is stored on. We would be unable to store the offset between
368 * the chunk and block for any chunks that were beyond
369 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
370 * larger than this.
371 */
372 Assert(initBlockSize == MAXALIGN(initBlockSize) &&
373 initBlockSize >= 1024);
374 Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
375 maxBlockSize >= initBlockSize &&
376 AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
377 Assert(minContextSize == 0 ||
379 minContextSize >= 1024 &&
380 minContextSize <= maxBlockSize));
381 Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
382
383 /*
384 * Check whether the parameters match either available freelist. We do
385 * not need to demand a match of maxBlockSize.
386 */
388 initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
389 freeListIndex = 0;
391 initBlockSize == ALLOCSET_SMALL_INITSIZE)
392 freeListIndex = 1;
393 else
394 freeListIndex = -1;
395
396 /*
397 * If a suitable freelist entry exists, just recycle that context.
398 */
399 if (freeListIndex >= 0)
400 {
401 AllocSetFreeList *freelist = &context_freelists[freeListIndex];
402
403 if (freelist->first_free != NULL)
404 {
405 /* Remove entry from freelist */
406 set = freelist->first_free;
407 freelist->first_free = (AllocSet) set->header.nextchild;
408 freelist->num_free--;
409
410 /* Update its maxBlockSize; everything else should be OK */
411 set->maxBlockSize = maxBlockSize;
412
413 /* Reinitialize its header, installing correct name and parent */
417 parent,
418 name);
419
420 ((MemoryContext) set)->mem_allocated =
421 KeeperBlock(set)->endptr - ((char *) set);
422
423 return (MemoryContext) set;
424 }
425 }
426
427 /* Determine size of initial block */
430 if (minContextSize != 0)
432 else
433 firstBlockSize = Max(firstBlockSize, initBlockSize);
434
435 /*
436 * Allocate the initial block. Unlike other aset.c blocks, it starts with
437 * the context header and its block header follows that.
438 */
440 if (set == NULL)
441 {
446 errmsg("out of memory"),
447 errdetail("Failed while creating memory context \"%s\".",
448 name)));
449 }
450
451 /*
452 * Avoid writing code that can fail between here and MemoryContextCreate;
453 * we'd leak the header/initial block if we ereport in this stretch.
454 */
455
456 /* Create a vpool associated with the context */
457 VALGRIND_CREATE_MEMPOOL(set, 0, false);
458
459 /*
460 * Create a vchunk covering both the AllocSetContext struct and the keeper
461 * block's header. (Perhaps it would be more sensible for these to be two
462 * separate vchunks, but doing that seems to tickle bugs in some versions
463 * of Valgrind.) We must have these vchunks, and also a vchunk for each
464 * subsequently-added block header, so that Valgrind considers the
465 * pointers within them while checking for leaked memory. Note that
466 * Valgrind doesn't distinguish between these vchunks and those created by
467 * mcxt.c for the user-accessible-data chunks we allocate.
468 */
470
471 /* Fill in the initial block's block header */
472 block = KeeperBlock(set);
473 block->aset = set;
474 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
475 block->endptr = ((char *) set) + firstBlockSize;
476 block->prev = NULL;
477 block->next = NULL;
478
479 /* Mark unallocated space NOACCESS; leave the block header alone. */
480 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
481
482 /* Remember block as part of block list */
483 set->blocks = block;
484
485 /* Finish filling in aset-specific parts of the context header */
486 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
487
488 set->initBlockSize = (uint32) initBlockSize;
489 set->maxBlockSize = (uint32) maxBlockSize;
490 set->nextBlockSize = (uint32) initBlockSize;
491 set->freeListIndex = freeListIndex;
492
493 /*
494 * Compute the allocation chunk size limit for this context. It can't be
495 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
496 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
497 * even a significant fraction of it, should be treated as large chunks
498 * too. For the typical case of maxBlockSize a power of 2, the chunk size
499 * limit will be at most 1/8th maxBlockSize, so that given a stream of
500 * requests that are all the maximum chunk size we will waste at most
501 * 1/8th of the allocated space.
502 *
503 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
504 */
506 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
507
508 /*
509 * Determine the maximum size that a chunk can be before we allocate an
510 * entire AllocBlock dedicated for that chunk. We set the absolute limit
511 * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
512 * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
513 * sized block. (We opt to keep allocChunkLimit a power-of-2 value
514 * primarily for legacy reasons rather than calculating it so that exactly
515 * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
516 */
517 set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
518 while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
519 (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
520 set->allocChunkLimit >>= 1;
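	/*
	 * Worked example (illustrative): with ALLOCSET_DEFAULT_MAXSIZE (8 MB)
	 * the loop never fires and allocChunkLimit stays at ALLOC_CHUNK_LIMIT
	 * (8 kB); with ALLOCSET_SMALL_MAXSIZE (8 kB) it is halved down to about
	 * 1 kB, so anything larger gets a dedicated single-chunk block.
	 */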
521
522 /* Finally, do the type-independent part of context creation */
526 parent,
527 name);
528
529 ((MemoryContext) set)->mem_allocated = firstBlockSize;
530
531 return (MemoryContext) set;
532}
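/*
 * Usage sketch (illustrative, not part of aset.c): callers normally reach
 * the function above through the AllocSetContextCreate() wrapper macro and
 * one of the standard size macros, then allocate with palloc() and clean up
 * by resetting or deleting the whole context.
 */
#ifdef ALLOCSET_CREATE_EXAMPLE		/* hypothetical guard, never defined */
static void
example_create_allocset(void)
{
	MemoryContext cxt;
	MemoryContext oldcxt;
	char	   *buf;

	/* child of the current context, using the default size parameters */
	cxt = AllocSetContextCreate(CurrentMemoryContext,
								"example context",
								ALLOCSET_DEFAULT_SIZES);

	oldcxt = MemoryContextSwitchTo(cxt);
	buf = palloc(128);			/* served by AllocSetAlloc() */
	memset(buf, 0, 128);
	MemoryContextSwitchTo(oldcxt);

	/* deleting the context releases everything allocated in it */
	MemoryContextDelete(cxt);
}
#endif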
533
534/*
535 * AllocSetReset
536 * Frees all memory which is allocated in the given set.
537 *
538 * Actually, this routine has some discretion about what to do.
539 * It should mark all allocated chunks freed, but it need not necessarily
540 * give back all the resources the set owns. Our actual implementation is
541 * that we give back all but the "keeper" block (which we must keep, since
542 * it shares a malloc chunk with the context header). In this way, we don't
543 * thrash malloc() when a context is repeatedly reset after small allocations,
544 * which is typical behavior for per-tuple contexts.
545 */
546void
548{
549 AllocSet set = (AllocSet) context;
550 AllocBlock block;
552
554
555#ifdef MEMORY_CONTEXT_CHECKING
556 /* Check for corruption and leaks before freeing */
557 AllocSetCheck(context);
558#endif
559
560 /* Remember keeper block size for Assert below */
561 keepersize = KeeperBlock(set)->endptr - ((char *) set);
562
563 /* Clear chunk freelists */
564 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
565
566 block = set->blocks;
567
568 /* New blocks list will be just the keeper block */
569 set->blocks = KeeperBlock(set);
570
571 while (block != NULL)
572 {
573 AllocBlock next = block->next;
574
575 if (IsKeeperBlock(set, block))
576 {
577 /* Reset the block, but don't return it to malloc */
578 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
579
580#ifdef CLOBBER_FREED_MEMORY
582#else
583 /* wipe_mem() would have done this */
585#endif
586 block->freeptr = datastart;
587 block->prev = NULL;
588 block->next = NULL;
589 }
590 else
591 {
592 /* Normal case, release the block */
593 context->mem_allocated -= block->endptr - ((char *) block);
594
595#ifdef CLOBBER_FREED_MEMORY
596 wipe_mem(block, block->freeptr - ((char *) block));
597#endif
598
599 /*
600 * We need to free the block header's vchunk explicitly, although
601 * the user-data vchunks within will go away in the TRIM below.
602 * Otherwise Valgrind complains about leaked allocations.
603 */
604 VALGRIND_MEMPOOL_FREE(set, block);
605
606 free(block);
607 }
608 block = next;
609 }
610
611 Assert(context->mem_allocated == keepersize);
612
613 /*
614 * Instruct Valgrind to throw away all the vchunks associated with this
615 * context, except for the one covering the AllocSetContext and
616 * keeper-block header. This gets rid of the vchunks for whatever user
617 * data is getting discarded by the context reset.
618 */
620
621 /* Reset block size allocation sequence, too */
622 set->nextBlockSize = set->initBlockSize;
623}
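/*
 * Usage sketch (illustrative, not part of aset.c): the reset-and-reuse
 * pattern described above, in the style of an executor per-tuple context.
 * Each iteration allocates freely; one cheap reset discards it all while
 * keeping the keeper block for the next iteration.
 */
#ifdef ALLOCSET_RESET_EXAMPLE		/* hypothetical guard, never defined */
static void
example_per_tuple_loop(MemoryContext per_tuple_context, int ntuples)
{
	for (int i = 0; i < ntuples; i++)
	{
		MemoryContext oldcxt = MemoryContextSwitchTo(per_tuple_context);

		(void) palloc(64);		/* scratch space for this iteration */
		MemoryContextSwitchTo(oldcxt);

		/* frees all but the keeper block, so malloc() isn't thrashed */
		MemoryContextReset(per_tuple_context);
	}
}
#endif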
624
625/*
626 * AllocSetDelete
627 * Frees all memory which is allocated in the given set,
628 * in preparation for deletion of the set.
629 *
630 * Unlike AllocSetReset, this *must* free all resources of the set.
631 */
632void
634{
635 AllocSet set = (AllocSet) context;
636 AllocBlock block = set->blocks;
638
640
641#ifdef MEMORY_CONTEXT_CHECKING
642 /* Check for corruption and leaks before freeing */
643 AllocSetCheck(context);
644#endif
645
646 /* Remember keeper block size for Assert below */
647 keepersize = KeeperBlock(set)->endptr - ((char *) set);
648
649 /*
650 * If the context is a candidate for a freelist, put it into that freelist
651 * instead of destroying it.
652 */
653 if (set->freeListIndex >= 0)
654 {
655 AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
656
657 /*
658 * Reset the context, if it needs it, so that we aren't hanging on to
659 * more than the initial malloc chunk.
660 */
661 if (!context->isReset)
662 MemoryContextResetOnly(context);
663
664 /*
665 * If the freelist is full, just discard what's already in it. See
666 * comments with context_freelists[].
667 */
668 if (freelist->num_free >= MAX_FREE_CONTEXTS)
669 {
670 while (freelist->first_free != NULL)
671 {
672 AllocSetContext *oldset = freelist->first_free;
673
675 freelist->num_free--;
676
677 /* Destroy the context's vpool --- see notes below */
679
680 /* All that remains is to free the header/initial block */
681 free(oldset);
682 }
683 Assert(freelist->num_free == 0);
684 }
685
686 /* Now add the just-deleted context to the freelist. */
687 set->header.nextchild = (MemoryContext) freelist->first_free;
688 freelist->first_free = set;
689 freelist->num_free++;
690
691 return;
692 }
693
694 /* Free all blocks, except the keeper which is part of context header */
695 while (block != NULL)
696 {
697 AllocBlock next = block->next;
698
699 if (!IsKeeperBlock(set, block))
700 context->mem_allocated -= block->endptr - ((char *) block);
701
702#ifdef CLOBBER_FREED_MEMORY
703 wipe_mem(block, block->freeptr - ((char *) block));
704#endif
705
706 if (!IsKeeperBlock(set, block))
707 {
708 /* As in AllocSetReset, free block-header vchunks explicitly */
709 VALGRIND_MEMPOOL_FREE(set, block);
710 free(block);
711 }
712
713 block = next;
714 }
715
716 Assert(context->mem_allocated == keepersize);
717
718 /*
719 * Destroy the vpool. We don't seem to need to explicitly free the
720 * initial block's header vchunk, nor any user-data vchunks that Valgrind
721 * still knows about; they'll all go away automatically.
722 */
724
725 /* Finally, free the context header, including the keeper block */
726 free(set);
727}
728
729/*
730 * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
731 *
732 * AllocSetAlloc()'s comment explains why this is separate.
733 */
735static void *
736AllocSetAllocLarge(MemoryContext context, Size size, int flags)
737{
738 AllocSet set = (AllocSet) context;
739 AllocBlock block;
741 Size chunk_size;
742 Size blksize;
743
744 /* validate 'size' is within the limits for the given 'flags' */
745 MemoryContextCheckSize(context, size, flags);
746
747#ifdef MEMORY_CONTEXT_CHECKING
748 /* ensure there's always space for the sentinel byte */
749 chunk_size = MAXALIGN(size + 1);
750#else
751 chunk_size = MAXALIGN(size);
752#endif
753
754 blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
755 block = (AllocBlock) malloc(blksize);
756 if (block == NULL)
757 return MemoryContextAllocationFailure(context, size, flags);
758
759 /* Make a vchunk covering the new block's header */
761
762 context->mem_allocated += blksize;
763
764 block->aset = set;
765 block->freeptr = block->endptr = ((char *) block) + blksize;
766
767 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
768
769 /* mark the MemoryChunk as externally managed */
771
772#ifdef MEMORY_CONTEXT_CHECKING
773 chunk->requested_size = size;
774 /* set mark to catch clobber of "unused" space */
775 Assert(size < chunk_size);
777#endif
778#ifdef RANDOMIZE_ALLOCATED_MEMORY
779 /* fill the allocated space with junk */
781#endif
782
783 /*
784 * Stick the new block underneath the active allocation block, if any, so
785 * that we don't lose the use of the space remaining therein.
786 */
787 if (set->blocks != NULL)
788 {
789 block->prev = set->blocks;
790 block->next = set->blocks->next;
791 if (block->next)
792 block->next->prev = block;
793 set->blocks->next = block;
794 }
795 else
796 {
797 block->prev = NULL;
798 block->next = NULL;
799 set->blocks = block;
800 }
801
802 /* Ensure any padding bytes are marked NOACCESS. */
804 chunk_size - size);
805
806 /* Disallow access to the chunk header. */
808
810}
811
812/*
 813 * Small helper for allocating a new chunk from a block, to avoid duplicating
814 * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
815 */
816static inline void *
818 Size size, Size chunk_size, int fidx)
819{
821
822 chunk = (MemoryChunk *) (block->freeptr);
823
824 /* Prepare to initialize the chunk header. */
826
827 block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
828 Assert(block->freeptr <= block->endptr);
829
830 /* store the free list index in the value field */
832
833#ifdef MEMORY_CONTEXT_CHECKING
834 chunk->requested_size = size;
835 /* set mark to catch clobber of "unused" space */
836 if (size < chunk_size)
838#endif
839#ifdef RANDOMIZE_ALLOCATED_MEMORY
840 /* fill the allocated space with junk */
842#endif
843
844 /* Ensure any padding bytes are marked NOACCESS. */
846 chunk_size - size);
847
848 /* Disallow access to the chunk header. */
850
852}
853
854/*
855 * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
856 * allocated from it.
857 *
858 * AllocSetAlloc()'s comment explains why this is separate.
859 */
861static void *
862AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
863 int fidx)
864{
865 AllocSet set = (AllocSet) context;
866 AllocBlock block;
868 Size blksize;
870 Size chunk_size;
871
872 /* due to the keeper block set->blocks should always be valid */
873 Assert(set->blocks != NULL);
874 block = set->blocks;
875 availspace = block->endptr - block->freeptr;
876
877 /*
878 * The existing active (top) block does not have enough room for the
879 * requested allocation, but it might still have a useful amount of space
880 * in it. Once we push it down in the block list, we'll never try to
881 * allocate more space from it. So, before we do that, carve up its free
882 * space into chunks that we can put on the set's freelists.
883 *
884 * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
885 * left in the block, this loop cannot iterate more than
886 * ALLOCSET_NUM_FREELISTS-1 times.
887 */
888 while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
889 {
894
895 /*
896 * In most cases, we'll get back the index of the next larger freelist
897 * than the one we need to put this chunk on. The exception is when
898 * availchunk is exactly a power of 2.
899 */
901 {
902 a_fidx--;
903 Assert(a_fidx >= 0);
905 }
906
907 chunk = (MemoryChunk *) (block->freeptr);
908
909 /* Prepare to initialize the chunk header. */
911 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
913
914 /* store the freelist index in the value field */
916#ifdef MEMORY_CONTEXT_CHECKING
917 chunk->requested_size = InvalidAllocSize; /* mark it free */
918#endif
919 /* push this chunk onto the free list */
921
923 link->next = set->freelist[a_fidx];
925
926 set->freelist[a_fidx] = chunk;
927 }
928
929 /*
930 * The first such block has size initBlockSize, and we double the space in
931 * each succeeding block, but not more than maxBlockSize.
932 */
933 blksize = set->nextBlockSize;
934 set->nextBlockSize <<= 1;
935 if (set->nextBlockSize > set->maxBlockSize)
936 set->nextBlockSize = set->maxBlockSize;
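	/*
	 * Illustrative note: with ALLOCSET_DEFAULT_SIZES the successive block
	 * sizes are 8 kB, 16 kB, 32 kB, ..., capped at the 8 MB maxBlockSize.
	 */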
937
938 /* Choose the actual chunk size to allocate */
939 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
940 Assert(chunk_size >= size);
941
942 /*
943 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
944 * space... but try to keep it a power of 2.
945 */
947 while (blksize < required_size)
948 blksize <<= 1;
949
950 /* Try to allocate it */
951 block = (AllocBlock) malloc(blksize);
952
953 /*
954 * We could be asking for pretty big blocks here, so cope if malloc fails.
955 * But give up if there's less than 1 MB or so available...
956 */
957 while (block == NULL && blksize > 1024 * 1024)
958 {
959 blksize >>= 1;
960 if (blksize < required_size)
961 break;
962 block = (AllocBlock) malloc(blksize);
963 }
964
965 if (block == NULL)
966 return MemoryContextAllocationFailure(context, size, flags);
967
968 /* Make a vchunk covering the new block's header */
970
971 context->mem_allocated += blksize;
972
973 block->aset = set;
974 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
975 block->endptr = ((char *) block) + blksize;
976
977 /* Mark unallocated space NOACCESS. */
979 blksize - ALLOC_BLOCKHDRSZ);
980
981 block->prev = NULL;
982 block->next = set->blocks;
983 if (block->next)
984 block->next->prev = block;
985 set->blocks = block;
986
987 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
988}
989
990/*
991 * AllocSetAlloc
992 * Returns a pointer to allocated memory of given size or raises an ERROR
993 * on allocation failure, or returns NULL when flags contains
994 * MCXT_ALLOC_NO_OOM.
995 *
996 * No request may exceed:
997 * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
998 * All callers use a much-lower limit.
999 *
1000 * Note: when using valgrind, it doesn't matter how the returned allocation
1001 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
1002 * return space that is marked NOACCESS - AllocSetRealloc has to beware!
1003 *
1004 * This function should only contain the most common code paths. Everything
1005 * else should be in pg_noinline helper functions, thus avoiding the overhead
1006 * of creating a stack frame for the common cases. Allocating memory is often
1007 * a bottleneck in many workloads, so avoiding stack frame setup is
1008 * worthwhile. Helper functions should always directly return the newly
1009 * allocated memory so that we can just return that address directly as a tail
1010 * call.
1011 */
1012void *
1013AllocSetAlloc(MemoryContext context, Size size, int flags)
1014{
1015 AllocSet set = (AllocSet) context;
1016 AllocBlock block;
1018 int fidx;
1019 Size chunk_size;
1021
1022 Assert(AllocSetIsValid(set));
1023
1024 /* due to the keeper block set->blocks should never be NULL */
1025 Assert(set->blocks != NULL);
1026
1027 /*
1028 * If requested size exceeds maximum for chunks we hand the request off to
1029 * AllocSetAllocLarge().
1030 */
1031 if (size > set->allocChunkLimit)
1032 return AllocSetAllocLarge(context, size, flags);
1033
1034 /*
1035 * Request is small enough to be treated as a chunk. Look in the
1036 * corresponding free list to see if there is a free chunk we could reuse.
1037 * If one is found, remove it from the free list, make it again a member
1038 * of the alloc set and return its data address.
1039 *
1040 * Note that we don't attempt to ensure there's space for the sentinel
1041 * byte here. We expect a large proportion of allocations to be for sizes
1042 * which are already a power of 2. If we were to always make space for a
1043 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1044 * doubling the memory requirements for such allocations.
1045 */
1046 fidx = AllocSetFreeIndex(size);
1047 chunk = set->freelist[fidx];
1048 if (chunk != NULL)
1049 {
1051
1052 /* Allow access to the chunk header. */
1054
1056
1057 /* pop this chunk off the freelist */
1059 set->freelist[fidx] = link->next;
1061
1062#ifdef MEMORY_CONTEXT_CHECKING
1063 chunk->requested_size = size;
1064 /* set mark to catch clobber of "unused" space */
1067#endif
1068#ifdef RANDOMIZE_ALLOCATED_MEMORY
1069 /* fill the allocated space with junk */
1070 randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1071#endif
1072
1073 /* Ensure any padding bytes are marked NOACCESS. */
1076
1077 /* Disallow access to the chunk header. */
1079
1081 }
1082
1083 /*
1084 * Choose the actual chunk size to allocate.
1085 */
1086 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1087 Assert(chunk_size >= size);
1088
1089 block = set->blocks;
1090 availspace = block->endptr - block->freeptr;
1091
1092 /*
1093 * If there is enough room in the active allocation block, we will put the
1094 * chunk into that block. Else must start a new one.
1095 */
1096 if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1097 return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1098
1099 /* There's enough space on the current block, so allocate from that */
1100 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1101}
1102
1103/*
1104 * AllocSetFree
1105 * Frees allocated memory; memory is removed from the set.
1106 */
1107void
1108AllocSetFree(void *pointer)
1109{
1110 AllocSet set;
1112
1113 /* Allow access to the chunk header. */
1115
1117 {
1118 /* Release single-chunk block. */
1120
1121 /*
1122 * Try to verify that we have a sane block pointer: the block header
1123 * should reference an aset and the freeptr should match the endptr.
1124 */
1125 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1126 elog(ERROR, "could not find block containing chunk %p", chunk);
1127
1128 set = block->aset;
1129
1130#ifdef MEMORY_CONTEXT_CHECKING
1131 {
1132 /* Test for someone scribbling on unused space in chunk */
1133 Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1134 if (!sentinel_ok(pointer, chunk->requested_size))
1135 elog(WARNING, "detected write past chunk end in %s %p",
1136 set->header.name, chunk);
1137 }
1138#endif
1139
1140 /* OK, remove block from aset's list and free it */
1141 if (block->prev)
1142 block->prev->next = block->next;
1143 else
1144 set->blocks = block->next;
1145 if (block->next)
1146 block->next->prev = block->prev;
1147
1148 set->header.mem_allocated -= block->endptr - ((char *) block);
1149
1150#ifdef CLOBBER_FREED_MEMORY
1151 wipe_mem(block, block->freeptr - ((char *) block));
1152#endif
1153
1154 /* As in AllocSetReset, free block-header vchunks explicitly */
1155 VALGRIND_MEMPOOL_FREE(set, block);
1156
1157 free(block);
1158 }
1159 else
1160 {
1162 int fidx;
1164
1165 /*
1166 * In this path, for speed reasons we just Assert that the referenced
1167 * block is good. We can also Assert that the value field is sane.
1168 * Future field experience may show that these Asserts had better
1169 * become regular runtime test-and-elog checks.
1170 */
1171 Assert(AllocBlockIsValid(block));
1172 set = block->aset;
1173
1177
1178#ifdef MEMORY_CONTEXT_CHECKING
1179 /* Test for someone scribbling on unused space in chunk */
1180 if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1181 if (!sentinel_ok(pointer, chunk->requested_size))
1182 elog(WARNING, "detected write past chunk end in %s %p",
1183 set->header.name, chunk);
1184#endif
1185
1186#ifdef CLOBBER_FREED_MEMORY
1188#endif
1189 /* push this chunk onto the top of the free list */
1191 link->next = set->freelist[fidx];
1193 set->freelist[fidx] = chunk;
1194
1195#ifdef MEMORY_CONTEXT_CHECKING
1196
1197 /*
1198 * Reset requested_size to InvalidAllocSize in chunks that are on free
1199 * list.
1200 */
1201 chunk->requested_size = InvalidAllocSize;
1202#endif
1203 }
1204}
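/*
 * Note (illustrative): normal code never calls AllocSetFree() directly.
 * pfree() reads the MemoryChunk header that precedes the pointer, finds the
 * aset method ID stored there, and dispatches to the function above through
 * the context methods table.
 */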
1205
1206/*
1207 * AllocSetRealloc
1208 * Returns new pointer to allocated memory of given size or NULL if
1209 * request could not be completed; this memory is added to the set.
1210 * Memory associated with given pointer is copied into the new memory,
1211 * and the old memory is freed.
1212 *
1213 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1214 * makes our Valgrind client requests less-precise, hazarding false negatives.
1215 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1216 * request size.)
1217 */
1218void *
1219AllocSetRealloc(void *pointer, Size size, int flags)
1220{
1221 AllocBlock block;
1222 AllocSet set;
1225 int fidx;
1226
1227 /* Allow access to the chunk header. */
1229
1231 {
1232 /*
1233 * The chunk must have been allocated as a single-chunk block. Use
1234 * realloc() to make the containing block bigger, or smaller, with
1235 * minimum space wastage.
1236 */
1238 Size chksize;
1239 Size blksize;
1241
1243
1244 /*
1245 * Try to verify that we have a sane block pointer: the block header
1246 * should reference an aset and the freeptr should match the endptr.
1247 */
1248 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1249 elog(ERROR, "could not find block containing chunk %p", chunk);
1250
1251 set = block->aset;
1252
1253 /* only check size in paths where the limits could be hit */
1254 MemoryContextCheckSize((MemoryContext) set, size, flags);
1255
1256 oldchksize = block->endptr - (char *) pointer;
1257
1258#ifdef MEMORY_CONTEXT_CHECKING
1259 /* Test for someone scribbling on unused space in chunk */
1260 Assert(chunk->requested_size < oldchksize);
1261 if (!sentinel_ok(pointer, chunk->requested_size))
1262 elog(WARNING, "detected write past chunk end in %s %p",
1263 set->header.name, chunk);
1264#endif
1265
1266#ifdef MEMORY_CONTEXT_CHECKING
1267 /* ensure there's always space for the sentinel byte */
1268 chksize = MAXALIGN(size + 1);
1269#else
1270 chksize = MAXALIGN(size);
1271#endif
1272
1273 /* Do the realloc */
1275 oldblksize = block->endptr - ((char *) block);
1276
1277 newblock = (AllocBlock) realloc(block, blksize);
1278 if (newblock == NULL)
1279 {
1280 /* Disallow access to the chunk header. */
1282 return MemoryContextAllocationFailure(&set->header, size, flags);
1283 }
1284
1285 /*
1286 * Move the block-header vchunk explicitly. (mcxt.c will take care of
1287 * moving the vchunk for the user data.)
1288 */
1290 block = newblock;
1291
1292 /* updated separately, not to underflow when (oldblksize > blksize) */
1293 set->header.mem_allocated -= oldblksize;
1294 set->header.mem_allocated += blksize;
1295
1296 block->freeptr = block->endptr = ((char *) block) + blksize;
1297
1298 /* Update pointers since block has likely been moved */
1299 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1300 pointer = MemoryChunkGetPointer(chunk);
1301 if (block->prev)
1302 block->prev->next = block;
1303 else
1304 set->blocks = block;
1305 if (block->next)
1306 block->next->prev = block;
1307
1308#ifdef MEMORY_CONTEXT_CHECKING
1309#ifdef RANDOMIZE_ALLOCATED_MEMORY
1310
1311 /*
1312 * We can only randomize the extra space if we know the prior request.
1313 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1314 */
1315 if (size > chunk->requested_size)
1316 randomize_mem((char *) pointer + chunk->requested_size,
1317 size - chunk->requested_size);
1318#else
1319
1320 /*
1321 * If this is an increase, realloc() will have marked any
1322 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1323 * also need to adjust trailing bytes from the old allocation (from
1324 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1325 * Make sure not to mark too many bytes in case chunk->requested_size
1326 * < size < oldchksize.
1327 */
1328#ifdef USE_VALGRIND
1329 if (Min(size, oldchksize) > chunk->requested_size)
1330 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1331 Min(size, oldchksize) - chunk->requested_size);
1332#endif
1333#endif
1334
1335 chunk->requested_size = size;
1336 /* set mark to catch clobber of "unused" space */
1337 Assert(size < chksize);
1338 set_sentinel(pointer, size);
1339#else /* !MEMORY_CONTEXT_CHECKING */
1340
1341 /*
1342 * We may need to adjust marking of bytes from the old allocation as
1343 * some of them may be marked NOACCESS. We don't know how much of the
1344 * old chunk size was the requested size; it could have been as small
1345 * as one byte. We have to be conservative and just mark the entire
1346 * old portion DEFINED. Make sure not to mark memory beyond the new
1347 * allocation in case it's smaller than the old one.
1348 */
1349 VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1350#endif
1351
1352 /* Ensure any padding bytes are marked NOACCESS. */
1353 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1354
1355 /* Disallow access to the chunk header. */
1357
1358 return pointer;
1359 }
1360
1361 block = MemoryChunkGetBlock(chunk);
1362
1363 /*
1364 * In this path, for speed reasons we just Assert that the referenced
1365 * block is good. We can also Assert that the value field is sane. Future
1366 * field experience may show that these Asserts had better become regular
1367 * runtime test-and-elog checks.
1368 */
1369 Assert(AllocBlockIsValid(block));
1370 set = block->aset;
1371
1375
1376#ifdef MEMORY_CONTEXT_CHECKING
1377 /* Test for someone scribbling on unused space in chunk */
1378 if (chunk->requested_size < oldchksize)
1379 if (!sentinel_ok(pointer, chunk->requested_size))
1380 elog(WARNING, "detected write past chunk end in %s %p",
1381 set->header.name, chunk);
1382#endif
1383
1384 /*
1385 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1386 * allocated area already is >= the new size. (In particular, we will
1387 * fall out here if the requested size is a decrease.)
1388 */
1389 if (oldchksize >= size)
1390 {
1391#ifdef MEMORY_CONTEXT_CHECKING
1392 Size oldrequest = chunk->requested_size;
1393
1394#ifdef RANDOMIZE_ALLOCATED_MEMORY
1395 /* We can only fill the extra space if we know the prior request */
1396 if (size > oldrequest)
1397 randomize_mem((char *) pointer + oldrequest,
1398 size - oldrequest);
1399#endif
1400
1401 chunk->requested_size = size;
1402
1403 /*
1404 * If this is an increase, mark any newly-available part UNDEFINED.
1405 * Otherwise, mark the obsolete part NOACCESS.
1406 */
1407 if (size > oldrequest)
1408 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1409 size - oldrequest);
1410 else
1411 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1412 oldchksize - size);
1413
1414 /* set mark to catch clobber of "unused" space */
1415 if (size < oldchksize)
1416 set_sentinel(pointer, size);
1417#else /* !MEMORY_CONTEXT_CHECKING */
1418
1419 /*
1420 * We don't have the information to determine whether we're growing
1421 * the old request or shrinking it, so we conservatively mark the
1422 * entire new allocation DEFINED.
1423 */
1425 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1426#endif
1427
1428 /* Disallow access to the chunk header. */
1430
1431 return pointer;
1432 }
1433 else
1434 {
1435 /*
1436 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1437 * allocate a new chunk and copy the data. Since we know the existing
1438 * data isn't huge, this won't involve any great memcpy expense, so
1439 * it's not worth being smarter. (At one time we tried to avoid
1440 * memcpy when it was possible to enlarge the chunk in-place, but that
1441 * turns out to misbehave unpleasantly for repeated cycles of
1442 * palloc/repalloc/pfree: the eventually freed chunks go into the
1443 * wrong freelist for the next initial palloc request, and so we leak
1444 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1445 */
1447 Size oldsize;
1448
1449 /* allocate new chunk (this also checks size is valid) */
1450 newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1451
1452 /* leave immediately if request was not completed */
1453 if (newPointer == NULL)
1454 {
1455 /* Disallow access to the chunk header. */
1457 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1458 }
1459
1460 /*
1461 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1462 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1463 * definedness from the old allocation to the new. If we know the old
1464 * allocation, copy just that much. Otherwise, make the entire old
1465 * chunk defined to avoid errors as we copy the currently-NOACCESS
1466 * trailing bytes.
1467 */
1469#ifdef MEMORY_CONTEXT_CHECKING
1470 oldsize = chunk->requested_size;
1471#else
1474#endif
1475
1476 /* transfer existing data (certain to fit) */
1477 memcpy(newPointer, pointer, oldsize);
1478
1479 /* free old chunk */
1480 AllocSetFree(pointer);
1481
1482 return newPointer;
1483 }
1484}
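/*
 * Usage sketch (illustrative, not part of aset.c): callers reach the
 * function above via repalloc().  Because the enlarge-a-small-chunk path
 * allocates a new chunk and copies, the data may move, so the returned
 * pointer must always replace the old one.
 */
#ifdef ALLOCSET_REALLOC_EXAMPLE		/* hypothetical guard, never defined */
static char *
example_grow_buffer(char *buf, Size newsize)
{
	/* may return the same pointer (shrink / in-place) or a new one */
	return repalloc(buf, newsize);
}
#endif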
1485
1486/*
1487 * AllocSetGetChunkContext
1488 * Return the MemoryContext that 'pointer' belongs to.
1489 */
1491AllocSetGetChunkContext(void *pointer)
1492{
1494 AllocBlock block;
1495 AllocSet set;
1496
1497 /* Allow access to the chunk header. */
1499
1502 else
1504
1505 /* Disallow access to the chunk header. */
1507
1508 Assert(AllocBlockIsValid(block));
1509 set = block->aset;
1510
1511 return &set->header;
1512}
1513
1514/*
1515 * AllocSetGetChunkSpace
1516 * Given a currently-allocated chunk, determine the total space
1517 * it occupies (including all memory-allocation overhead).
1518 */
1519Size
1520AllocSetGetChunkSpace(void *pointer)
1521{
1523 int fidx;
1524
1525 /* Allow access to the chunk header. */
1527
1529 {
1531
1532 /* Disallow access to the chunk header. */
1534
1535 Assert(AllocBlockIsValid(block));
1536
1537 return block->endptr - (char *) chunk;
1538 }
1539
1542
1543 /* Disallow access to the chunk header. */
1545
1547}
1548
1549/*
1550 * AllocSetIsEmpty
1551 * Is an allocset empty of any allocated space?
1552 */
1553bool
1555{
1556 Assert(AllocSetIsValid(context));
1557
1558 /*
1559 * For now, we say "empty" only if the context is new or just reset. We
1560 * could examine the freelists to determine if all space has been freed,
1561 * but it's not really worth the trouble for present uses of this
1562 * functionality.
1563 */
1564 if (context->isReset)
1565 return true;
1566 return false;
1567}
1568
1569/*
1570 * AllocSetStats
1571 * Compute stats about memory consumption of an allocset.
1572 *
1573 * printfunc: if not NULL, pass a human-readable stats string to this.
1574 * passthru: pass this pointer through to printfunc.
1575 * totals: if not NULL, add stats about this context into *totals.
1576 * print_to_stderr: print stats to stderr if true, elog otherwise.
1577 */
1578void
1582{
1583 AllocSet set = (AllocSet) context;
1584 Size nblocks = 0;
1585 Size freechunks = 0;
1586 Size totalspace;
1587 Size freespace = 0;
1588 AllocBlock block;
1589 int fidx;
1590
1591 Assert(AllocSetIsValid(set));
1592
1593 /* Include context header in totalspace */
1594 totalspace = MAXALIGN(sizeof(AllocSetContext));
1595
1596 for (block = set->blocks; block != NULL; block = block->next)
1597 {
1598 nblocks++;
1599 totalspace += block->endptr - ((char *) block);
1600 freespace += block->endptr - block->freeptr;
1601 }
1602 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1603 {
1605 MemoryChunk *chunk = set->freelist[fidx];
1606
1607 while (chunk != NULL)
1608 {
1610
1611 /* Allow access to the chunk header. */
1615
1616 freechunks++;
1617 freespace += chksz + ALLOC_CHUNKHDRSZ;
1618
1620 chunk = link->next;
1622 }
1623 }
1624
1625 if (printfunc)
1626 {
1627 char stats_string[200];
1628
1630 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1631 totalspace, nblocks, freespace, freechunks,
1632 totalspace - freespace);
1634 }
1635
1636 if (totals)
1637 {
1638 totals->nblocks += nblocks;
1639 totals->freechunks += freechunks;
1640 totals->totalspace += totalspace;
1641 totals->freespace += freespace;
1642 }
1643}
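/*
 * Usage sketch (illustrative, not part of aset.c): the usual consumer of
 * this callback is MemoryContextStats(), typically invoked from a debugger
 * or when an out-of-memory error is reported; for each allocset it emits a
 * line of the form
 *   "%zu total in %zu blocks; %zu free (%zu chunks); %zu used".
 */
#ifdef ALLOCSET_STATS_EXAMPLE		/* hypothetical guard, never defined */
static void
example_dump_memory_stats(void)
{
	MemoryContextStats(TopMemoryContext);	/* walks the whole context tree */
}
#endif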
1644
1645
1646#ifdef MEMORY_CONTEXT_CHECKING
1647
1648/*
1649 * AllocSetCheck
1650 * Walk through chunks and check consistency of memory.
1651 *
1652 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1653 * find yourself in an infinite loop when trouble occurs, because this
1654 * routine will be entered again when elog cleanup tries to release memory!
1655 */
1656void
1658{
1659 AllocSet set = (AllocSet) context;
1660 const char *name = set->header.name;
1662 AllocBlock block;
1664
1665 for (prevblock = NULL, block = set->blocks;
1666 block != NULL;
1667 prevblock = block, block = block->next)
1668 {
1669 char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1670 Size blk_used = block->freeptr - bpoz;
1671 Size blk_data = 0;
1672 Size nchunks = 0;
1673 bool has_external_chunk = false;
1674
1675 if (IsKeeperBlock(set, block))
1676 total_allocated += block->endptr - ((char *) set);
1677 else
1678 total_allocated += block->endptr - ((char *) block);
1679
1680 /*
1681 * Empty block - empty can be keeper-block only
1682 */
1683 if (!blk_used)
1684 {
1685 if (!IsKeeperBlock(set, block))
1686 elog(WARNING, "problem in alloc set %s: empty block %p",
1687 name, block);
1688 }
1689
1690 /*
1691 * Check block header fields
1692 */
1693 if (block->aset != set ||
1694 block->prev != prevblock ||
1695 block->freeptr < bpoz ||
1696 block->freeptr > block->endptr)
1697 elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1698 name, block);
1699
1700 /*
1701 * Chunk walker
1702 */
1703 while (bpoz < block->freeptr)
1704 {
1706 Size chsize,
1707 dsize;
1708
1709 /* Allow access to the chunk header. */
1711
1713 {
1714 chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1715 has_external_chunk = true;
1716
1717 /* make sure this chunk consumes the entire block */
1719 elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1720 name, chunk, block);
1721 }
1722 else
1723 {
1725
1727 elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1728 name, chunk, block);
1729
1730 chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1731
1732 /*
1733 * Check the stored block offset correctly references this
1734 * block.
1735 */
1736 if (block != MemoryChunkGetBlock(chunk))
1737 elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1738 name, chunk, block);
1739 }
1740 dsize = chunk->requested_size; /* real data */
1741
1742 /* an allocated chunk's requested size must be <= the chsize */
1743 if (dsize != InvalidAllocSize && dsize > chsize)
1744 elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1745 name, chunk, block);
1746
1747 /* chsize must not be smaller than the first freelist's size */
1748 if (chsize < (1 << ALLOC_MINBITS))
1749 elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1750 name, chsize, chunk, block);
1751
1752 /*
1753 * Check for overwrite of padding space in an allocated chunk.
1754 */
1755 if (dsize != InvalidAllocSize && dsize < chsize &&
1757 elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1758 name, block, chunk);
1759
1760 /* if chunk is allocated, disallow access to the chunk header */
1761 if (dsize != InvalidAllocSize)
1763
1764 blk_data += chsize;
1765 nchunks++;
1766
1768 }
1769
1770 if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1771 elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1772 name, block);
1773
1774 if (has_external_chunk && nchunks > 1)
1775 elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1776 name, block);
1777 }
1778
1780}
1781
1782#endif /* MEMORY_CONTEXT_CHECKING */

◆ ALLOCSET_NUM_FREELISTS

#define ALLOCSET_NUM_FREELISTS   11

Definition at line 84 of file aset.c.

◆ AllocSetIsValid

#define AllocSetIsValid (   set)     ((set) && IsA(set, AllocSetContext))

Definition at line 196 of file aset.c.

◆ ExternalChunkGetBlock

#define ExternalChunkGetBlock (   chunk)     (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)

Definition at line 211 of file aset.c.

◆ FIRST_BLOCKHDRSZ

#define FIRST_BLOCKHDRSZ
Value:
(MAXALIGN(sizeof(AllocSetContext)) + ALLOC_BLOCKHDRSZ)
Definition at line 106 of file aset.c.

◆ FreeListIdxIsValid

#define FreeListIdxIsValid (   fidx)     ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)

Definition at line 138 of file aset.c.

◆ GetChunkSizeFromFreeListIdx

#define GetChunkSizeFromFreeListIdx (   fidx)     ((((Size) 1) << ALLOC_MINBITS) << (fidx))

Definition at line 142 of file aset.c.
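For example, GetChunkSizeFromFreeListIdx(0) yields 8 bytes, GetChunkSizeFromFreeListIdx(3) yields 64 bytes, and GetChunkSizeFromFreeListIdx(10) yields 8192 bytes (ALLOC_CHUNK_LIMIT); it is the inverse of AllocSetFreeIndex().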

◆ GetFreeListLink

#define GetFreeListLink (   chkptr)     (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)

Definition at line 134 of file aset.c.

◆ IsKeeperBlock

#define IsKeeperBlock (set, block)    ((block) == (KeeperBlock(set)))

Definition at line 244 of file aset.c.

◆ KeeperBlock

#define KeeperBlock (   set)     ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))

Definition at line 240 of file aset.c.

◆ MAX_FREE_CONTEXTS

#define MAX_FREE_CONTEXTS   100 /* arbitrary limit on freelist length */

Definition at line 237 of file aset.c.

Typedef Documentation

◆ AllocBlock

Definition at line 109 of file aset.c.

◆ AllocBlockData

◆ AllocFreeListLink

◆ AllocPointer

Definition at line 115 of file aset.c.

◆ AllocSet

Definition at line 169 of file aset.c.

◆ AllocSetContext

◆ AllocSetFreeList

Function Documentation

◆ AllocSetAlloc()

void * AllocSetAlloc (MemoryContext context, Size size, int flags)

Definition at line 1014 of file aset.c.

1015{
1016 AllocSet set = (AllocSet) context;
1017 AllocBlock block;
1019 int fidx;
1020 Size chunk_size;
1022
1023 Assert(AllocSetIsValid(set));
1024
1025 /* due to the keeper block set->blocks should never be NULL */
1026 Assert(set->blocks != NULL);
1027
1028 /*
1029 * If requested size exceeds maximum for chunks we hand the request off to
1030 * AllocSetAllocLarge().
1031 */
1032 if (size > set->allocChunkLimit)
1033 return AllocSetAllocLarge(context, size, flags);
1034
1035 /*
1036 * Request is small enough to be treated as a chunk. Look in the
1037 * corresponding free list to see if there is a free chunk we could reuse.
1038 * If one is found, remove it from the free list, make it again a member
1039 * of the alloc set and return its data address.
1040 *
1041 * Note that we don't attempt to ensure there's space for the sentinel
1042 * byte here. We expect a large proportion of allocations to be for sizes
1043 * which are already a power of 2. If we were to always make space for a
1044 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1045 * doubling the memory requirements for such allocations.
1046 */
1047 fidx = AllocSetFreeIndex(size);
1048 chunk = set->freelist[fidx];
1049 if (chunk != NULL)
1050 {
1052
1053 /* Allow access to the chunk header. */
1055
1057
1058 /* pop this chunk off the freelist */
1060 set->freelist[fidx] = link->next;
1062
1063#ifdef MEMORY_CONTEXT_CHECKING
1064 chunk->requested_size = size;
1065 /* set mark to catch clobber of "unused" space */
1068#endif
1069#ifdef RANDOMIZE_ALLOCATED_MEMORY
1070 /* fill the allocated space with junk */
1071 randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1072#endif
1073
1074 /* Ensure any padding bytes are marked NOACCESS. */
1077
1078 /* Disallow access to the chunk header. */
1080
1082 }
1083
1084 /*
1085 * Choose the actual chunk size to allocate.
1086 */
1087 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1088 Assert(chunk_size >= size);
1089
1090 block = set->blocks;
1091 availspace = block->endptr - block->freeptr;
1092
1093 /*
1094 * If there is enough room in the active allocation block, we will put the
1095 * chunk into that block. Else must start a new one.
1096 */
1097 if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1098 return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1099
1100 /* There's enough space on the current block, so allocate from that */
1101 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1102}

References ALLOC_CHUNKHDRSZ, AllocSetContext::allocChunkLimit, AllocSetAllocChunkFromBlock(), AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetFreeIndex(), AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MemoryChunkGetPointer, MemoryChunkGetValue(), unlikely, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

Referenced by AllocSetRealloc().
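
Callers never invoke AllocSetAlloc() directly; it is reached through the memory-context API when palloc() runs in a context created by aset.c. The sketch below is illustrative extension-style code, not part of aset.c (the context name and request sizes are made up). It exercises the two paths above: a small request served from a freelist or the active block, and a request above allocChunkLimit that is handed to AllocSetAllocLarge().

#include "postgres.h"
#include "utils/memutils.h"

/*
 * Illustrative only: how requests normally reach AllocSetAlloc().
 */
static void
aset_alloc_example(void)
{
    MemoryContext cxt;
    MemoryContext oldcxt;
    char       *small;
    char       *large;

    cxt = AllocSetContextCreate(CurrentMemoryContext,
                                "aset alloc example",
                                ALLOCSET_DEFAULT_SIZES);
    oldcxt = MemoryContextSwitchTo(cxt);

    /* well under allocChunkLimit: served from a freelist or the active block */
    small = palloc(64);

    /* above allocChunkLimit (typically 8 kB with the default parameters):
     * handed off to AllocSetAllocLarge() */
    large = palloc(64 * 1024);

    pfree(small);
    pfree(large);

    MemoryContextSwitchTo(oldcxt);
    MemoryContextDelete(cxt);
}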

◆ AllocSetAllocChunkFromBlock()

static void * AllocSetAllocChunkFromBlock ( MemoryContext  context,
AllocBlock  block,
Size  size,
Size  chunk_size,
int  fidx 
)
inline static

Definition at line 818 of file aset.c.

820{
822
823 chunk = (MemoryChunk *) (block->freeptr);
824
825 /* Prepare to initialize the chunk header. */
827
828 block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
829 Assert(block->freeptr <= block->endptr);
830
831 /* store the free list index in the value field */
833
834#ifdef MEMORY_CONTEXT_CHECKING
835 chunk->requested_size = size;
836 /* set mark to catch clobber of "unused" space */
837 if (size < chunk_size)
839#endif
840#ifdef RANDOMIZE_ALLOCATED_MEMORY
841 /* fill the allocated space with junk */
843#endif
844
845 /* Ensure any padding bytes are marked NOACCESS. */
847 chunk_size - size);
848
849 /* Disallow access to the chunk header. */
851
853}

References ALLOC_CHUNKHDRSZ, Assert, AllocBlockData::endptr, fb(), AllocBlockData::freeptr, MCTX_ASET_ID, MemoryChunkGetPointer, MemoryChunkSetHdrMask(), VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

Referenced by AllocSetAlloc(), and AllocSetAllocFromNewBlock().
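
As a rough standalone model of the pointer-bump step above, carving a chunk is just writing a header at freeptr and advancing it past the header plus the chunk body. Block and HDRSZ below are simplified stand-ins for AllocBlockData and ALLOC_CHUNKHDRSZ, not the real layouts.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define HDRSZ sizeof(size_t)     /* stand-in for ALLOC_CHUNKHDRSZ */

typedef struct
{
    char   *freeptr;
    char   *endptr;
} Block;

static void *
carve_chunk(Block *b, size_t chunk_size)
{
    char   *hdr = b->freeptr;

    assert(hdr + HDRSZ + chunk_size <= b->endptr);
    *(size_t *) hdr = chunk_size;          /* simplified "chunk header" */
    b->freeptr += HDRSZ + chunk_size;      /* bump past header + body */
    return hdr + HDRSZ;                    /* caller-visible pointer */
}

int
main(void)
{
    char    buf[1024];
    Block   b = {buf, buf + sizeof(buf)};

    void   *p = carve_chunk(&b, 64);
    printf("chunk at offset %td, %zu bytes free\n",
           (char *) p - buf, (size_t) (b.endptr - b.freeptr));
    return 0;
}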

◆ AllocSetAllocFromNewBlock()

static pg_noinline void * AllocSetAllocFromNewBlock ( MemoryContext  context,
Size  size,
int  flags,
int  fidx 
)
static

Definition at line 863 of file aset.c.

865{
866 AllocSet set = (AllocSet) context;
867 AllocBlock block;
869 Size blksize;
871 Size chunk_size;
872
873 /* due to the keeper block set->blocks should always be valid */
874 Assert(set->blocks != NULL);
875 block = set->blocks;
876 availspace = block->endptr - block->freeptr;
877
878 /*
879 * The existing active (top) block does not have enough room for the
880 * requested allocation, but it might still have a useful amount of space
881 * in it. Once we push it down in the block list, we'll never try to
882 * allocate more space from it. So, before we do that, carve up its free
883 * space into chunks that we can put on the set's freelists.
884 *
885 * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
886 * left in the block, this loop cannot iterate more than
887 * ALLOCSET_NUM_FREELISTS-1 times.
888 */
889 while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
890 {
895
896 /*
897 * In most cases, we'll get back the index of the next larger freelist
898 * than the one we need to put this chunk on. The exception is when
899 * availchunk is exactly a power of 2.
900 */
902 {
903 a_fidx--;
904 Assert(a_fidx >= 0);
906 }
907
908 chunk = (MemoryChunk *) (block->freeptr);
909
910 /* Prepare to initialize the chunk header. */
912 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
914
915 /* store the freelist index in the value field */
917#ifdef MEMORY_CONTEXT_CHECKING
918 chunk->requested_size = InvalidAllocSize; /* mark it free */
919#endif
920 /* push this chunk onto the free list */
922
924 link->next = set->freelist[a_fidx];
926
927 set->freelist[a_fidx] = chunk;
928 }
929
930 /*
931 * The first such block has size initBlockSize, and we double the space in
932 * each succeeding block, but not more than maxBlockSize.
933 */
934 blksize = set->nextBlockSize;
935 set->nextBlockSize <<= 1;
936 if (set->nextBlockSize > set->maxBlockSize)
937 set->nextBlockSize = set->maxBlockSize;
938
939 /* Choose the actual chunk size to allocate */
940 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
941 Assert(chunk_size >= size);
942
943 /*
944 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
945 * space... but try to keep it a power of 2.
946 */
948 while (blksize < required_size)
949 blksize <<= 1;
950
951 /* Try to allocate it */
952 block = (AllocBlock) malloc(blksize);
953
954 /*
955 * We could be asking for pretty big blocks here, so cope if malloc fails.
956 * But give up if there's less than 1 MB or so available...
957 */
958 while (block == NULL && blksize > 1024 * 1024)
959 {
960 blksize >>= 1;
961 if (blksize < required_size)
962 break;
963 block = (AllocBlock) malloc(blksize);
964 }
965
966 if (block == NULL)
967 return MemoryContextAllocationFailure(context, size, flags);
968
969 /* Make a vchunk covering the new block's header */
971
972 context->mem_allocated += blksize;
973
974 block->aset = set;
975 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
976 block->endptr = ((char *) block) + blksize;
977
978 /* Mark unallocated space NOACCESS. */
980 blksize - ALLOC_BLOCKHDRSZ);
981
982 block->prev = NULL;
983 block->next = set->blocks;
984 if (block->next)
985 block->next->prev = block;
986 set->blocks = block;
987
988 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
989}
uint32 maxBlockSize
Definition aset.c:162
uint32 nextBlockSize
Definition aset.c:163

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, ALLOC_MINBITS, AllocSetAllocChunkFromBlock(), AllocSetFreeIndex(), AllocBlockData::aset, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, InvalidAllocSize, malloc, AllocSetContext::maxBlockSize, MCTX_ASET_ID, MemoryContextData::mem_allocated, MemoryChunkSetHdrMask(), MemoryContextAllocationFailure(), AllocBlockData::next, AllocSetContext::nextBlockSize, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and VALGRIND_MEMPOOL_ALLOC.

Referenced by AllocSetAlloc().
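
A standalone sketch of the growth schedule described above: each new block doubles in size until maxBlockSize is reached, then stays there. The 8 kB and 8 MB figures mirror ALLOCSET_DEFAULT_SIZES but are hard-coded here purely for illustration.

#include <stdio.h>

int
main(void)
{
    unsigned int initBlockSize = 8 * 1024;
    unsigned int maxBlockSize = 8 * 1024 * 1024;
    unsigned int nextBlockSize = initBlockSize;

    for (int i = 0; i < 14; i++)
    {
        unsigned int blksize = nextBlockSize;

        /* same doubling-with-clamp step as in AllocSetAllocFromNewBlock() */
        nextBlockSize <<= 1;
        if (nextBlockSize > maxBlockSize)
            nextBlockSize = maxBlockSize;
        printf("block %2d: %u bytes\n", i + 1, blksize);
    }
    return 0;
}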

◆ AllocSetAllocLarge()

static pg_noinline void * AllocSetAllocLarge ( MemoryContext  context,
Size  size,
int  flags 
)
static

Definition at line 737 of file aset.c.

738{
739 AllocSet set = (AllocSet) context;
740 AllocBlock block;
742 Size chunk_size;
743 Size blksize;
744
745 /* validate 'size' is within the limits for the given 'flags' */
746 MemoryContextCheckSize(context, size, flags);
747
748#ifdef MEMORY_CONTEXT_CHECKING
749 /* ensure there's always space for the sentinel byte */
750 chunk_size = MAXALIGN(size + 1);
751#else
752 chunk_size = MAXALIGN(size);
753#endif
754
755 blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
756 block = (AllocBlock) malloc(blksize);
757 if (block == NULL)
758 return MemoryContextAllocationFailure(context, size, flags);
759
760 /* Make a vchunk covering the new block's header */
762
763 context->mem_allocated += blksize;
764
765 block->aset = set;
766 block->freeptr = block->endptr = ((char *) block) + blksize;
767
768 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
769
770 /* mark the MemoryChunk as externally managed */
772
773#ifdef MEMORY_CONTEXT_CHECKING
774 chunk->requested_size = size;
775 /* set mark to catch clobber of "unused" space */
776 Assert(size < chunk_size);
778#endif
779#ifdef RANDOMIZE_ALLOCATED_MEMORY
780 /* fill the allocated space with junk */
782#endif
783
784 /*
785 * Stick the new block underneath the active allocation block, if any, so
786 * that we don't lose the use of the space remaining therein.
787 */
788 if (set->blocks != NULL)
789 {
790 block->prev = set->blocks;
791 block->next = set->blocks->next;
792 if (block->next)
793 block->next->prev = block;
794 set->blocks->next = block;
795 }
796 else
797 {
798 block->prev = NULL;
799 block->next = NULL;
800 set->blocks = block;
801 }
802
803 /* Ensure any padding bytes are marked NOACCESS. */
805 chunk_size - size);
806
807 /* Disallow access to the chunk header. */
809
811}

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockData::aset, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocBlockData::freeptr, malloc, MAXALIGN, MCTX_ASET_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkSetHdrMaskExternal(), MemoryContextAllocationFailure(), MemoryContextCheckSize(), AllocBlockData::next, AllocBlockData::prev, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MEMPOOL_ALLOC.

Referenced by AllocSetAlloc().
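
A standalone sketch of the sizing arithmetic above: an oversized request gets a malloc'd block all to itself, so the only overhead is the alignment padding plus the two headers. The header sizes used here are placeholders, not the real ALLOC_BLOCKHDRSZ/ALLOC_CHUNKHDRSZ values, so only the shape of the computation is meaningful.

#include <stdio.h>

#define MAXALIGNOF   8
#define MAXALIGN(s)  (((s) + (MAXALIGNOF - 1)) & ~((size_t) (MAXALIGNOF - 1)))

int
main(void)
{
    size_t  blockhdr = 32;      /* placeholder for ALLOC_BLOCKHDRSZ */
    size_t  chunkhdr = 8;       /* placeholder for ALLOC_CHUNKHDRSZ */
    size_t  sizes[] = {8193, 100000, 1048576};

    for (int i = 0; i < 3; i++)
    {
        size_t  chunk_size = MAXALIGN(sizes[i]);
        size_t  blksize = chunk_size + blockhdr + chunkhdr;

        printf("request %7zu -> dedicated block of %7zu bytes\n",
               sizes[i], blksize);
    }
    return 0;
}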

◆ AllocSetContextCreateInternal()

MemoryContext AllocSetContextCreateInternal ( MemoryContext  parent,
const char *  name,
Size  minContextSize,
Size  initBlockSize,
Size  maxBlockSize 
)

Definition at line 343 of file aset.c.

348{
349 int freeListIndex;
351 AllocSet set;
352 AllocBlock block;
353
354 /* ensure MemoryChunk's size is properly maxaligned */
356 "sizeof(MemoryChunk) is not maxaligned");
357 /* check we have enough space to store the freelist link */
359 "sizeof(AllocFreeListLink) larger than minimum allocation size");
360
361 /*
362 * First, validate allocation parameters. Once these were regular runtime
363 * tests and elog's, but in practice Asserts seem sufficient because
364 * nobody varies their parameters at runtime. We somewhat arbitrarily
365 * enforce a minimum 1K block size. We restrict the maximum block size to
366 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
367 * regards to addressing the offset between the chunk and the block that
368 * the chunk is stored on. We would be unable to store the offset between
369 * the chunk and block for any chunks that were beyond
370 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
371 * larger than this.
372 */
373 Assert(initBlockSize == MAXALIGN(initBlockSize) &&
374 initBlockSize >= 1024);
375 Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
376 maxBlockSize >= initBlockSize &&
377 AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
378 Assert(minContextSize == 0 ||
380 minContextSize >= 1024 &&
381 minContextSize <= maxBlockSize));
382 Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
383
384 /*
385 * Check whether the parameters match either available freelist. We do
386 * not need to demand a match of maxBlockSize.
387 */
389 initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
390 freeListIndex = 0;
392 initBlockSize == ALLOCSET_SMALL_INITSIZE)
393 freeListIndex = 1;
394 else
395 freeListIndex = -1;
396
397 /*
398 * If a suitable freelist entry exists, just recycle that context.
399 */
400 if (freeListIndex >= 0)
401 {
402 AllocSetFreeList *freelist = &context_freelists[freeListIndex];
403
404 if (freelist->first_free != NULL)
405 {
406 /* Remove entry from freelist */
407 set = freelist->first_free;
408 freelist->first_free = (AllocSet) set->header.nextchild;
409 freelist->num_free--;
410
411 /* Update its maxBlockSize; everything else should be OK */
412 set->maxBlockSize = maxBlockSize;
413
414 /* Reinitialize its header, installing correct name and parent */
418 parent,
419 name);
420
421 ((MemoryContext) set)->mem_allocated =
422 KeeperBlock(set)->endptr - ((char *) set);
423
424 return (MemoryContext) set;
425 }
426 }
427
428 /* Determine size of initial block */
431 if (minContextSize != 0)
433 else
434 firstBlockSize = Max(firstBlockSize, initBlockSize);
435
436 /*
437 * Allocate the initial block. Unlike other aset.c blocks, it starts with
438 * the context header and its block header follows that.
439 */
441 if (set == NULL)
442 {
447 errmsg("out of memory"),
448 errdetail("Failed while creating memory context \"%s\".",
449 name)));
450 }
451
452 /*
453 * Avoid writing code that can fail between here and MemoryContextCreate;
454 * we'd leak the header/initial block if we ereport in this stretch.
455 */
456
457 /* Create a vpool associated with the context */
458 VALGRIND_CREATE_MEMPOOL(set, 0, false);
459
460 /*
461 * Create a vchunk covering both the AllocSetContext struct and the keeper
462 * block's header. (Perhaps it would be more sensible for these to be two
463 * separate vchunks, but doing that seems to tickle bugs in some versions
464 * of Valgrind.) We must have these vchunks, and also a vchunk for each
465 * subsequently-added block header, so that Valgrind considers the
466 * pointers within them while checking for leaked memory. Note that
467 * Valgrind doesn't distinguish between these vchunks and those created by
468 * mcxt.c for the user-accessible-data chunks we allocate.
469 */
471
472 /* Fill in the initial block's block header */
473 block = KeeperBlock(set);
474 block->aset = set;
475 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
476 block->endptr = ((char *) set) + firstBlockSize;
477 block->prev = NULL;
478 block->next = NULL;
479
480 /* Mark unallocated space NOACCESS; leave the block header alone. */
481 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
482
483 /* Remember block as part of block list */
484 set->blocks = block;
485
486 /* Finish filling in aset-specific parts of the context header */
487 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
488
489 set->initBlockSize = (uint32) initBlockSize;
490 set->maxBlockSize = (uint32) maxBlockSize;
491 set->nextBlockSize = (uint32) initBlockSize;
492 set->freeListIndex = freeListIndex;
493
494 /*
495 * Compute the allocation chunk size limit for this context. It can't be
496 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
497 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
498 * even a significant fraction of it, should be treated as large chunks
499 * too. For the typical case of maxBlockSize a power of 2, the chunk size
500 * limit will be at most 1/8th maxBlockSize, so that given a stream of
501 * requests that are all the maximum chunk size we will waste at most
502 * 1/8th of the allocated space.
503 *
504 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
505 */
507 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
508
509 /*
510 * Determine the maximum size that a chunk can be before we allocate an
511 * entire AllocBlock dedicated for that chunk. We set the absolute limit
512 * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
513 * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
514 * sized block. (We opt to keep allocChunkLimit a power-of-2 value
515 * primarily for legacy reasons rather than calculating it so that exactly
516 * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
517 */
519 while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
520 (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
521 set->allocChunkLimit >>= 1;
522
523 /* Finally, do the type-independent part of context creation */
527 parent,
528 name);
529
530 ((MemoryContext) set)->mem_allocated = firstBlockSize;
531
532 return (MemoryContext) set;
533}
uint32 initBlockSize
Definition aset.c:161
int freeListIndex
Definition aset.c:166

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNK_FRACTION, ALLOC_CHUNK_LIMIT, ALLOC_CHUNKHDRSZ, ALLOC_MINBITS, AllocSetContext::allocChunkLimit, AllocHugeSizeIsValid, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_SEPARATE_THRESHOLD, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MINSIZE, AllocBlockData::aset, Assert, AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), FIRST_BLOCKHDRSZ, AllocSetFreeList::first_free, AllocSetContext::freelist, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, AllocSetContext::initBlockSize, KeeperBlock, malloc, Max, MAXALIGN, AllocSetContext::maxBlockSize, MCTX_ASET_ID, MEMORYCHUNK_MAX_BLOCKOFFSET, MemoryContextCreate(), MemoryContextStats(), MemSetAligned, name, AllocBlockData::next, AllocSetContext::nextBlockSize, MemoryContextData::nextchild, AllocSetFreeList::num_free, AllocBlockData::prev, StaticAssertDecl, StaticAssertStmt, TopMemoryContext, VALGRIND_CREATE_MEMPOOL, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MEMPOOL_ALLOC.
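
In practice this function is reached through the AllocSetContextCreate() macro. The hedged, extension-style sketch below (context names are made up) shows the two standard parameter sets, which line up with the two context_freelists[] entries checked above, so contexts of these shapes can be recycled instead of malloc'd again.

#include "postgres.h"
#include "utils/memutils.h"

static void
aset_create_example(MemoryContext parent)
{
    MemoryContext big;
    MemoryContext tiny;

    /* 0 / 8 kB / 8 MB: eligible for context_freelists[0] */
    big = AllocSetContextCreate(parent,
                                "general purpose context",
                                ALLOCSET_DEFAULT_SIZES);

    /* 0 / 1 kB / 8 kB: eligible for context_freelists[1] */
    tiny = AllocSetContextCreate(parent,
                                 "small, rarely-used context",
                                 ALLOCSET_SMALL_SIZES);

    MemoryContextDelete(tiny);
    MemoryContextDelete(big);
}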

◆ AllocSetDelete()

void AllocSetDelete ( MemoryContext  context)

Definition at line 634 of file aset.c.

635{
636 AllocSet set = (AllocSet) context;
637 AllocBlock block = set->blocks;
639
641
642#ifdef MEMORY_CONTEXT_CHECKING
643 /* Check for corruption and leaks before freeing */
644 AllocSetCheck(context);
645#endif
646
647 /* Remember keeper block size for Assert below */
648 keepersize = KeeperBlock(set)->endptr - ((char *) set);
649
650 /*
651 * If the context is a candidate for a freelist, put it into that freelist
652 * instead of destroying it.
653 */
654 if (set->freeListIndex >= 0)
655 {
657
658 /*
659 * Reset the context, if it needs it, so that we aren't hanging on to
660 * more than the initial malloc chunk.
661 */
662 if (!context->isReset)
663 MemoryContextResetOnly(context);
664
665 /*
666 * If the freelist is full, just discard what's already in it. See
667 * comments with context_freelists[].
668 */
669 if (freelist->num_free >= MAX_FREE_CONTEXTS)
670 {
671 while (freelist->first_free != NULL)
672 {
673 AllocSetContext *oldset = freelist->first_free;
674
676 freelist->num_free--;
677
678 /* Destroy the context's vpool --- see notes below */
680
681 /* All that remains is to free the header/initial block */
682 free(oldset);
683 }
684 Assert(freelist->num_free == 0);
685 }
686
687 /* Now add the just-deleted context to the freelist. */
688 set->header.nextchild = (MemoryContext) freelist->first_free;
689 freelist->first_free = set;
690 freelist->num_free++;
691
692 return;
693 }
694
695 /* Free all blocks, except the keeper which is part of context header */
696 while (block != NULL)
697 {
698 AllocBlock next = block->next;
699
700 if (!IsKeeperBlock(set, block))
701 context->mem_allocated -= block->endptr - ((char *) block);
702
703#ifdef CLOBBER_FREED_MEMORY
704 wipe_mem(block, block->freeptr - ((char *) block));
705#endif
706
707 if (!IsKeeperBlock(set, block))
708 {
709 /* As in AllocSetReset, free block-header vchunks explicitly */
710 VALGRIND_MEMPOOL_FREE(set, block);
711 free(block);
712 }
713
714 block = next;
715 }
716
717 Assert(context->mem_allocated == keepersize);
718
719 /*
720 * Destroy the vpool. We don't seem to need to explicitly free the
721 * initial block's header vchunk, nor any user-data vchunks that Valgrind
722 * still knows about; they'll all go away automatically.
723 */
725
726 /* Finally, free the context header, including the keeper block */
727 free(set);
728}

References AllocSetIsValid, Assert, AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, fb(), AllocSetFreeList::first_free, free, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, IsKeeperBlock, MemoryContextData::isReset, KeeperBlock, MAX_FREE_CONTEXTS, MemoryContextData::mem_allocated, MemoryContextResetOnly(), next, AllocBlockData::next, MemoryContextData::nextchild, AllocSetFreeList::num_free, PG_USED_FOR_ASSERTS_ONLY, VALGRIND_DESTROY_MEMPOOL, and VALGRIND_MEMPOOL_FREE.

◆ AllocSetFree()

void AllocSetFree ( void *  pointer)

Definition at line 1109 of file aset.c.

1110{
1111 AllocSet set;
1113
1114 /* Allow access to the chunk header. */
1116
1118 {
1119 /* Release single-chunk block. */
1121
1122 /*
1123 * Try to verify that we have a sane block pointer: the block header
1124 * should reference an aset and the freeptr should match the endptr.
1125 */
1126 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1127 elog(ERROR, "could not find block containing chunk %p", chunk);
1128
1129 set = block->aset;
1130
1131#ifdef MEMORY_CONTEXT_CHECKING
1132 {
1133 /* Test for someone scribbling on unused space in chunk */
1134 Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1135 if (!sentinel_ok(pointer, chunk->requested_size))
1136 elog(WARNING, "detected write past chunk end in %s %p",
1137 set->header.name, chunk);
1138 }
1139#endif
1140
1141 /* OK, remove block from aset's list and free it */
1142 if (block->prev)
1143 block->prev->next = block->next;
1144 else
1145 set->blocks = block->next;
1146 if (block->next)
1147 block->next->prev = block->prev;
1148
1149 set->header.mem_allocated -= block->endptr - ((char *) block);
1150
1151#ifdef CLOBBER_FREED_MEMORY
1152 wipe_mem(block, block->freeptr - ((char *) block));
1153#endif
1154
1155 /* As in AllocSetReset, free block-header vchunks explicitly */
1156 VALGRIND_MEMPOOL_FREE(set, block);
1157
1158 free(block);
1159 }
1160 else
1161 {
1163 int fidx;
1165
1166 /*
1167 * In this path, for speed reasons we just Assert that the referenced
1168 * block is good. We can also Assert that the value field is sane.
1169 * Future field experience may show that these Asserts had better
1170 * become regular runtime test-and-elog checks.
1171 */
1172 Assert(AllocBlockIsValid(block));
1173 set = block->aset;
1174
1178
1179#ifdef MEMORY_CONTEXT_CHECKING
1180 /* Test for someone scribbling on unused space in chunk */
1181 if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1182 if (!sentinel_ok(pointer, chunk->requested_size))
1183 elog(WARNING, "detected write past chunk end in %s %p",
1184 set->header.name, chunk);
1185#endif
1186
1187#ifdef CLOBBER_FREED_MEMORY
1189#endif
1190 /* push this chunk onto the top of the free list */
1192 link->next = set->freelist[fidx];
1194 set->freelist[fidx] = chunk;
1195
1196#ifdef MEMORY_CONTEXT_CHECKING
1197
1198 /*
1199 * Reset requested_size to InvalidAllocSize in chunks that are on free
1200 * list.
1201 */
1202 chunk->requested_size = InvalidAllocSize;
1203#endif
1204 }
1205}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, AllocSetContext::blocks, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, fb(), free, AllocSetContext::freelist, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, AllocSetContext::header, InvalidAllocSize, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and WARNING.

Referenced by AllocSetRealloc().
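
A standalone model of the freelist trick used here: the link to the next free chunk lives inside the freed chunk's own data area (compare AllocFreeListLink and GetFreeListLink), so maintaining the per-size freelists costs no extra memory. The types below are illustrative, not the real aset.c structures.

#include <stdio.h>
#include <stdlib.h>

typedef struct FreeListLink
{
    struct FreeListLink *next;
} FreeListLink;

static FreeListLink *freelist;   /* head of one size class */

static void
push_free_chunk(void *chunk_data)
{
    FreeListLink *link = (FreeListLink *) chunk_data;

    link->next = freelist;       /* reuse the chunk body as the link */
    freelist = link;
}

static void *
pop_free_chunk(void)
{
    FreeListLink *link = freelist;

    if (link == NULL)
        return NULL;
    freelist = link->next;
    return link;
}

int
main(void)
{
    void   *a = malloc(64);
    void   *b = malloc(64);

    push_free_chunk(a);
    push_free_chunk(b);
    printf("reused: %p then %p\n", pop_free_chunk(), pop_free_chunk());
    free(a);
    free(b);
    return 0;
}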

◆ AllocSetFreeIndex()

static int AllocSetFreeIndex ( Size  size)
inline static

Definition at line 273 of file aset.c.

274{
275 int idx;
276
277 if (size > (1 << ALLOC_MINBITS))
278 {
279 /*----------
280 * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
281 * This is the same as
282 * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
283 * or equivalently
284 * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
285 *
286 * However, for platforms without intrinsic support, we duplicate the
287 * logic here, allowing an additional optimization. It's reasonable
288 * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
289 * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
290 * the last two bytes.
291 *
292 * Yes, this function is enough of a hot-spot to make it worth this
293 * much trouble.
294 *----------
295 */
296#ifdef HAVE_BITSCAN_REVERSE
297 idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
298#else
299 uint32 t,
300 tsize;
301
302 /* Statically assert that we only have a 16-bit input value. */
304 "ALLOC_CHUNK_LIMIT must be less than 64kB");
305
306 tsize = size - 1;
307 t = tsize >> 8;
309 idx -= ALLOC_MINBITS - 1;
310#endif
311
313 }
314 else
315 idx = 0;
316
317 return idx;
318}

References ALLOC_CHUNK_LIMIT, ALLOC_MINBITS, ALLOCSET_NUM_FREELISTS, Assert, fb(), idx(), pg_leftmost_one_pos, pg_leftmost_one_pos32(), and StaticAssertDecl.

Referenced by AllocSetAlloc(), and AllocSetAllocFromNewBlock().
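
A standalone check of the index math, assuming a GCC/Clang-style __builtin_clz as a stand-in for pg_leftmost_one_pos32(): requests round up to the next power of 2 of at least 8 bytes, giving one of the ALLOCSET_NUM_FREELISTS size classes.

#include <stdio.h>

#define MINBITS 3               /* stand-in for ALLOC_MINBITS */

static int
free_index(unsigned int size)
{
    /* ceil(log2(size >> MINBITS)) for size > 8, else 0 */
    if (size > (1u << MINBITS))
        return (31 - __builtin_clz(size - 1)) - MINBITS + 1;
    return 0;
}

int
main(void)
{
    unsigned int sizes[] = {1, 8, 9, 16, 17, 100, 512, 8192};

    for (int i = 0; i < 8; i++)
    {
        int     idx = free_index(sizes[i]);

        printf("size %4u -> freelist %2d (chunk %u bytes)\n",
               sizes[i], idx, (1u << MINBITS) << idx);
    }
    return 0;
}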

◆ AllocSetGetChunkContext()

MemoryContext AllocSetGetChunkContext ( void *  pointer)

Definition at line 1492 of file aset.c.

1493{
1495 AllocBlock block;
1496 AllocSet set;
1497
1498 /* Allow access to the chunk header. */
1500
1503 else
1505
1506 /* Disallow access to the chunk header. */
1508
1509 Assert(AllocBlockIsValid(block));
1510 set = block->aset;
1511
1512 return &set->header;
1513}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, ExternalChunkGetBlock, fb(), AllocSetContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetGetChunkSpace()

Size AllocSetGetChunkSpace ( void *  pointer)

Definition at line 1521 of file aset.c.

1522{
1524 int fidx;
1525
1526 /* Allow access to the chunk header. */
1528
1530 {
1532
1533 /* Disallow access to the chunk header. */
1535
1536 Assert(AllocBlockIsValid(block));
1537
1538 return block->endptr - (char *) chunk;
1539 }
1540
1543
1544 /* Disallow access to the chunk header. */
1546
1548}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, Assert, AllocBlockData::endptr, ExternalChunkGetBlock, fb(), FreeListIdxIsValid, GetChunkSizeFromFreeListIdx, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.
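
A hedged, extension-style sketch of how this is typically consumed: memory accounting code calls the mcxt.c wrapper GetMemoryChunkSpace(), which reports the full chunk-plus-header footprint rather than the size that was requested.

#include "postgres.h"
#include "utils/memutils.h"

static void
chunk_space_example(void)
{
    char   *p = palloc(100);
    Size    space = GetMemoryChunkSpace(p);

    /* for a small chunk this is the power-of-2 chunk size plus its header */
    elog(DEBUG1, "requested 100 bytes, chunk occupies %zu bytes", space);
    pfree(p);
}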

◆ AllocSetIsEmpty()

bool AllocSetIsEmpty ( MemoryContext  context)

Definition at line 1555 of file aset.c.

1556{
1557 Assert(AllocSetIsValid(context));
1558
1559 /*
1560 * For now, we say "empty" only if the context is new or just reset. We
1561 * could examine the freelists to determine if all space has been freed,
1562 * but it's not really worth the trouble for present uses of this
1563 * functionality.
1564 */
1565 if (context->isReset)
1566 return true;
1567 return false;
1568}

References AllocSetIsValid, Assert, and MemoryContextData::isReset.

◆ AllocSetRealloc()

void * AllocSetRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 1220 of file aset.c.

1221{
1222 AllocBlock block;
1223 AllocSet set;
1226 int fidx;
1227
1228 /* Allow access to the chunk header. */
1230
1232 {
1233 /*
1234 * The chunk must have been allocated as a single-chunk block. Use
1235 * realloc() to make the containing block bigger, or smaller, with
1236 * minimum space wastage.
1237 */
1239 Size chksize;
1240 Size blksize;
1242
1244
1245 /*
1246 * Try to verify that we have a sane block pointer: the block header
1247 * should reference an aset and the freeptr should match the endptr.
1248 */
1249 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1250 elog(ERROR, "could not find block containing chunk %p", chunk);
1251
1252 set = block->aset;
1253
1254 /* only check size in paths where the limits could be hit */
1255 MemoryContextCheckSize((MemoryContext) set, size, flags);
1256
1257 oldchksize = block->endptr - (char *) pointer;
1258
1259#ifdef MEMORY_CONTEXT_CHECKING
1260 /* Test for someone scribbling on unused space in chunk */
1261 Assert(chunk->requested_size < oldchksize);
1262 if (!sentinel_ok(pointer, chunk->requested_size))
1263 elog(WARNING, "detected write past chunk end in %s %p",
1264 set->header.name, chunk);
1265#endif
1266
1267#ifdef MEMORY_CONTEXT_CHECKING
1268 /* ensure there's always space for the sentinel byte */
1269 chksize = MAXALIGN(size + 1);
1270#else
1271 chksize = MAXALIGN(size);
1272#endif
1273
1274 /* Do the realloc */
1276 oldblksize = block->endptr - ((char *) block);
1277
1278 newblock = (AllocBlock) realloc(block, blksize);
1279 if (newblock == NULL)
1280 {
1281 /* Disallow access to the chunk header. */
1283 return MemoryContextAllocationFailure(&set->header, size, flags);
1284 }
1285
1286 /*
1287 * Move the block-header vchunk explicitly. (mcxt.c will take care of
1288 * moving the vchunk for the user data.)
1289 */
1291 block = newblock;
1292
1293 /* updated separately, not to underflow when (oldblksize > blksize) */
1295 set->header.mem_allocated += blksize;
1296
1297 block->freeptr = block->endptr = ((char *) block) + blksize;
1298
1299 /* Update pointers since block has likely been moved */
1300 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1301 pointer = MemoryChunkGetPointer(chunk);
1302 if (block->prev)
1303 block->prev->next = block;
1304 else
1305 set->blocks = block;
1306 if (block->next)
1307 block->next->prev = block;
1308
1309#ifdef MEMORY_CONTEXT_CHECKING
1310#ifdef RANDOMIZE_ALLOCATED_MEMORY
1311
1312 /*
1313 * We can only randomize the extra space if we know the prior request.
1314 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1315 */
1316 if (size > chunk->requested_size)
1317 randomize_mem((char *) pointer + chunk->requested_size,
1318 size - chunk->requested_size);
1319#else
1320
1321 /*
1322 * If this is an increase, realloc() will have marked any
1323 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1324 * also need to adjust trailing bytes from the old allocation (from
1325 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1326 * Make sure not to mark too many bytes in case chunk->requested_size
1327 * < size < oldchksize.
1328 */
1329#ifdef USE_VALGRIND
1330 if (Min(size, oldchksize) > chunk->requested_size)
1331 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1332 Min(size, oldchksize) - chunk->requested_size);
1333#endif
1334#endif
1335
1336 chunk->requested_size = size;
1337 /* set mark to catch clobber of "unused" space */
1338 Assert(size < chksize);
1339 set_sentinel(pointer, size);
1340#else /* !MEMORY_CONTEXT_CHECKING */
1341
1342 /*
1343 * We may need to adjust marking of bytes from the old allocation as
1344 * some of them may be marked NOACCESS. We don't know how much of the
1345 * old chunk size was the requested size; it could have been as small
1346 * as one byte. We have to be conservative and just mark the entire
1347 * old portion DEFINED. Make sure not to mark memory beyond the new
1348 * allocation in case it's smaller than the old one.
1349 */
1350 VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1351#endif
1352
1353 /* Ensure any padding bytes are marked NOACCESS. */
1354 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1355
1356 /* Disallow access to the chunk header. */
1358
1359 return pointer;
1360 }
1361
1362 block = MemoryChunkGetBlock(chunk);
1363
1364 /*
1365 * In this path, for speed reasons we just Assert that the referenced
1366 * block is good. We can also Assert that the value field is sane. Future
1367 * field experience may show that these Asserts had better become regular
1368 * runtime test-and-elog checks.
1369 */
1370 Assert(AllocBlockIsValid(block));
1371 set = block->aset;
1372
1376
1377#ifdef MEMORY_CONTEXT_CHECKING
1378 /* Test for someone scribbling on unused space in chunk */
1379 if (chunk->requested_size < oldchksize)
1380 if (!sentinel_ok(pointer, chunk->requested_size))
1381 elog(WARNING, "detected write past chunk end in %s %p",
1382 set->header.name, chunk);
1383#endif
1384
1385 /*
1386 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1387 * allocated area already is >= the new size. (In particular, we will
1388 * fall out here if the requested size is a decrease.)
1389 */
1390 if (oldchksize >= size)
1391 {
1392#ifdef MEMORY_CONTEXT_CHECKING
1393 Size oldrequest = chunk->requested_size;
1394
1395#ifdef RANDOMIZE_ALLOCATED_MEMORY
1396 /* We can only fill the extra space if we know the prior request */
1397 if (size > oldrequest)
1398 randomize_mem((char *) pointer + oldrequest,
1399 size - oldrequest);
1400#endif
1401
1402 chunk->requested_size = size;
1403
1404 /*
1405 * If this is an increase, mark any newly-available part UNDEFINED.
1406 * Otherwise, mark the obsolete part NOACCESS.
1407 */
1408 if (size > oldrequest)
1409 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1410 size - oldrequest);
1411 else
1412 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1413 oldchksize - size);
1414
1415 /* set mark to catch clobber of "unused" space */
1416 if (size < oldchksize)
1417 set_sentinel(pointer, size);
1418#else /* !MEMORY_CONTEXT_CHECKING */
1419
1420 /*
1421 * We don't have the information to determine whether we're growing
1422 * the old request or shrinking it, so we conservatively mark the
1423 * entire new allocation DEFINED.
1424 */
1426 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1427#endif
1428
1429 /* Disallow access to the chunk header. */
1431
1432 return pointer;
1433 }
1434 else
1435 {
1436 /*
1437 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1438 * allocate a new chunk and copy the data. Since we know the existing
1439 * data isn't huge, this won't involve any great memcpy expense, so
1440 * it's not worth being smarter. (At one time we tried to avoid
1441 * memcpy when it was possible to enlarge the chunk in-place, but that
1442 * turns out to misbehave unpleasantly for repeated cycles of
1443 * palloc/repalloc/pfree: the eventually freed chunks go into the
1444 * wrong freelist for the next initial palloc request, and so we leak
1445 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1446 */
1448 Size oldsize;
1449
1450 /* allocate new chunk (this also checks size is valid) */
1451 newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1452
1453 /* leave immediately if request was not completed */
1454 if (newPointer == NULL)
1455 {
1456 /* Disallow access to the chunk header. */
1458 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1459 }
1460
1461 /*
1462 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1463 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1464 * definedness from the old allocation to the new. If we know the old
1465 * allocation, copy just that much. Otherwise, make the entire old
1466 * chunk defined to avoid errors as we copy the currently-NOACCESS
1467 * trailing bytes.
1468 */
1470#ifdef MEMORY_CONTEXT_CHECKING
1471 oldsize = chunk->requested_size;
1472#else
1475#endif
1476
1477 /* transfer existing data (certain to fit) */
1478 memcpy(newPointer, pointer, oldsize);
1479
1480 /* free old chunk */
1481 AllocSetFree(pointer);
1482
1483 return newPointer;
1484 }
1485}

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocSetAlloc(), AllocSetFree(), AllocBlockData::aset, Assert, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, fb(), FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, AllocSetContext::header, MAXALIGN, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), MemoryContextCheckSize(), Min, MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, realloc, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, VALGRIND_MEMPOOL_CHANGE, and WARNING.
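
A standalone model of the small-chunk decision above (sizes are illustrative): because chunk sizes are powers of 2, a repalloc that stays within the current chunk size is satisfied in place, while one that crosses it allocates a new chunk, copies, and frees the old one.

#include <stdio.h>

static unsigned int
round_up_pow2(unsigned int size)
{
    unsigned int chunk = 8;             /* smallest chunk, 1 << ALLOC_MINBITS */

    while (chunk < size)
        chunk <<= 1;
    return chunk;
}

int
main(void)
{
    unsigned int oldsize = 100;
    unsigned int oldchksize = round_up_pow2(oldsize);   /* 128 */
    unsigned int newsizes[] = {120, 128, 129, 1000};

    for (int i = 0; i < 4; i++)
    {
        if (oldchksize >= newsizes[i])
            printf("repalloc to %4u: reuse %u-byte chunk in place\n",
                   newsizes[i], oldchksize);
        else
            printf("repalloc to %4u: new %u-byte chunk, copy, free old\n",
                   newsizes[i], round_up_pow2(newsizes[i]));
    }
    return 0;
}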

◆ AllocSetReset()

void AllocSetReset ( MemoryContext  context)

Definition at line 548 of file aset.c.

549{
550 AllocSet set = (AllocSet) context;
551 AllocBlock block;
553
555
556#ifdef MEMORY_CONTEXT_CHECKING
557 /* Check for corruption and leaks before freeing */
558 AllocSetCheck(context);
559#endif
560
561 /* Remember keeper block size for Assert below */
562 keepersize = KeeperBlock(set)->endptr - ((char *) set);
563
564 /* Clear chunk freelists */
565 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
566
567 block = set->blocks;
568
569 /* New blocks list will be just the keeper block */
570 set->blocks = KeeperBlock(set);
571
572 while (block != NULL)
573 {
574 AllocBlock next = block->next;
575
576 if (IsKeeperBlock(set, block))
577 {
578 /* Reset the block, but don't return it to malloc */
579 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
580
581#ifdef CLOBBER_FREED_MEMORY
583#else
584 /* wipe_mem() would have done this */
586#endif
587 block->freeptr = datastart;
588 block->prev = NULL;
589 block->next = NULL;
590 }
591 else
592 {
593 /* Normal case, release the block */
594 context->mem_allocated -= block->endptr - ((char *) block);
595
596#ifdef CLOBBER_FREED_MEMORY
597 wipe_mem(block, block->freeptr - ((char *) block));
598#endif
599
600 /*
601 * We need to free the block header's vchunk explicitly, although
602 * the user-data vchunks within will go away in the TRIM below.
603 * Otherwise Valgrind complains about leaked allocations.
604 */
605 VALGRIND_MEMPOOL_FREE(set, block);
606
607 free(block);
608 }
609 block = next;
610 }
611
612 Assert(context->mem_allocated == keepersize);
613
614 /*
615 * Instruct Valgrind to throw away all the vchunks associated with this
616 * context, except for the one covering the AllocSetContext and
617 * keeper-block header. This gets rid of the vchunks for whatever user
618 * data is getting discarded by the context reset.
619 */
621
622 /* Reset block size allocation sequence, too */
623 set->nextBlockSize = set->initBlockSize;
624}

References ALLOC_BLOCKHDRSZ, AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), FIRST_BLOCKHDRSZ, free, AllocSetContext::freelist, AllocBlockData::freeptr, AllocSetContext::initBlockSize, IsKeeperBlock, KeeperBlock, MemoryContextData::mem_allocated, MemSetAligned, next, AllocBlockData::next, AllocSetContext::nextBlockSize, PG_USED_FOR_ASSERTS_ONLY, AllocBlockData::prev, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and VALGRIND_MEMPOOL_TRIM.
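
This reset path is what makes the common per-iteration context pattern cheap: every block except the keeper goes back to malloc, and the keeper is simply rewound. A hedged, extension-style sketch (names made up):

#include "postgres.h"
#include "utils/memutils.h"

static void
per_row_context_example(int nrows)
{
    MemoryContext percxt = AllocSetContextCreate(CurrentMemoryContext,
                                                 "per-row workspace",
                                                 ALLOCSET_DEFAULT_SIZES);

    for (int i = 0; i < nrows; i++)
    {
        MemoryContext oldcxt = MemoryContextSwitchTo(percxt);

        /* scratch allocations for this row; no individual pfree needed */
        (void) palloc(256);
        (void) palloc(1024);

        MemoryContextSwitchTo(oldcxt);
        MemoryContextReset(percxt);     /* ends up in AllocSetReset() */
    }

    MemoryContextDelete(percxt);
}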

◆ AllocSetStats()

void AllocSetStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters *  totals,
bool  print_to_stderr 
)

Definition at line 1580 of file aset.c.

1583{
1584 AllocSet set = (AllocSet) context;
1585 Size nblocks = 0;
1586 Size freechunks = 0;
1587 Size totalspace;
1588 Size freespace = 0;
1589 AllocBlock block;
1590 int fidx;
1591
1592 Assert(AllocSetIsValid(set));
1593
1594 /* Include context header in totalspace */
1595 totalspace = MAXALIGN(sizeof(AllocSetContext));
1596
1597 for (block = set->blocks; block != NULL; block = block->next)
1598 {
1599 nblocks++;
1600 totalspace += block->endptr - ((char *) block);
1601 freespace += block->endptr - block->freeptr;
1602 }
1603 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1604 {
1606 MemoryChunk *chunk = set->freelist[fidx];
1607
1608 while (chunk != NULL)
1609 {
1611
1612 /* Allow access to the chunk header. */
1616
1617 freechunks++;
1618 freespace += chksz + ALLOC_CHUNKHDRSZ;
1619
1621 chunk = link->next;
1623 }
1624 }
1625
1626 if (printfunc)
1627 {
1628 char stats_string[200];
1629
1631 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1632 totalspace, nblocks, freespace, freechunks,
1633 totalspace - freespace);
1635 }
1636
1637 if (totals)
1638 {
1639 totals->nblocks += nblocks;
1640 totals->freechunks += freechunks;
1641 totals->totalspace += totalspace;
1642 totals->freespace += freespace;
1643 }
1644}

References ALLOC_CHUNKHDRSZ, ALLOCSET_NUM_FREELISTS, AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MAXALIGN, MemoryChunkGetValue(), AllocBlockData::next, snprintf, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.
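
A minimal usage sketch: MemoryContextStats() walks the context tree and invokes each context's stats method (AllocSetStats() for allocation sets), producing lines in the format built by the snprintf above.

#include "postgres.h"
#include "utils/memutils.h"

static void
dump_memory_stats_example(void)
{
    MemoryContextStats(TopMemoryContext);   /* report goes to stderr */
}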

Variable Documentation

◆ context_freelists

AllocSetFreeList context_freelists[2]
static
Initial value:
=
{
{
0, NULL
},
{
0, NULL
}
}

Definition at line 253 of file aset.c.

254{
255 {
256 0, NULL
257 },
258 {
259 0, NULL
260 }
261};

Referenced by AllocSetContextCreateInternal(), and AllocSetDelete().