PostgreSQL Source Code git master
Loading...
Searching...
No Matches
aset.c File Reference
Include dependency graph for aset.c:

Go to the source code of this file.

Data Structures

struct  AllocFreeListLink
 
struct  AllocSetContext
 
struct  AllocBlockData
 
struct  AllocSetFreeList
 

Macros

#define ALLOC_MINBITS   3 /* smallest chunk size is 8 bytes */
 
#define ALLOCSET_NUM_FREELISTS   11
 
#define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
 
#define ALLOC_CHUNK_FRACTION   4
 
#define ALLOC_BLOCKHDRSZ   MAXALIGN(sizeof(AllocBlockData))
 
#define ALLOC_CHUNKHDRSZ   sizeof(MemoryChunk)
 
#define FIRST_BLOCKHDRSZ
 
#define GetFreeListLink(chkptr)    (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
 
#define FreeListIdxIsValid(fidx)    ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
 
#define GetChunkSizeFromFreeListIdx(fidx)    ((((Size) 1) << ALLOC_MINBITS) << (fidx))
 
#define AllocSetIsValid(set)    ((set) && IsA(set, AllocSetContext))
 
#define AllocBlockIsValid(block)    ((block) && AllocSetIsValid((block)->aset))
 
#define ExternalChunkGetBlock(chunk)    (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
 
#define MAX_FREE_CONTEXTS   100 /* arbitrary limit on freelist length */
 
#define KeeperBlock(set)    ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))
 
#define IsKeeperBlock(set, block)   ((block) == (KeeperBlock(set)))
 

Typedefs

typedef struct AllocBlockData *AllocBlock
 
typedef void *AllocPointer
 
typedef struct AllocFreeListLink AllocFreeListLink
 
typedef struct AllocSetContext AllocSetContext
 
typedef AllocSetContext *AllocSet
 
typedef struct AllocBlockData AllocBlockData
 
typedef struct AllocSetFreeList AllocSetFreeList
 

Functions

 StaticAssertDecl (ALLOC_CHUNK_LIMIT==ALLOCSET_SEPARATE_THRESHOLD, "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD")
 
static int AllocSetFreeIndex (Size size)
 
MemoryContext AllocSetContextCreateInternal (MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
 
void AllocSetReset (MemoryContext context)
 
void AllocSetDelete (MemoryContext context)
 
static pg_noinline void * AllocSetAllocLarge (MemoryContext context, Size size, int flags)
 
static void * AllocSetAllocChunkFromBlock (MemoryContext context, AllocBlock block, Size size, Size chunk_size, int fidx)
 
static pg_noinline void * AllocSetAllocFromNewBlock (MemoryContext context, Size size, int flags, int fidx)
 
void * AllocSetAlloc (MemoryContext context, Size size, int flags)
 
void AllocSetFree (void *pointer)
 
void * AllocSetRealloc (void *pointer, Size size, int flags)
 
MemoryContext AllocSetGetChunkContext (void *pointer)
 
Size AllocSetGetChunkSpace (void *pointer)
 
bool AllocSetIsEmpty (MemoryContext context)
 
void AllocSetStats (MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
 

Variables

static AllocSetFreeList context_freelists [2]
 

Macro Definition Documentation

◆ ALLOC_BLOCKHDRSZ

#define ALLOC_BLOCKHDRSZ   MAXALIGN(sizeof(AllocBlockData))

Definition at line 108 of file aset.c.

◆ ALLOC_CHUNK_FRACTION

#define ALLOC_CHUNK_FRACTION   4

Definition at line 87 of file aset.c.

◆ ALLOC_CHUNK_LIMIT

#define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))

Definition at line 85 of file aset.c.

◆ ALLOC_CHUNKHDRSZ

#define ALLOC_CHUNKHDRSZ   sizeof(MemoryChunk)

Definition at line 109 of file aset.c.

◆ ALLOC_MINBITS

#define ALLOC_MINBITS   3 /* smallest chunk size is 8 bytes */

Definition at line 83 of file aset.c.

◆ AllocBlockIsValid

#define AllocBlockIsValid (   block)     ((block) && AllocSetIsValid((block)->aset))

Definition at line 207 of file aset.c.

/*
 * NOTE(review): Doxygen/HTML extraction artifact.  The numeric prefixes are
 * the original source's line numbers; gaps in them (249, 253, 256 here) mean
 * lines were dropped during extraction.  This fragment is the body of the
 * AllocSetFreeList struct (a freelist of recycled whole contexts -- see the
 * "Data Structures" listing above) followed by the static initializer of
 * context_freelists[2]; the struct's opening line and the
 * "static AllocSetFreeList context_freelists[2] =" declaration line
 * (listed under "Variables" above) were lost in extraction.
 */
250{
251 int num_free; /* current list length */
252 AllocSetContext *first_free; /* list header */
254
255/* context_freelists[0] is for default params, [1] for small params */
257{
258 {
259 0, NULL
260 },
261 {
262 0, NULL
263 }
264};
265
266
/*
 * NOTE(review): extraction dropped several source lines from this function
 * (gaps in the numeric prefixes: 276 is the signature line naming
 * AllocSetFreeIndex, and 306, 311 and 315 are missing from the non-intrinsic
 * branch).  The code below is otherwise verbatim; the function maps an
 * allocation size to a freelist index, per its own header comment.
 */
267/* ----------
268 * AllocSetFreeIndex -
269 *
270 * Depending on the size of an allocation compute which freechunk
271 * list of the alloc set it belongs to. Caller must have verified
272 * that size <= ALLOC_CHUNK_LIMIT.
273 * ----------
274 */
275static inline int
/* NOTE(review): source line 276 -- presumably "AllocSetFreeIndex(Size size)"
 * per the Functions listing above -- was dropped by extraction. */
277{
278 int idx;
279
280 if (size > (1 << ALLOC_MINBITS))
281 {
282 /*----------
283 * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
284 * This is the same as
285 * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
286 * or equivalently
287 * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
288 *
289 * However, for platforms without intrinsic support, we duplicate the
290 * logic here, allowing an additional optimization. It's reasonable
291 * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
292 * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
293 * the last two bytes.
294 *
295 * Yes, this function is enough of a hot-spot to make it worth this
296 * much trouble.
297 *----------
298 */
299#ifdef HAVE_BITSCAN_REVERSE
300 idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
301#else
302 uint32 t,
303 tsize;
304
305 /* Statically assert that we only have a 16-bit input value. */
/* NOTE(review): source line 306 (the opening of the StaticAssertStmt whose
 * message string follows) was dropped by extraction. */
307 "ALLOC_CHUNK_LIMIT must be less than 64kB");
308
309 tsize = size - 1;
310 t = tsize >> 8;
/* NOTE(review): source line 311 (the table-lookup assignment to idx) was
 * dropped by extraction. */
312 idx -= ALLOC_MINBITS - 1;
313#endif
314
/* NOTE(review): source line 315 was dropped by extraction. */
316 }
317 else
318 idx = 0;
319
320 return idx;
321}
322
323
/*
 * NOTE(review): Doxygen extraction artifact -- gaps in the numeric prefixes
 * (e.g. 345-346 signature lines, 353, 358, 361, 382, 391, 394, 418-420,
 * 432-433, 435, 443, 446-449, 473, 521-523) indicate dropped source lines,
 * including the function's own signature, several local declarations
 * (firstBlockSize, etc.), the malloc of the initial block, the ereport
 * opening, and the MemoryContextCreate call's leading arguments.  The
 * remaining code is verbatim.  Per the header comment below, this creates a
 * new AllocSet context, recycling one from context_freelists[] when the
 * size parameters match a standard profile.
 */
324/*
325 * Public routines
326 */
327
328
329/*
330 * AllocSetContextCreateInternal
331 * Create a new AllocSet context.
332 *
333 * parent: parent context, or NULL if top-level context
334 * name: name of context (must be statically allocated)
335 * minContextSize: minimum context size
336 * initBlockSize: initial allocation block size
337 * maxBlockSize: maximum allocation block size
338 *
339 * Most callers should abstract the context size parameters using a macro
340 * such as ALLOCSET_DEFAULT_SIZES.
341 *
342 * Note: don't call this directly; go through the wrapper macro
343 * AllocSetContextCreate.
344 */
347 const char *name,
349 Size initBlockSize,
350 Size maxBlockSize)
351{
352 int freeListIndex;
354 AllocSet set;
355 AllocBlock block;
356
357 /* ensure MemoryChunk's size is properly maxaligned */
359 "sizeof(MemoryChunk) is not maxaligned");
360 /* check we have enough space to store the freelist link */
362 "sizeof(AllocFreeListLink) larger than minimum allocation size");
363
364 /*
365 * First, validate allocation parameters. Once these were regular runtime
366 * tests and elog's, but in practice Asserts seem sufficient because
367 * nobody varies their parameters at runtime. We somewhat arbitrarily
368 * enforce a minimum 1K block size. We restrict the maximum block size to
369 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
370 * regards to addressing the offset between the chunk and the block that
371 * the chunk is stored on. We would be unable to store the offset between
372 * the chunk and block for any chunks that were beyond
373 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
374 * larger than this.
375 */
376 Assert(initBlockSize == MAXALIGN(initBlockSize) &&
377 initBlockSize >= 1024);
378 Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
379 maxBlockSize >= initBlockSize &&
380 AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
381 Assert(minContextSize == 0 ||
383 minContextSize >= 1024 &&
384 minContextSize <= maxBlockSize));
385 Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
386
387 /*
388 * Check whether the parameters match either available freelist. We do
389 * not need to demand a match of maxBlockSize.
390 */
392 initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
393 freeListIndex = 0;
395 initBlockSize == ALLOCSET_SMALL_INITSIZE)
396 freeListIndex = 1;
397 else
398 freeListIndex = -1;
399
400 /*
401 * If a suitable freelist entry exists, just recycle that context.
402 */
403 if (freeListIndex >= 0)
404 {
405 AllocSetFreeList *freelist = &context_freelists[freeListIndex];
406
407 if (freelist->first_free != NULL)
408 {
409 /* Remove entry from freelist */
410 set = freelist->first_free;
411 freelist->first_free = (AllocSet) set->header.nextchild;
412 freelist->num_free--;
413
414 /* Update its maxBlockSize; everything else should be OK */
415 set->maxBlockSize = maxBlockSize;
416
417 /* Reinitialize its header, installing correct name and parent */
421 parent,
422 name);
423
424 ((MemoryContext) set)->mem_allocated =
425 KeeperBlock(set)->endptr - ((char *) set);
426
427 return (MemoryContext) set;
428 }
429 }
430
431 /* Determine size of initial block */
434 if (minContextSize != 0)
436 else
437 firstBlockSize = Max(firstBlockSize, initBlockSize);
438
439 /*
440 * Allocate the initial block. Unlike other aset.c blocks, it starts with
441 * the context header and its block header follows that.
442 */
444 if (set == NULL)
445 {
450 errmsg("out of memory"),
451 errdetail("Failed while creating memory context \"%s\".",
452 name)));
453 }
454
455 /*
456 * Avoid writing code that can fail between here and MemoryContextCreate;
457 * we'd leak the header/initial block if we ereport in this stretch.
458 */
459
460 /* Create a vpool associated with the context */
461 VALGRIND_CREATE_MEMPOOL(set, 0, false);
462
463 /*
464 * Create a vchunk covering both the AllocSetContext struct and the keeper
465 * block's header. (Perhaps it would be more sensible for these to be two
466 * separate vchunks, but doing that seems to tickle bugs in some versions
467 * of Valgrind.) We must have these vchunks, and also a vchunk for each
468 * subsequently-added block header, so that Valgrind considers the
469 * pointers within them while checking for leaked memory. Note that
470 * Valgrind doesn't distinguish between these vchunks and those created by
471 * mcxt.c for the user-accessible-data chunks we allocate.
472 */
474
475 /* Fill in the initial block's block header */
476 block = KeeperBlock(set);
477 block->aset = set;
478 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
479 block->endptr = ((char *) set) + firstBlockSize;
480 block->prev = NULL;
481 block->next = NULL;
482
483 /* Mark unallocated space NOACCESS; leave the block header alone. */
484 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
485
486 /* Remember block as part of block list */
487 set->blocks = block;
488
489 /* Finish filling in aset-specific parts of the context header */
490 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
491
492 set->initBlockSize = (uint32) initBlockSize;
493 set->maxBlockSize = (uint32) maxBlockSize;
494 set->nextBlockSize = (uint32) initBlockSize;
495 set->freeListIndex = freeListIndex;
496
497 /*
498 * Compute the allocation chunk size limit for this context. It can't be
499 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
500 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
501 * even a significant fraction of it, should be treated as large chunks
502 * too. For the typical case of maxBlockSize a power of 2, the chunk size
503 * limit will be at most 1/8th maxBlockSize, so that given a stream of
504 * requests that are all the maximum chunk size we will waste at most
505 * 1/8th of the allocated space.
506 *
507 * Determine the maximum size that a chunk can be before we allocate an
508 * entire AllocBlock dedicated for that chunk. We set the absolute limit
509 * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
510 * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
511 * sized block. (We opt to keep allocChunkLimit a power-of-2 value
512 * primarily for legacy reasons rather than calculating it so that exactly
513 * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
514 */
515 set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
516 while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
517 (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
518 set->allocChunkLimit >>= 1;
519
520 /* Finally, do the type-independent part of context creation */
524 parent,
525 name);
526
527 ((MemoryContext) set)->mem_allocated = firstBlockSize;
528
529 return (MemoryContext) set;
530}
531
/*
 * NOTE(review): extraction dropped source lines 545 (the signature line
 * "AllocSetReset(MemoryContext context)" per the Functions listing), 549
 * (a local declaration, apparently "Size keepersize" given its use below),
 * 551 (likely an Assert -- cannot tell from here), 579 and 582 (the
 * CLOBBER_FREED_MEMORY / NOACCESS handling of the keeper block's data
 * area), and 617 (the Valgrind TRIM call referenced by the comment at
 * 611-616).  The remaining code is verbatim.
 */
532/*
533 * AllocSetReset
534 * Frees all memory which is allocated in the given set.
535 *
536 * Actually, this routine has some discretion about what to do.
537 * It should mark all allocated chunks freed, but it need not necessarily
538 * give back all the resources the set owns. Our actual implementation is
539 * that we give back all but the "keeper" block (which we must keep, since
540 * it shares a malloc chunk with the context header). In this way, we don't
541 * thrash malloc() when a context is repeatedly reset after small allocations,
542 * which is typical behavior for per-tuple contexts.
543 */
544void
546{
547 AllocSet set = (AllocSet) context;
548 AllocBlock block;
550
552
553#ifdef MEMORY_CONTEXT_CHECKING
554 /* Check for corruption and leaks before freeing */
555 AllocSetCheck(context);
556#endif
557
558 /* Remember keeper block size for Assert below */
559 keepersize = KeeperBlock(set)->endptr - ((char *) set);
560
561 /* Clear chunk freelists */
562 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
563
564 block = set->blocks;
565
566 /* New blocks list will be just the keeper block */
567 set->blocks = KeeperBlock(set);
568
569 while (block != NULL)
570 {
571 AllocBlock next = block->next;
572
573 if (IsKeeperBlock(set, block))
574 {
575 /* Reset the block, but don't return it to malloc */
576 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
577
578#ifdef CLOBBER_FREED_MEMORY
580#else
581 /* wipe_mem() would have done this */
583#endif
584 block->freeptr = datastart;
585 block->prev = NULL;
586 block->next = NULL;
587 }
588 else
589 {
590 /* Normal case, release the block */
591 context->mem_allocated -= block->endptr - ((char *) block);
592
593#ifdef CLOBBER_FREED_MEMORY
594 wipe_mem(block, block->freeptr - ((char *) block));
595#endif
596
597 /*
598 * We need to free the block header's vchunk explicitly, although
599 * the user-data vchunks within will go away in the TRIM below.
600 * Otherwise Valgrind complains about leaked allocations.
601 */
602 VALGRIND_MEMPOOL_FREE(set, block);
603
604 free(block);
605 }
606 block = next;
607 }
608
609 Assert(context->mem_allocated == keepersize);
610
611 /*
612 * Instruct Valgrind to throw away all the vchunks associated with this
613 * context, except for the one covering the AllocSetContext and
614 * keeper-block header. This gets rid of the vchunks for whatever user
615 * data is getting discarded by the context reset.
616 */
618
619 /* Reset block size allocation sequence, too */
620 set->nextBlockSize = set->initBlockSize;
621}
622
/*
 * NOTE(review): extraction dropped source lines 631 (the signature line
 * "AllocSetDelete(MemoryContext context)" per the Functions listing), 635
 * (a local declaration, apparently "Size keepersize" given its use below),
 * 637, 672 (the statement that advances freelist->first_free inside the
 * discard loop -- its absence here is an artifact, not an infinite loop in
 * the real code), 676 and 721 (the Valgrind vpool-destroy calls referenced
 * by the adjacent comments).  The remaining code is verbatim.
 */
623/*
624 * AllocSetDelete
625 * Frees all memory which is allocated in the given set,
626 * in preparation for deletion of the set.
627 *
628 * Unlike AllocSetReset, this *must* free all resources of the set.
629 */
630void
632{
633 AllocSet set = (AllocSet) context;
634 AllocBlock block = set->blocks;
636
638
639#ifdef MEMORY_CONTEXT_CHECKING
640 /* Check for corruption and leaks before freeing */
641 AllocSetCheck(context);
642#endif
643
644 /* Remember keeper block size for Assert below */
645 keepersize = KeeperBlock(set)->endptr - ((char *) set);
646
647 /*
648 * If the context is a candidate for a freelist, put it into that freelist
649 * instead of destroying it.
650 */
651 if (set->freeListIndex >= 0)
652 {
653 AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
654
655 /*
656 * Reset the context, if it needs it, so that we aren't hanging on to
657 * more than the initial malloc chunk.
658 */
659 if (!context->isReset)
660 MemoryContextResetOnly(context);
661
662 /*
663 * If the freelist is full, just discard what's already in it. See
664 * comments with context_freelists[].
665 */
666 if (freelist->num_free >= MAX_FREE_CONTEXTS)
667 {
668 while (freelist->first_free != NULL)
669 {
670 AllocSetContext *oldset = freelist->first_free;
671
673 freelist->num_free--;
674
675 /* Destroy the context's vpool --- see notes below */
677
678 /* All that remains is to free the header/initial block */
679 free(oldset);
680 }
681 Assert(freelist->num_free == 0);
682 }
683
684 /* Now add the just-deleted context to the freelist. */
685 set->header.nextchild = (MemoryContext) freelist->first_free;
686 freelist->first_free = set;
687 freelist->num_free++;
688
689 return;
690 }
691
692 /* Free all blocks, except the keeper which is part of context header */
693 while (block != NULL)
694 {
695 AllocBlock next = block->next;
696
697 if (!IsKeeperBlock(set, block))
698 context->mem_allocated -= block->endptr - ((char *) block);
699
700#ifdef CLOBBER_FREED_MEMORY
701 wipe_mem(block, block->freeptr - ((char *) block));
702#endif
703
704 if (!IsKeeperBlock(set, block))
705 {
706 /* As in AllocSetReset, free block-header vchunks explicitly */
707 VALGRIND_MEMPOOL_FREE(set, block);
708 free(block);
709 }
710
711 block = next;
712 }
713
714 Assert(context->mem_allocated == keepersize);
715
716 /*
717 * Destroy the vpool. We don't seem to need to explicitly free the
718 * initial block's header vchunk, nor any user-data vchunks that Valgrind
719 * still knows about; they'll all go away automatically.
720 */
722
723 /* Finally, free the context header, including the keeper block */
724 free(set);
725}
726
/*
 * NOTE(review): extraction dropped source lines 738 (a local declaration,
 * apparently "MemoryChunk *chunk" given the assignment at 765), 758 (the
 * Valgrind vchunk call referenced by the comment at 757), 768 (the
 * external-chunk header setup referenced by the comment at 767), 774, 778,
 * 801, 805 and 807 (the NOACCESS marking calls and the final return of the
 * chunk's data pointer).  The remaining code is verbatim.  This path
 * services requests larger than allocChunkLimit with a dedicated
 * single-chunk block, as the header comment says.
 */
727/*
728 * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
729 *
730 * AllocSetAlloc()'s comment explains why this is separate.
731 */
733static void *
734AllocSetAllocLarge(MemoryContext context, Size size, int flags)
735{
736 AllocSet set = (AllocSet) context;
737 AllocBlock block;
739 Size chunk_size;
740 Size blksize;
741
742 /* validate 'size' is within the limits for the given 'flags' */
743 MemoryContextCheckSize(context, size, flags);
744
745#ifdef MEMORY_CONTEXT_CHECKING
746 /* ensure there's always space for the sentinel byte */
747 chunk_size = MAXALIGN(size + 1);
748#else
749 chunk_size = MAXALIGN(size);
750#endif
751
752 blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
753 block = (AllocBlock) malloc(blksize);
754 if (block == NULL)
755 return MemoryContextAllocationFailure(context, size, flags);
756
757 /* Make a vchunk covering the new block's header */
759
760 context->mem_allocated += blksize;
761
762 block->aset = set;
763 block->freeptr = block->endptr = ((char *) block) + blksize;
764
765 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
766
767 /* mark the MemoryChunk as externally managed */
769
770#ifdef MEMORY_CONTEXT_CHECKING
771 chunk->requested_size = size;
772 /* set mark to catch clobber of "unused" space */
773 Assert(size < chunk_size);
775#endif
776#ifdef RANDOMIZE_ALLOCATED_MEMORY
777 /* fill the allocated space with junk */
779#endif
780
781 /*
782 * Stick the new block underneath the active allocation block, if any, so
783 * that we don't lose the use of the space remaining therein.
784 */
785 if (set->blocks != NULL)
786 {
787 block->prev = set->blocks;
788 block->next = set->blocks->next;
789 if (block->next)
790 block->next->prev = block;
791 set->blocks->next = block;
792 }
793 else
794 {
795 block->prev = NULL;
796 block->next = NULL;
797 set->blocks = block;
798 }
799
800 /* Ensure any padding bytes are marked NOACCESS. */
802 chunk_size - size);
803
804 /* Disallow access to the chunk header. */
806
808}
809
/*
 * NOTE(review): extraction dropped source lines 815 (the first signature
 * line -- "AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock
 * block," per the Functions listing above), 818 (a local declaration,
 * apparently "MemoryChunk *chunk" given the assignment at 820), 823, 829,
 * 835, 839, 843, 847 and 849 (the header-initialization, sentinel,
 * randomize, NOACCESS and return statements referenced by the surviving
 * comments).  The remaining code is verbatim.  Also note the header
 * comment's "from a chunk" reads like a typo for "from a block" -- the
 * function carves a chunk out of the given block's free space.
 */
810/*
811 * Small helper for allocating a new chunk from a chunk, to avoid duplicating
812 * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
813 */
814static inline void *
816 Size size, Size chunk_size, int fidx)
817{
819
820 chunk = (MemoryChunk *) (block->freeptr);
821
822 /* Prepare to initialize the chunk header. */
824
825 block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
826 Assert(block->freeptr <= block->endptr);
827
828 /* store the free list index in the value field */
830
831#ifdef MEMORY_CONTEXT_CHECKING
832 chunk->requested_size = size;
833 /* set mark to catch clobber of "unused" space */
834 if (size < chunk_size)
836#endif
837#ifdef RANDOMIZE_ALLOCATED_MEMORY
838 /* fill the allocated space with junk */
840#endif
841
842 /* Ensure any padding bytes are marked NOACCESS. */
844 chunk_size - size);
845
846 /* Disallow access to the chunk header. */
848
850}
851
/*
 * NOTE(review): extraction dropped source lines 858 ("static pg_noinline"
 * decoration per the Functions listing), 865, 867 (local declarations --
 * apparently availspace and required_size given their uses below), 887-891
 * inside the carve-up loop (the availchunk/a_fidx declarations and initial
 * AllocSetFreeIndex call), 898, 902, 908, 910, 913, 918, 920, 922 (the
 * chunk-header and freelist-link manipulation referenced by the surviving
 * comments), 944 (the required_size computation) and 967 (the Valgrind
 * vchunk call).  The remaining code is verbatim.
 */
852/*
853 * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
854 * allocated from it.
855 *
856 * AllocSetAlloc()'s comment explains why this is separate.
857 */
859static void *
860AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
861 int fidx)
862{
863 AllocSet set = (AllocSet) context;
864 AllocBlock block;
866 Size blksize;
868 Size chunk_size;
869
870 /* due to the keeper block set->blocks should always be valid */
871 Assert(set->blocks != NULL);
872 block = set->blocks;
873 availspace = block->endptr - block->freeptr;
874
875 /*
876 * The existing active (top) block does not have enough room for the
877 * requested allocation, but it might still have a useful amount of space
878 * in it. Once we push it down in the block list, we'll never try to
879 * allocate more space from it. So, before we do that, carve up its free
880 * space into chunks that we can put on the set's freelists.
881 *
882 * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
883 * left in the block, this loop cannot iterate more than
884 * ALLOCSET_NUM_FREELISTS-1 times.
885 */
886 while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
887 {
892
893 /*
894 * In most cases, we'll get back the index of the next larger freelist
895 * than the one we need to put this chunk on. The exception is when
896 * availchunk is exactly a power of 2.
897 */
899 {
900 a_fidx--;
901 Assert(a_fidx >= 0);
903 }
904
905 chunk = (MemoryChunk *) (block->freeptr);
906
907 /* Prepare to initialize the chunk header. */
909 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
911
912 /* store the freelist index in the value field */
914#ifdef MEMORY_CONTEXT_CHECKING
915 chunk->requested_size = InvalidAllocSize; /* mark it free */
916#endif
917 /* push this chunk onto the free list */
919
921 link->next = set->freelist[a_fidx];
923
924 set->freelist[a_fidx] = chunk;
925 }
926
927 /*
928 * The first such block has size initBlockSize, and we double the space in
929 * each succeeding block, but not more than maxBlockSize.
930 */
931 blksize = set->nextBlockSize;
932 set->nextBlockSize <<= 1;
933 if (set->nextBlockSize > set->maxBlockSize)
934 set->nextBlockSize = set->maxBlockSize;
935
936 /* Choose the actual chunk size to allocate */
937 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
938 Assert(chunk_size >= size);
939
940 /*
941 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
942 * space... but try to keep it a power of 2.
943 */
945 while (blksize < required_size)
946 blksize <<= 1;
947
948 /* Try to allocate it */
949 block = (AllocBlock) malloc(blksize);
950
951 /*
952 * We could be asking for pretty big blocks here, so cope if malloc fails.
953 * But give up if there's less than 1 MB or so available...
954 */
955 while (block == NULL && blksize > 1024 * 1024)
956 {
957 blksize >>= 1;
958 if (blksize < required_size)
959 break;
960 block = (AllocBlock) malloc(blksize);
961 }
962
963 if (block == NULL)
964 return MemoryContextAllocationFailure(context, size, flags);
965
966 /* Make a vchunk covering the new block's header */
968
969 context->mem_allocated += blksize;
970
971 block->aset = set;
972 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
973 block->endptr = ((char *) block) + blksize;
974
975 /* Mark unallocated space NOACCESS. */
977 blksize - ALLOC_BLOCKHDRSZ;
978
979 block->prev = NULL;
980 block->next = set->blocks;
981 if (block->next)
982 block->next->prev = block;
983 set->blocks = block;
984
985 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
986}
987
/*
 * NOTE(review): extraction dropped source lines 1015, 1018 (local
 * declarations -- apparently chunk and availspace given their uses below),
 * 1048, 1051, 1053, 1056, 1058 (the freelist-pop mechanics around the
 * surviving comments), 1063-1064 (the sentinel set_sentinel call), 1072-1073,
 * 1076 and 1078 (the NOACCESS marking and the return of the reused chunk's
 * data pointer).  The remaining code is verbatim.
 */
988/*
989 * AllocSetAlloc
990 * Returns a pointer to allocated memory of given size or raises an ERROR
991 * on allocation failure, or returns NULL when flags contains
992 * MCXT_ALLOC_NO_OOM.
993 *
994 * No request may exceed:
995 * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
996 * All callers use a much-lower limit.
997 *
998 * Note: when using valgrind, it doesn't matter how the returned allocation
999 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
1000 * return space that is marked NOACCESS - AllocSetRealloc has to beware!
1001 *
1002 * This function should only contain the most common code paths. Everything
1003 * else should be in pg_noinline helper functions, thus avoiding the overhead
1004 * of creating a stack frame for the common cases. Allocating memory is often
1005 * a bottleneck in many workloads, so avoiding stack frame setup is
1006 * worthwhile. Helper functions should always directly return the newly
1007 * allocated memory so that we can just return that address directly as a tail
1008 * call.
1009 */
1010void *
1011AllocSetAlloc(MemoryContext context, Size size, int flags)
1012{
1013 AllocSet set = (AllocSet) context;
1014 AllocBlock block;
1016 int fidx;
1017 Size chunk_size;
1019
1020 Assert(AllocSetIsValid(set));
1021
1022 /* due to the keeper block set->blocks should never be NULL */
1023 Assert(set->blocks != NULL);
1024
1025 /*
1026 * If requested size exceeds maximum for chunks we hand the request off to
1027 * AllocSetAllocLarge().
1028 */
1029 if (size > set->allocChunkLimit)
1030 return AllocSetAllocLarge(context, size, flags);
1031
1032 /*
1033 * Request is small enough to be treated as a chunk. Look in the
1034 * corresponding free list to see if there is a free chunk we could reuse.
1035 * If one is found, remove it from the free list, make it again a member
1036 * of the alloc set and return its data address.
1037 *
1038 * Note that we don't attempt to ensure there's space for the sentinel
1039 * byte here. We expect a large proportion of allocations to be for sizes
1040 * which are already a power of 2. If we were to always make space for a
1041 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1042 * doubling the memory requirements for such allocations.
1043 */
1044 fidx = AllocSetFreeIndex(size);
1045 chunk = set->freelist[fidx];
1046 if (chunk != NULL)
1047 {
1049
1050 /* Allow access to the chunk header. */
1052
1054
1055 /* pop this chunk off the freelist */
1057 set->freelist[fidx] = link->next;
1059
1060#ifdef MEMORY_CONTEXT_CHECKING
1061 chunk->requested_size = size;
1062 /* set mark to catch clobber of "unused" space */
1065#endif
1066#ifdef RANDOMIZE_ALLOCATED_MEMORY
1067 /* fill the allocated space with junk */
1068 randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1069#endif
1070
1071 /* Ensure any padding bytes are marked NOACCESS. */
1074
1075 /* Disallow access to the chunk header. */
1077
1079 }
1080
1081 /*
1082 * Choose the actual chunk size to allocate.
1083 */
1084 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1085 Assert(chunk_size >= size);
1086
1087 block = set->blocks;
1088 availspace = block->endptr - block->freeptr;
1089
1090 /*
1091 * If there is enough room in the active allocation block, we will put the
1092 * chunk into that block. Else must start a new one.
1093 */
1094 if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1095 return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1096
1097 /* There's enough space on the current block, so allocate from that */
1098 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1099}
1100
/*
 * NOTE(review): extraction dropped source lines 1109 (a local declaration,
 * apparently the MemoryChunk pointer derived from 'pointer'), 1112 (the
 * Valgrind call referenced by the comment at 1111), 1114 (the branch
 * condition distinguishing external single-chunk blocks from ordinary
 * chunks -- its absence makes the "if/else" below look bare), 1117 (the
 * block lookup referenced by the comment at 1116), 1159, 1161 (local
 * declarations in the else branch), 1172-1174 (the fidx extraction and
 * validity Asserts referenced by the comment at 1163-1168), 1185 (the
 * wipe_mem call) and 1188, 1190 (the freelist-link setup).  The remaining
 * code is verbatim.
 */
1101/*
1102 * AllocSetFree
1103 * Frees allocated memory; memory is removed from the set.
1104 */
1105void
1106AllocSetFree(void *pointer)
1107{
1108 AllocSet set;
1110
1111 /* Allow access to the chunk header. */
1113
1115 {
1116 /* Release single-chunk block. */
1118
1119 /*
1120 * Try to verify that we have a sane block pointer: the block header
1121 * should reference an aset and the freeptr should match the endptr.
1122 */
1123 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1124 elog(ERROR, "could not find block containing chunk %p", chunk);
1125
1126 set = block->aset;
1127
1128#ifdef MEMORY_CONTEXT_CHECKING
1129 {
1130 /* Test for someone scribbling on unused space in chunk */
1131 Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1132 if (!sentinel_ok(pointer, chunk->requested_size))
1133 elog(WARNING, "detected write past chunk end in %s %p",
1134 set->header.name, chunk);
1135 }
1136#endif
1137
1138 /* OK, remove block from aset's list and free it */
1139 if (block->prev)
1140 block->prev->next = block->next;
1141 else
1142 set->blocks = block->next;
1143 if (block->next)
1144 block->next->prev = block->prev;
1145
1146 set->header.mem_allocated -= block->endptr - ((char *) block);
1147
1148#ifdef CLOBBER_FREED_MEMORY
1149 wipe_mem(block, block->freeptr - ((char *) block));
1150#endif
1151
1152 /* As in AllocSetReset, free block-header vchunks explicitly */
1153 VALGRIND_MEMPOOL_FREE(set, block);
1154
1155 free(block);
1156 }
1157 else
1158 {
1160 int fidx;
1162
1163 /*
1164 * In this path, for speed reasons we just Assert that the referenced
1165 * block is good. We can also Assert that the value field is sane.
1166 * Future field experience may show that these Asserts had better
1167 * become regular runtime test-and-elog checks.
1168 */
1169 Assert(AllocBlockIsValid(block));
1170 set = block->aset;
1171
1175
1176#ifdef MEMORY_CONTEXT_CHECKING
1177 /* Test for someone scribbling on unused space in chunk */
1178 if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1179 if (!sentinel_ok(pointer, chunk->requested_size))
1180 elog(WARNING, "detected write past chunk end in %s %p",
1181 set->header.name, chunk);
1182#endif
1183
1184#ifdef CLOBBER_FREED_MEMORY
1186#endif
1187 /* push this chunk onto the top of the free list */
1189 link->next = set->freelist[fidx];
1191 set->freelist[fidx] = chunk;
1192
1193#ifdef MEMORY_CONTEXT_CHECKING
1194
1195 /*
1196 * Reset requested_size to InvalidAllocSize in chunks that are on free
1197 * list.
1198 */
1199 chunk->requested_size = InvalidAllocSize;
1200#endif
1201 }
1202}
1203
1204/*
1205 * AllocSetRealloc
1206 * Returns new pointer to allocated memory of given size or NULL if
1207 * request could not be completed; this memory is added to the set.
1208 * Memory associated with given pointer is copied into the new memory,
1209 * and the old memory is freed.
1210 *
1211 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1212 * makes our Valgrind client requests less-precise, hazarding false negatives.
1213 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1214 * request size.)
1215 */
1216void *
1217AllocSetRealloc(void *pointer, Size size, int flags)
1218{
1219 AllocBlock block;
1220 AllocSet set;
1223 int fidx;
1224
1225 /* Allow access to the chunk header. */
1227
1229 {
1230 /*
1231 * The chunk must have been allocated as a single-chunk block. Use
1232 * realloc() to make the containing block bigger, or smaller, with
1233 * minimum space wastage.
1234 */
1236 Size chksize;
1237 Size blksize;
1239
1241
1242 /*
1243 * Try to verify that we have a sane block pointer: the block header
1244 * should reference an aset and the freeptr should match the endptr.
1245 */
1246 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1247 elog(ERROR, "could not find block containing chunk %p", chunk);
1248
1249 set = block->aset;
1250
1251 /* only check size in paths where the limits could be hit */
1252 MemoryContextCheckSize((MemoryContext) set, size, flags);
1253
1254 oldchksize = block->endptr - (char *) pointer;
1255
1256#ifdef MEMORY_CONTEXT_CHECKING
1257 /* Test for someone scribbling on unused space in chunk */
1258 Assert(chunk->requested_size < oldchksize);
1259 if (!sentinel_ok(pointer, chunk->requested_size))
1260 elog(WARNING, "detected write past chunk end in %s %p",
1261 set->header.name, chunk);
1262#endif
1263
1264#ifdef MEMORY_CONTEXT_CHECKING
1265 /* ensure there's always space for the sentinel byte */
1266 chksize = MAXALIGN(size + 1);
1267#else
1268 chksize = MAXALIGN(size);
1269#endif
1270
1271 /* Do the realloc */
1273 oldblksize = block->endptr - ((char *) block);
1274
1275 newblock = (AllocBlock) realloc(block, blksize);
1276 if (newblock == NULL)
1277 {
1278 /* Disallow access to the chunk header. */
1280 return MemoryContextAllocationFailure(&set->header, size, flags);
1281 }
1282
1283 /*
1284 * Move the block-header vchunk explicitly. (mcxt.c will take care of
1285 * moving the vchunk for the user data.)
1286 */
1288 block = newblock;
1289
1290 /* updated separately, not to underflow when (oldblksize > blksize) */
1291 set->header.mem_allocated -= oldblksize;
1292 set->header.mem_allocated += blksize;
1293
1294 block->freeptr = block->endptr = ((char *) block) + blksize;
1295
1296 /* Update pointers since block has likely been moved */
1297 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1298 pointer = MemoryChunkGetPointer(chunk);
1299 if (block->prev)
1300 block->prev->next = block;
1301 else
1302 set->blocks = block;
1303 if (block->next)
1304 block->next->prev = block;
1305
1306#ifdef MEMORY_CONTEXT_CHECKING
1307#ifdef RANDOMIZE_ALLOCATED_MEMORY
1308
1309 /*
1310 * We can only randomize the extra space if we know the prior request.
1311 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1312 */
1313 if (size > chunk->requested_size)
1314 randomize_mem((char *) pointer + chunk->requested_size,
1315 size - chunk->requested_size);
1316#else
1317
1318 /*
1319 * If this is an increase, realloc() will have marked any
1320 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1321 * also need to adjust trailing bytes from the old allocation (from
1322 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1323 * Make sure not to mark too many bytes in case chunk->requested_size
1324 * < size < oldchksize.
1325 */
1326#ifdef USE_VALGRIND
1327 if (Min(size, oldchksize) > chunk->requested_size)
1328 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1329 Min(size, oldchksize) - chunk->requested_size);
1330#endif
1331#endif
1332
1333 chunk->requested_size = size;
1334 /* set mark to catch clobber of "unused" space */
1335 Assert(size < chksize);
1336 set_sentinel(pointer, size);
1337#else /* !MEMORY_CONTEXT_CHECKING */
1338
1339 /*
1340 * We may need to adjust marking of bytes from the old allocation as
1341 * some of them may be marked NOACCESS. We don't know how much of the
1342 * old chunk size was the requested size; it could have been as small
1343 * as one byte. We have to be conservative and just mark the entire
1344 * old portion DEFINED. Make sure not to mark memory beyond the new
1345 * allocation in case it's smaller than the old one.
1346 */
1347 VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1348#endif
1349
1350 /* Ensure any padding bytes are marked NOACCESS. */
1351 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1352
1353 /* Disallow access to the chunk header. */
1355
1356 return pointer;
1357 }
1358
1359 block = MemoryChunkGetBlock(chunk);
1360
1361 /*
1362 * In this path, for speed reasons we just Assert that the referenced
1363 * block is good. We can also Assert that the value field is sane. Future
1364 * field experience may show that these Asserts had better become regular
1365 * runtime test-and-elog checks.
1366 */
1367 Assert(AllocBlockIsValid(block));
1368 set = block->aset;
1369
1373
1374#ifdef MEMORY_CONTEXT_CHECKING
1375 /* Test for someone scribbling on unused space in chunk */
1376 if (chunk->requested_size < oldchksize)
1377 if (!sentinel_ok(pointer, chunk->requested_size))
1378 elog(WARNING, "detected write past chunk end in %s %p",
1379 set->header.name, chunk);
1380#endif
1381
1382 /*
1383 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1384 * allocated area already is >= the new size. (In particular, we will
1385 * fall out here if the requested size is a decrease.)
1386 */
1387 if (oldchksize >= size)
1388 {
1389#ifdef MEMORY_CONTEXT_CHECKING
1390 Size oldrequest = chunk->requested_size;
1391
1392#ifdef RANDOMIZE_ALLOCATED_MEMORY
1393 /* We can only fill the extra space if we know the prior request */
1394 if (size > oldrequest)
1395 randomize_mem((char *) pointer + oldrequest,
1396 size - oldrequest);
1397#endif
1398
1399 chunk->requested_size = size;
1400
1401 /*
1402 * If this is an increase, mark any newly-available part UNDEFINED.
1403 * Otherwise, mark the obsolete part NOACCESS.
1404 */
1405 if (size > oldrequest)
1406 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1407 size - oldrequest);
1408 else
1409 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1410 oldchksize - size);
1411
1412 /* set mark to catch clobber of "unused" space */
1413 if (size < oldchksize)
1414 set_sentinel(pointer, size);
1415#else /* !MEMORY_CONTEXT_CHECKING */
1416
1417 /*
1418 * We don't have the information to determine whether we're growing
1419 * the old request or shrinking it, so we conservatively mark the
1420 * entire new allocation DEFINED.
1421 */
1423 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1424#endif
1425
1426 /* Disallow access to the chunk header. */
1428
1429 return pointer;
1430 }
1431 else
1432 {
1433 /*
1434 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1435 * allocate a new chunk and copy the data. Since we know the existing
1436 * data isn't huge, this won't involve any great memcpy expense, so
1437 * it's not worth being smarter. (At one time we tried to avoid
1438 * memcpy when it was possible to enlarge the chunk in-place, but that
1439 * turns out to misbehave unpleasantly for repeated cycles of
1440 * palloc/repalloc/pfree: the eventually freed chunks go into the
1441 * wrong freelist for the next initial palloc request, and so we leak
1442 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1443 */
1445 Size oldsize;
1446
1447 /* allocate new chunk (this also checks size is valid) */
1448 newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1449
1450 /* leave immediately if request was not completed */
1451 if (newPointer == NULL)
1452 {
1453 /* Disallow access to the chunk header. */
1455 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1456 }
1457
1458 /*
1459 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1460 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1461 * definedness from the old allocation to the new. If we know the old
1462 * allocation, copy just that much. Otherwise, make the entire old
1463 * chunk defined to avoid errors as we copy the currently-NOACCESS
1464 * trailing bytes.
1465 */
1467#ifdef MEMORY_CONTEXT_CHECKING
1468 oldsize = chunk->requested_size;
1469#else
1472#endif
1473
1474 /* transfer existing data (certain to fit) */
1475 memcpy(newPointer, pointer, oldsize);
1476
1477 /* free old chunk */
1478 AllocSetFree(pointer);
1479
1480 return newPointer;
1481 }
1482}
1483
1484/*
1485 * AllocSetGetChunkContext
1486 * Return the MemoryContext that 'pointer' belongs to.
1487 */
1489AllocSetGetChunkContext(void *pointer)
1490{
1492 AllocBlock block;
1493 AllocSet set;
1494
1495 /* Allow access to the chunk header. */
1497
1500 else
1502
1503 /* Disallow access to the chunk header. */
1505
1506 Assert(AllocBlockIsValid(block));
1507 set = block->aset;
1508
1509 return &set->header;
1510}
1511
1512/*
1513 * AllocSetGetChunkSpace
1514 * Given a currently-allocated chunk, determine the total space
1515 * it occupies (including all memory-allocation overhead).
1516 */
1517Size
1518AllocSetGetChunkSpace(void *pointer)
1519{
1521 int fidx;
1522
1523 /* Allow access to the chunk header. */
1525
1527 {
1529
1530 /* Disallow access to the chunk header. */
1532
1533 Assert(AllocBlockIsValid(block));
1534
1535 return block->endptr - (char *) chunk;
1536 }
1537
1540
1541 /* Disallow access to the chunk header. */
1543
1545}
1546
1547/*
1548 * AllocSetIsEmpty
1549 * Is an allocset empty of any allocated space?
1550 */
1551bool
1553{
1554 Assert(AllocSetIsValid(context));
1555
1556 /*
1557 * For now, we say "empty" only if the context is new or just reset. We
1558 * could examine the freelists to determine if all space has been freed,
1559 * but it's not really worth the trouble for present uses of this
1560 * functionality.
1561 */
1562 if (context->isReset)
1563 return true;
1564 return false;
1565}
1566
1567/*
1568 * AllocSetStats
1569 * Compute stats about memory consumption of an allocset.
1570 *
1571 * printfunc: if not NULL, pass a human-readable stats string to this.
1572 * passthru: pass this pointer through to printfunc.
1573 * totals: if not NULL, add stats about this context into *totals.
1574 * print_to_stderr: print stats to stderr if true, elog otherwise.
1575 */
1576void
1580{
1581 AllocSet set = (AllocSet) context;
1582 Size nblocks = 0;
1583 Size freechunks = 0;
1584 Size totalspace;
1585 Size freespace = 0;
1586 AllocBlock block;
1587 int fidx;
1588
1589 Assert(AllocSetIsValid(set));
1590
1591 /* Include context header in totalspace */
1592 totalspace = MAXALIGN(sizeof(AllocSetContext));
1593
1594 for (block = set->blocks; block != NULL; block = block->next)
1595 {
1596 nblocks++;
1597 totalspace += block->endptr - ((char *) block);
1598 freespace += block->endptr - block->freeptr;
1599 }
1600 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1601 {
1603 MemoryChunk *chunk = set->freelist[fidx];
1604
1605 while (chunk != NULL)
1606 {
1608
1609 /* Allow access to the chunk header. */
1613
1614 freechunks++;
1615 freespace += chksz + ALLOC_CHUNKHDRSZ;
1616
1618 chunk = link->next;
1620 }
1621 }
1622
1623 if (printfunc)
1624 {
1625 char stats_string[200];
1626
1628 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1629 totalspace, nblocks, freespace, freechunks,
1630 totalspace - freespace);
1632 }
1633
1634 if (totals)
1635 {
1636 totals->nblocks += nblocks;
1637 totals->freechunks += freechunks;
1638 totals->totalspace += totalspace;
1639 totals->freespace += freespace;
1640 }
1641}
1642
1643
1644#ifdef MEMORY_CONTEXT_CHECKING
1645
1646/*
1647 * AllocSetCheck
1648 * Walk through chunks and check consistency of memory.
1649 *
1650 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1651 * find yourself in an infinite loop when trouble occurs, because this
1652 * routine will be entered again when elog cleanup tries to release memory!
1653 */
1654void
1656{
1657 AllocSet set = (AllocSet) context;
1658 const char *name = set->header.name;
1660 AllocBlock block;
1662
1663 for (prevblock = NULL, block = set->blocks;
1664 block != NULL;
1665 prevblock = block, block = block->next)
1666 {
1667 char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1668 Size blk_used = block->freeptr - bpoz;
1669 Size blk_data = 0;
1670 Size nchunks = 0;
1671 bool has_external_chunk = false;
1672
1673 if (IsKeeperBlock(set, block))
1674 total_allocated += block->endptr - ((char *) set);
1675 else
1676 total_allocated += block->endptr - ((char *) block);
1677
1678 /*
1679 * Empty block - empty can be keeper-block only
1680 */
1681 if (!blk_used)
1682 {
1683 if (!IsKeeperBlock(set, block))
1684 elog(WARNING, "problem in alloc set %s: empty block %p",
1685 name, block);
1686 }
1687
1688 /*
1689 * Check block header fields
1690 */
1691 if (block->aset != set ||
1692 block->prev != prevblock ||
1693 block->freeptr < bpoz ||
1694 block->freeptr > block->endptr)
1695 elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1696 name, block);
1697
1698 /*
1699 * Chunk walker
1700 */
1701 while (bpoz < block->freeptr)
1702 {
1704 Size chsize,
1705 dsize;
1706
1707 /* Allow access to the chunk header. */
1709
1711 {
1712 chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1713 has_external_chunk = true;
1714
1715 /* make sure this chunk consumes the entire block */
1717 elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1718 name, chunk, block);
1719 }
1720 else
1721 {
1723
1725 elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1726 name, chunk, block);
1727
1728 chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1729
1730 /*
1731 * Check the stored block offset correctly references this
1732 * block.
1733 */
1734 if (block != MemoryChunkGetBlock(chunk))
1735 elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1736 name, chunk, block);
1737 }
1738 dsize = chunk->requested_size; /* real data */
1739
1740 /* an allocated chunk's requested size must be <= the chsize */
1741 if (dsize != InvalidAllocSize && dsize > chsize)
1742 elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1743 name, chunk, block);
1744
1745 /* chsize must not be smaller than the first freelist's size */
1746 if (chsize < (1 << ALLOC_MINBITS))
1747 elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1748 name, chsize, chunk, block);
1749
1750 /*
1751 * Check for overwrite of padding space in an allocated chunk.
1752 */
1753 if (dsize != InvalidAllocSize && dsize < chsize &&
1755 elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1756 name, block, chunk);
1757
1758 /* if chunk is allocated, disallow access to the chunk header */
1759 if (dsize != InvalidAllocSize)
1761
1762 blk_data += chsize;
1763 nchunks++;
1764
1766 }
1767
1768 if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1769 elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1770 name, block);
1771
1772 if (has_external_chunk && nchunks > 1)
1773 elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1774 name, block);
1775 }
1776
1778}
1779
1780#endif /* MEMORY_CONTEXT_CHECKING */
Datum idx(PG_FUNCTION_ARGS)
Definition _int_op.c:262
void AllocSetReset(MemoryContext context)
Definition aset.c:546
static pg_noinline void * AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags, int fidx)
Definition aset.c:861
#define AllocSetIsValid(set)
Definition aset.c:200
#define AllocBlockIsValid(block)
Definition aset.c:207
void * AllocSetRealloc(void *pointer, Size size, int flags)
Definition aset.c:1218
#define IsKeeperBlock(set, block)
Definition aset.c:248
#define GetFreeListLink(chkptr)
Definition aset.c:138
#define FreeListIdxIsValid(fidx)
Definition aset.c:142
Size AllocSetGetChunkSpace(void *pointer)
Definition aset.c:1519
#define ALLOC_CHUNKHDRSZ
Definition aset.c:109
MemoryContext AllocSetGetChunkContext(void *pointer)
Definition aset.c:1490
void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition aset.c:1578
#define KeeperBlock(set)
Definition aset.c:244
#define GetChunkSizeFromFreeListIdx(fidx)
Definition aset.c:146
#define ALLOC_MINBITS
Definition aset.c:83
struct AllocBlockData * AllocBlock
Definition aset.c:113
#define MAX_FREE_CONTEXTS
Definition aset.c:241
static int AllocSetFreeIndex(Size size)
Definition aset.c:277
bool AllocSetIsEmpty(MemoryContext context)
Definition aset.c:1553
#define ALLOC_BLOCKHDRSZ
Definition aset.c:108
void * AllocSetAlloc(MemoryContext context, Size size, int flags)
Definition aset.c:1012
void * AllocPointer
Definition aset.c:119
#define ALLOCSET_NUM_FREELISTS
Definition aset.c:84
#define ALLOC_CHUNK_FRACTION
Definition aset.c:87
void AllocSetFree(void *pointer)
Definition aset.c:1107
#define FIRST_BLOCKHDRSZ
Definition aset.c:110
void AllocSetDelete(MemoryContext context)
Definition aset.c:632
static void * AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block, Size size, Size chunk_size, int fidx)
Definition aset.c:816
#define ALLOC_CHUNK_LIMIT
Definition aset.c:85
static AllocSetFreeList context_freelists[2]
Definition aset.c:257
static pg_noinline void * AllocSetAllocLarge(MemoryContext context, Size size, int flags)
Definition aset.c:735
#define ExternalChunkGetBlock(chunk)
Definition aset.c:215
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition aset.c:347
AllocSetContext * AllocSet
Definition aset.c:173
static int32 next
Definition blutils.c:225
#define pg_noinline
Definition c.h:307
#define Min(x, y)
Definition c.h:1019
#define MAXALIGN(LEN)
Definition c.h:838
#define PG_USED_FOR_ASSERTS_ONLY
Definition c.h:235
#define Max(x, y)
Definition c.h:1013
#define Assert(condition)
Definition c.h:885
#define MemSetAligned(start, val, len)
Definition c.h:1065
#define unlikely(x)
Definition c.h:424
uint32_t uint32
Definition c.h:558
#define StaticAssertDecl(condition, errmessage)
Definition c.h:950
size_t Size
Definition c.h:631
int errcode(int sqlerrcode)
Definition elog.c:874
int errmsg(const char *fmt,...)
Definition elog.c:1093
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:36
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
void MemoryContextCreate(MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
Definition mcxt.c:1149
MemoryContext TopMemoryContext
Definition mcxt.c:166
void MemoryContextStats(MemoryContext context)
Definition mcxt.c:863
void * MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
Definition mcxt.c:1198
void MemoryContextResetOnly(MemoryContext context)
Definition mcxt.c:422
#define VALGRIND_DESTROY_MEMPOOL(context)
Definition memdebug.h:25
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition memdebug.h:26
#define VALGRIND_MEMPOOL_CHANGE(context, optr, nptr, size)
Definition memdebug.h:31
#define VALGRIND_CREATE_MEMPOOL(context, redzones, zeroed)
Definition memdebug.h:24
#define VALGRIND_MEMPOOL_ALLOC(context, addr, size)
Definition memdebug.h:29
#define VALGRIND_MEMPOOL_TRIM(context, addr, size)
Definition memdebug.h:32
#define VALGRIND_MEMPOOL_FREE(context, addr)
Definition memdebug.h:30
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition memdebug.h:27
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition memdebug.h:28
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition memnodes.h:54
#define ALLOCSET_SMALL_MINSIZE
Definition memutils.h:167
#define ALLOCSET_DEFAULT_MINSIZE
Definition memutils.h:157
#define AllocHugeSizeIsValid(size)
Definition memutils.h:49
#define InvalidAllocSize
Definition memutils.h:47
#define ALLOCSET_SMALL_INITSIZE
Definition memutils.h:168
#define ALLOCSET_DEFAULT_INITSIZE
Definition memutils.h:158
static void MemoryContextCheckSize(MemoryContext context, Size size, int flags)
@ MCTX_ASET_ID
#define MEMORYCHUNK_MAX_BLOCKOFFSET
static Size MemoryChunkGetValue(MemoryChunk *chunk)
#define MemoryChunkGetPointer(c)
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
static void MemoryChunkSetHdrMaskExternal(MemoryChunk *chunk, MemoryContextMethodID methodid)
#define PointerGetMemoryChunk(p)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
struct MemoryContextData * MemoryContext
Definition palloc.h:36
static int pg_leftmost_one_pos32(uint32 word)
Definition pg_bitutils.h:41
PGDLLIMPORT const uint8 pg_leftmost_one_pos[256]
Definition pg_bitutils.c:27
#define snprintf
Definition port.h:260
static int fb(int x)
#define realloc(a, b)
#define free(a)
#define malloc(a)
AllocBlock prev
Definition aset.c:190
AllocSet aset
Definition aset.c:189
char * freeptr
Definition aset.c:192
AllocBlock next
Definition aset.c:191
char * endptr
Definition aset.c:193
MemoryContextData header
Definition aset.c:160
AllocSetContext * first_free
Definition aset.c:253
MemoryContext nextchild
Definition memnodes.h:130
const char * name
Definition memnodes.h:131
const char * name

◆ ALLOCSET_NUM_FREELISTS

#define ALLOCSET_NUM_FREELISTS   11

Definition at line 84 of file aset.c.

◆ AllocSetIsValid

#define AllocSetIsValid (   set)     ((set) && IsA(set, AllocSetContext))

Definition at line 200 of file aset.c.

◆ ExternalChunkGetBlock

#define ExternalChunkGetBlock (   chunk)     (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)

Definition at line 215 of file aset.c.

◆ FIRST_BLOCKHDRSZ

#define FIRST_BLOCKHDRSZ
Value:

Definition at line 110 of file aset.c.

◆ FreeListIdxIsValid

#define FreeListIdxIsValid (   fidx)     ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)

Definition at line 142 of file aset.c.

◆ GetChunkSizeFromFreeListIdx

#define GetChunkSizeFromFreeListIdx (   fidx)     ((((Size) 1) << ALLOC_MINBITS) << (fidx))

Definition at line 146 of file aset.c.

◆ GetFreeListLink

#define GetFreeListLink (   chkptr)     (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)

Definition at line 138 of file aset.c.

◆ IsKeeperBlock

#define IsKeeperBlock (   set,
  block 
)    ((block) == (KeeperBlock(set)))

Definition at line 248 of file aset.c.

◆ KeeperBlock

#define KeeperBlock (   set)     ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))

Definition at line 244 of file aset.c.

◆ MAX_FREE_CONTEXTS

#define MAX_FREE_CONTEXTS   100 /* arbitrary limit on freelist length */

Definition at line 241 of file aset.c.

Typedef Documentation

◆ AllocBlock

Definition at line 113 of file aset.c.

◆ AllocBlockData

◆ AllocFreeListLink

◆ AllocPointer

Definition at line 119 of file aset.c.

◆ AllocSet

Definition at line 173 of file aset.c.

◆ AllocSetContext

◆ AllocSetFreeList

Function Documentation

◆ AllocSetAlloc()

void * AllocSetAlloc ( MemoryContext  context,
Size  size,
int  flags 
)

Definition at line 1012 of file aset.c.

1013{
1014 AllocSet set = (AllocSet) context;
1015 AllocBlock block;
1017 int fidx;
1018 Size chunk_size;
1020
1021 Assert(AllocSetIsValid(set));
1022
1023 /* due to the keeper block set->blocks should never be NULL */
1024 Assert(set->blocks != NULL);
1025
1026 /*
1027 * If requested size exceeds maximum for chunks we hand the request off to
1028 * AllocSetAllocLarge().
1029 */
1030 if (size > set->allocChunkLimit)
1031 return AllocSetAllocLarge(context, size, flags);
1032
1033 /*
1034 * Request is small enough to be treated as a chunk. Look in the
1035 * corresponding free list to see if there is a free chunk we could reuse.
1036 * If one is found, remove it from the free list, make it again a member
1037 * of the alloc set and return its data address.
1038 *
1039 * Note that we don't attempt to ensure there's space for the sentinel
1040 * byte here. We expect a large proportion of allocations to be for sizes
1041 * which are already a power of 2. If we were to always make space for a
1042 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1043 * doubling the memory requirements for such allocations.
1044 */
1045 fidx = AllocSetFreeIndex(size);
1046 chunk = set->freelist[fidx];
1047 if (chunk != NULL)
1048 {
1050
1051 /* Allow access to the chunk header. */
1053
1055
1056 /* pop this chunk off the freelist */
1058 set->freelist[fidx] = link->next;
1060
1061#ifdef MEMORY_CONTEXT_CHECKING
1062 chunk->requested_size = size;
1063 /* set mark to catch clobber of "unused" space */
1066#endif
1067#ifdef RANDOMIZE_ALLOCATED_MEMORY
1068 /* fill the allocated space with junk */
1069 randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1070#endif
1071
1072 /* Ensure any padding bytes are marked NOACCESS. */
1075
1076 /* Disallow access to the chunk header. */
1078
1080 }
1081
1082 /*
1083 * Choose the actual chunk size to allocate.
1084 */
1085 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
1086 Assert(chunk_size >= size);
1087
1088 block = set->blocks;
1089 availspace = block->endptr - block->freeptr;
1090
1091 /*
1092 * If there is enough room in the active allocation block, we will put the
1093 * chunk into that block. Else must start a new one.
1094 */
1095 if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1096 return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1097
1098 /* There's enough space on the current block, so allocate from that */
1099 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1100}
uint32 allocChunkLimit
Definition aset.c:168
AllocBlock blocks
Definition aset.c:162
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition aset.c:163

References ALLOC_CHUNKHDRSZ, AllocSetContext::allocChunkLimit, AllocSetAllocChunkFromBlock(), AllocSetAllocFromNewBlock(), AllocSetAllocLarge(), AllocSetFreeIndex(), AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MemoryChunkGetPointer, MemoryChunkGetValue(), unlikely, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

Referenced by AllocSetRealloc().

◆ AllocSetAllocChunkFromBlock()

static void * AllocSetAllocChunkFromBlock ( MemoryContext  context,
AllocBlock  block,
Size  size,
Size  chunk_size,
int  fidx 
)
inlinestatic

Definition at line 816 of file aset.c.

818{
820
821 chunk = (MemoryChunk *) (block->freeptr);
822
823 /* Prepare to initialize the chunk header. */
825
826 block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
827 Assert(block->freeptr <= block->endptr);
828
829 /* store the free list index in the value field */
831
832#ifdef MEMORY_CONTEXT_CHECKING
833 chunk->requested_size = size;
834 /* set mark to catch clobber of "unused" space */
835 if (size < chunk_size)
837#endif
838#ifdef RANDOMIZE_ALLOCATED_MEMORY
839 /* fill the allocated space with junk */
841#endif
842
843 /* Ensure any padding bytes are marked NOACCESS. */
845 chunk_size - size);
846
847 /* Disallow access to the chunk header. */
849
851}

References ALLOC_CHUNKHDRSZ, Assert, AllocBlockData::endptr, fb(), AllocBlockData::freeptr, MCTX_ASET_ID, MemoryChunkGetPointer, MemoryChunkSetHdrMask(), VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MAKE_MEM_UNDEFINED.

Referenced by AllocSetAlloc(), and AllocSetAllocFromNewBlock().

◆ AllocSetAllocFromNewBlock()

static pg_noinline void * AllocSetAllocFromNewBlock ( MemoryContext  context,
Size  size,
int  flags,
int  fidx 
)
static

Definition at line 861 of file aset.c.

863{
864 AllocSet set = (AllocSet) context;
865 AllocBlock block;
867 Size blksize;
869 Size chunk_size;
870
871 /* due to the keeper block set->blocks should always be valid */
872 Assert(set->blocks != NULL);
873 block = set->blocks;
874 availspace = block->endptr - block->freeptr;
875
876 /*
877 * The existing active (top) block does not have enough room for the
878 * requested allocation, but it might still have a useful amount of space
879 * in it. Once we push it down in the block list, we'll never try to
880 * allocate more space from it. So, before we do that, carve up its free
881 * space into chunks that we can put on the set's freelists.
882 *
883 * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
884 * left in the block, this loop cannot iterate more than
885 * ALLOCSET_NUM_FREELISTS-1 times.
886 */
887 while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
888 {
893
894 /*
895 * In most cases, we'll get back the index of the next larger freelist
896 * than the one we need to put this chunk on. The exception is when
897 * availchunk is exactly a power of 2.
898 */
900 {
901 a_fidx--;
902 Assert(a_fidx >= 0);
904 }
905
906 chunk = (MemoryChunk *) (block->freeptr);
907
908 /* Prepare to initialize the chunk header. */
910 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
912
913 /* store the freelist index in the value field */
915#ifdef MEMORY_CONTEXT_CHECKING
916 chunk->requested_size = InvalidAllocSize; /* mark it free */
917#endif
918 /* push this chunk onto the free list */
920
922 link->next = set->freelist[a_fidx];
924
925 set->freelist[a_fidx] = chunk;
926 }
927
928 /*
929 * The first such block has size initBlockSize, and we double the space in
930 * each succeeding block, but not more than maxBlockSize.
931 */
932 blksize = set->nextBlockSize;
933 set->nextBlockSize <<= 1;
934 if (set->nextBlockSize > set->maxBlockSize)
935 set->nextBlockSize = set->maxBlockSize;
936
937 /* Choose the actual chunk size to allocate */
938 chunk_size = GetChunkSizeFromFreeListIdx(fidx);
939 Assert(chunk_size >= size);
940
941 /*
942 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
943 * space... but try to keep it a power of 2.
944 */
946 while (blksize < required_size)
947 blksize <<= 1;
948
949 /* Try to allocate it */
950 block = (AllocBlock) malloc(blksize);
951
952 /*
953 * We could be asking for pretty big blocks here, so cope if malloc fails.
954 * But give up if there's less than 1 MB or so available...
955 */
956 while (block == NULL && blksize > 1024 * 1024)
957 {
958 blksize >>= 1;
959 if (blksize < required_size)
960 break;
961 block = (AllocBlock) malloc(blksize);
962 }
963
964 if (block == NULL)
965 return MemoryContextAllocationFailure(context, size, flags);
966
967 /* Make a vchunk covering the new block's header */
969
970 context->mem_allocated += blksize;
971
972 block->aset = set;
973 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
974 block->endptr = ((char *) block) + blksize;
975
976 /* Mark unallocated space NOACCESS. */
978 blksize - ALLOC_BLOCKHDRSZ);
979
980 block->prev = NULL;
981 block->next = set->blocks;
982 if (block->next)
983 block->next->prev = block;
984 set->blocks = block;
985
986 return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
987}
uint32 maxBlockSize
Definition aset.c:166
uint32 nextBlockSize
Definition aset.c:167

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, ALLOC_MINBITS, AllocSetAllocChunkFromBlock(), AllocSetFreeIndex(), AllocBlockData::aset, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, InvalidAllocSize, malloc, AllocSetContext::maxBlockSize, MCTX_ASET_ID, MemoryContextData::mem_allocated, MemoryChunkSetHdrMask(), MemoryContextAllocationFailure(), AllocBlockData::next, AllocSetContext::nextBlockSize, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, and VALGRIND_MEMPOOL_ALLOC.

Referenced by AllocSetAlloc().

◆ AllocSetAllocLarge()

static pg_noinline void * AllocSetAllocLarge ( MemoryContext  context,
Size  size,
int  flags 
)
static

Definition at line 735 of file aset.c.

736{
737 AllocSet set = (AllocSet) context;
738 AllocBlock block;
740 Size chunk_size;
741 Size blksize;
742
743 /* validate 'size' is within the limits for the given 'flags' */
744 MemoryContextCheckSize(context, size, flags);
745
746#ifdef MEMORY_CONTEXT_CHECKING
747 /* ensure there's always space for the sentinel byte */
748 chunk_size = MAXALIGN(size + 1);
749#else
750 chunk_size = MAXALIGN(size);
751#endif
752
753 blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
754 block = (AllocBlock) malloc(blksize);
755 if (block == NULL)
756 return MemoryContextAllocationFailure(context, size, flags);
757
758 /* Make a vchunk covering the new block's header */
760
761 context->mem_allocated += blksize;
762
763 block->aset = set;
764 block->freeptr = block->endptr = ((char *) block) + blksize;
765
766 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
767
768 /* mark the MemoryChunk as externally managed */
770
771#ifdef MEMORY_CONTEXT_CHECKING
772 chunk->requested_size = size;
773 /* set mark to catch clobber of "unused" space */
774 Assert(size < chunk_size);
776#endif
777#ifdef RANDOMIZE_ALLOCATED_MEMORY
778 /* fill the allocated space with junk */
780#endif
781
782 /*
783 * Stick the new block underneath the active allocation block, if any, so
784 * that we don't lose the use of the space remaining therein.
785 */
786 if (set->blocks != NULL)
787 {
788 block->prev = set->blocks;
789 block->next = set->blocks->next;
790 if (block->next)
791 block->next->prev = block;
792 set->blocks->next = block;
793 }
794 else
795 {
796 block->prev = NULL;
797 block->next = NULL;
798 set->blocks = block;
799 }
800
801 /* Ensure any padding bytes are marked NOACCESS. */
803 chunk_size - size);
804
805 /* Disallow access to the chunk header. */
807
809}

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockData::aset, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocBlockData::freeptr, malloc, MAXALIGN, MCTX_ASET_ID, MemoryContextData::mem_allocated, MemoryChunkGetPointer, MemoryChunkSetHdrMaskExternal(), MemoryContextAllocationFailure(), MemoryContextCheckSize(), AllocBlockData::next, AllocBlockData::prev, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MEMPOOL_ALLOC.

Referenced by AllocSetAlloc().

◆ AllocSetContextCreateInternal()

MemoryContext AllocSetContextCreateInternal ( MemoryContext  parent,
const char *  name,
Size  minContextSize,
Size  initBlockSize,
Size  maxBlockSize 
)

Definition at line 347 of file aset.c.

352{
353 int freeListIndex;
355 AllocSet set;
356 AllocBlock block;
357
358 /* ensure MemoryChunk's size is properly maxaligned */
360 "sizeof(MemoryChunk) is not maxaligned");
361 /* check we have enough space to store the freelist link */
363 "sizeof(AllocFreeListLink) larger than minimum allocation size");
364
365 /*
366 * First, validate allocation parameters. Once these were regular runtime
367 * tests and elog's, but in practice Asserts seem sufficient because
368 * nobody varies their parameters at runtime. We somewhat arbitrarily
369 * enforce a minimum 1K block size. We restrict the maximum block size to
370 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
371 * regards to addressing the offset between the chunk and the block that
372 * the chunk is stored on. We would be unable to store the offset between
373 * the chunk and block for any chunks that were beyond
374 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
375 * larger than this.
376 */
377 Assert(initBlockSize == MAXALIGN(initBlockSize) &&
378 initBlockSize >= 1024);
379 Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
380 maxBlockSize >= initBlockSize &&
381 AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
382 Assert(minContextSize == 0 ||
384 minContextSize >= 1024 &&
385 minContextSize <= maxBlockSize));
386 Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
387
388 /*
389 * Check whether the parameters match either available freelist. We do
390 * not need to demand a match of maxBlockSize.
391 */
393 initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
394 freeListIndex = 0;
396 initBlockSize == ALLOCSET_SMALL_INITSIZE)
397 freeListIndex = 1;
398 else
399 freeListIndex = -1;
400
401 /*
402 * If a suitable freelist entry exists, just recycle that context.
403 */
404 if (freeListIndex >= 0)
405 {
406 AllocSetFreeList *freelist = &context_freelists[freeListIndex];
407
408 if (freelist->first_free != NULL)
409 {
410 /* Remove entry from freelist */
411 set = freelist->first_free;
412 freelist->first_free = (AllocSet) set->header.nextchild;
413 freelist->num_free--;
414
415 /* Update its maxBlockSize; everything else should be OK */
416 set->maxBlockSize = maxBlockSize;
417
418 /* Reinitialize its header, installing correct name and parent */
422 parent,
423 name);
424
425 ((MemoryContext) set)->mem_allocated =
426 KeeperBlock(set)->endptr - ((char *) set);
427
428 return (MemoryContext) set;
429 }
430 }
431
432 /* Determine size of initial block */
435 if (minContextSize != 0)
437 else
438 firstBlockSize = Max(firstBlockSize, initBlockSize);
439
440 /*
441 * Allocate the initial block. Unlike other aset.c blocks, it starts with
442 * the context header and its block header follows that.
443 */
445 if (set == NULL)
446 {
451 errmsg("out of memory"),
452 errdetail("Failed while creating memory context \"%s\".",
453 name)));
454 }
455
456 /*
457 * Avoid writing code that can fail between here and MemoryContextCreate;
458 * we'd leak the header/initial block if we ereport in this stretch.
459 */
460
461 /* Create a vpool associated with the context */
462 VALGRIND_CREATE_MEMPOOL(set, 0, false);
463
464 /*
465 * Create a vchunk covering both the AllocSetContext struct and the keeper
466 * block's header. (Perhaps it would be more sensible for these to be two
467 * separate vchunks, but doing that seems to tickle bugs in some versions
468 * of Valgrind.) We must have these vchunks, and also a vchunk for each
469 * subsequently-added block header, so that Valgrind considers the
470 * pointers within them while checking for leaked memory. Note that
471 * Valgrind doesn't distinguish between these vchunks and those created by
472 * mcxt.c for the user-accessible-data chunks we allocate.
473 */
475
476 /* Fill in the initial block's block header */
477 block = KeeperBlock(set);
478 block->aset = set;
479 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
480 block->endptr = ((char *) set) + firstBlockSize;
481 block->prev = NULL;
482 block->next = NULL;
483
484 /* Mark unallocated space NOACCESS; leave the block header alone. */
485 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
486
487 /* Remember block as part of block list */
488 set->blocks = block;
489
490 /* Finish filling in aset-specific parts of the context header */
491 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
492
493 set->initBlockSize = (uint32) initBlockSize;
494 set->maxBlockSize = (uint32) maxBlockSize;
495 set->nextBlockSize = (uint32) initBlockSize;
496 set->freeListIndex = freeListIndex;
497
498 /*
499 * Compute the allocation chunk size limit for this context. It can't be
500 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
501 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
502 * even a significant fraction of it, should be treated as large chunks
503 * too. For the typical case of maxBlockSize a power of 2, the chunk size
504 * limit will be at most 1/8th maxBlockSize, so that given a stream of
505 * requests that are all the maximum chunk size we will waste at most
506 * 1/8th of the allocated space.
507 *
508 * Determine the maximum size that a chunk can be before we allocate an
509 * entire AllocBlock dedicated for that chunk. We set the absolute limit
510 * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
511 * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
512 * sized block. (We opt to keep allocChunkLimit a power-of-2 value
513 * primarily for legacy reasons rather than calculating it so that exactly
514 * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
515 */
517 while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
518 (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
519 set->allocChunkLimit >>= 1;
520
521 /* Finally, do the type-independent part of context creation */
525 parent,
526 name);
527
528 ((MemoryContext) set)->mem_allocated = firstBlockSize;
529
530 return (MemoryContext) set;
531}
uint32 initBlockSize
Definition aset.c:165
int freeListIndex
Definition aset.c:170

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNK_FRACTION, ALLOC_CHUNK_LIMIT, ALLOC_CHUNKHDRSZ, ALLOC_MINBITS, AllocSetContext::allocChunkLimit, AllocHugeSizeIsValid, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MINSIZE, AllocBlockData::aset, Assert, AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, ereport, errcode(), errdetail(), errmsg(), ERROR, fb(), FIRST_BLOCKHDRSZ, AllocSetFreeList::first_free, AllocSetContext::freelist, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, AllocSetContext::initBlockSize, KeeperBlock, malloc, Max, MAXALIGN, AllocSetContext::maxBlockSize, MCTX_ASET_ID, MEMORYCHUNK_MAX_BLOCKOFFSET, MemoryContextCreate(), MemoryContextStats(), MemSetAligned, name, AllocBlockData::next, AllocSetContext::nextBlockSize, MemoryContextData::nextchild, AllocSetFreeList::num_free, AllocBlockData::prev, StaticAssertDecl, TopMemoryContext, VALGRIND_CREATE_MEMPOOL, VALGRIND_MAKE_MEM_NOACCESS, and VALGRIND_MEMPOOL_ALLOC.

◆ AllocSetDelete()

void AllocSetDelete ( MemoryContext  context)

Definition at line 632 of file aset.c.

633{
634 AllocSet set = (AllocSet) context;
635 AllocBlock block = set->blocks;
637
639
640#ifdef MEMORY_CONTEXT_CHECKING
641 /* Check for corruption and leaks before freeing */
642 AllocSetCheck(context);
643#endif
644
645 /* Remember keeper block size for Assert below */
646 keepersize = KeeperBlock(set)->endptr - ((char *) set);
647
648 /*
649 * If the context is a candidate for a freelist, put it into that freelist
650 * instead of destroying it.
651 */
652 if (set->freeListIndex >= 0)
653 {
655
656 /*
657 * Reset the context, if it needs it, so that we aren't hanging on to
658 * more than the initial malloc chunk.
659 */
660 if (!context->isReset)
661 MemoryContextResetOnly(context);
662
663 /*
664 * If the freelist is full, just discard what's already in it. See
665 * comments with context_freelists[].
666 */
667 if (freelist->num_free >= MAX_FREE_CONTEXTS)
668 {
669 while (freelist->first_free != NULL)
670 {
671 AllocSetContext *oldset = freelist->first_free;
672
674 freelist->num_free--;
675
676 /* Destroy the context's vpool --- see notes below */
678
679 /* All that remains is to free the header/initial block */
680 free(oldset);
681 }
682 Assert(freelist->num_free == 0);
683 }
684
685 /* Now add the just-deleted context to the freelist. */
686 set->header.nextchild = (MemoryContext) freelist->first_free;
687 freelist->first_free = set;
688 freelist->num_free++;
689
690 return;
691 }
692
693 /* Free all blocks, except the keeper which is part of context header */
694 while (block != NULL)
695 {
696 AllocBlock next = block->next;
697
698 if (!IsKeeperBlock(set, block))
699 context->mem_allocated -= block->endptr - ((char *) block);
700
701#ifdef CLOBBER_FREED_MEMORY
702 wipe_mem(block, block->freeptr - ((char *) block));
703#endif
704
705 if (!IsKeeperBlock(set, block))
706 {
707 /* As in AllocSetReset, free block-header vchunks explicitly */
708 VALGRIND_MEMPOOL_FREE(set, block);
709 free(block);
710 }
711
712 block = next;
713 }
714
715 Assert(context->mem_allocated == keepersize);
716
717 /*
718 * Destroy the vpool. We don't seem to need to explicitly free the
719 * initial block's header vchunk, nor any user-data vchunks that Valgrind
720 * still knows about; they'll all go away automatically.
721 */
723
724 /* Finally, free the context header, including the keeper block */
725 free(set);
726}

References AllocSetIsValid, Assert, AllocSetContext::blocks, context_freelists, AllocBlockData::endptr, fb(), AllocSetFreeList::first_free, free, AllocSetContext::freeListIndex, AllocBlockData::freeptr, AllocSetContext::header, IsKeeperBlock, MemoryContextData::isReset, KeeperBlock, MAX_FREE_CONTEXTS, MemoryContextData::mem_allocated, MemoryContextResetOnly(), next, AllocBlockData::next, MemoryContextData::nextchild, AllocSetFreeList::num_free, PG_USED_FOR_ASSERTS_ONLY, VALGRIND_DESTROY_MEMPOOL, and VALGRIND_MEMPOOL_FREE.

◆ AllocSetFree()

void AllocSetFree ( void *  pointer)

Definition at line 1107 of file aset.c.

1108{
1109 AllocSet set;
1111
1112 /* Allow access to the chunk header. */
1114
1116 {
1117 /* Release single-chunk block. */
1119
1120 /*
1121 * Try to verify that we have a sane block pointer: the block header
1122 * should reference an aset and the freeptr should match the endptr.
1123 */
1124 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1125 elog(ERROR, "could not find block containing chunk %p", chunk);
1126
1127 set = block->aset;
1128
1129#ifdef MEMORY_CONTEXT_CHECKING
1130 {
1131 /* Test for someone scribbling on unused space in chunk */
1132 Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1133 if (!sentinel_ok(pointer, chunk->requested_size))
1134 elog(WARNING, "detected write past chunk end in %s %p",
1135 set->header.name, chunk);
1136 }
1137#endif
1138
1139 /* OK, remove block from aset's list and free it */
1140 if (block->prev)
1141 block->prev->next = block->next;
1142 else
1143 set->blocks = block->next;
1144 if (block->next)
1145 block->next->prev = block->prev;
1146
1147 set->header.mem_allocated -= block->endptr - ((char *) block);
1148
1149#ifdef CLOBBER_FREED_MEMORY
1150 wipe_mem(block, block->freeptr - ((char *) block));
1151#endif
1152
1153 /* As in AllocSetReset, free block-header vchunks explicitly */
1154 VALGRIND_MEMPOOL_FREE(set, block);
1155
1156 free(block);
1157 }
1158 else
1159 {
1161 int fidx;
1163
1164 /*
1165 * In this path, for speed reasons we just Assert that the referenced
1166 * block is good. We can also Assert that the value field is sane.
1167 * Future field experience may show that these Asserts had better
1168 * become regular runtime test-and-elog checks.
1169 */
1170 Assert(AllocBlockIsValid(block));
1171 set = block->aset;
1172
1176
1177#ifdef MEMORY_CONTEXT_CHECKING
1178 /* Test for someone scribbling on unused space in chunk */
1179 if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1180 if (!sentinel_ok(pointer, chunk->requested_size))
1181 elog(WARNING, "detected write past chunk end in %s %p",
1182 set->header.name, chunk);
1183#endif
1184
1185#ifdef CLOBBER_FREED_MEMORY
1187#endif
1188 /* push this chunk onto the top of the free list */
1190 link->next = set->freelist[fidx];
1192 set->freelist[fidx] = chunk;
1193
1194#ifdef MEMORY_CONTEXT_CHECKING
1195
1196 /*
1197 * Reset requested_size to InvalidAllocSize in chunks that are on free
1198 * list.
1199 */
1200 chunk->requested_size = InvalidAllocSize;
1201#endif
1202 }
1203}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, AllocSetContext::blocks, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, fb(), free, AllocSetContext::freelist, FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, AllocSetContext::header, InvalidAllocSize, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and WARNING.

Referenced by AllocSetRealloc().

◆ AllocSetFreeIndex()

static int AllocSetFreeIndex ( Size  size)
inlinestatic

Definition at line 277 of file aset.c.

278{
279 int idx;
280
281 if (size > (1 << ALLOC_MINBITS))
282 {
283 /*----------
284 * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
285 * This is the same as
286 * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
287 * or equivalently
288 * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
289 *
290 * However, for platforms without intrinsic support, we duplicate the
291 * logic here, allowing an additional optimization. It's reasonable
292 * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
293 * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
294 * the last two bytes.
295 *
296 * Yes, this function is enough of a hot-spot to make it worth this
297 * much trouble.
298 *----------
299 */
300#ifdef HAVE_BITSCAN_REVERSE
301 idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
302#else
303 uint32 t,
304 tsize;
305
306 /* Statically assert that we only have a 16-bit input value. */
308 "ALLOC_CHUNK_LIMIT must be less than 64kB");
309
310 tsize = size - 1;
311 t = tsize >> 8;
313 idx -= ALLOC_MINBITS - 1;
314#endif
315
317 }
318 else
319 idx = 0;
320
321 return idx;
322}

References ALLOC_CHUNK_LIMIT, ALLOC_MINBITS, ALLOCSET_NUM_FREELISTS, Assert, fb(), idx(), pg_leftmost_one_pos, pg_leftmost_one_pos32(), and StaticAssertDecl.

Referenced by AllocSetAlloc(), and AllocSetAllocFromNewBlock().

◆ AllocSetGetChunkContext()

MemoryContext AllocSetGetChunkContext ( void *  pointer)

Definition at line 1490 of file aset.c.

1491{
1493 AllocBlock block;
1494 AllocSet set;
1495
1496 /* Allow access to the chunk header. */
1498
1501 else
1503
1504 /* Disallow access to the chunk header. */
1506
1507 Assert(AllocBlockIsValid(block));
1508 set = block->aset;
1509
1510 return &set->header;
1511}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocBlockData::aset, Assert, ExternalChunkGetBlock, fb(), AllocSetContext::header, MemoryChunkGetBlock(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetGetChunkSpace()

Size AllocSetGetChunkSpace ( void *  pointer)

Definition at line 1519 of file aset.c.

1520{
1522 int fidx;
1523
1524 /* Allow access to the chunk header. */
1526
1528 {
1530
1531 /* Disallow access to the chunk header. */
1533
1534 Assert(AllocBlockIsValid(block));
1535
1536 return block->endptr - (char *) chunk;
1537 }
1538
1541
1542 /* Disallow access to the chunk header. */
1544
1546}

References ALLOC_CHUNKHDRSZ, AllocBlockIsValid, Assert, AllocBlockData::endptr, ExternalChunkGetBlock, fb(), FreeListIdxIsValid, GetChunkSizeFromFreeListIdx, MemoryChunkGetValue(), MemoryChunkIsExternal(), PointerGetMemoryChunk, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ AllocSetIsEmpty()

bool AllocSetIsEmpty ( MemoryContext  context)

Definition at line 1553 of file aset.c.

1554{
1555 Assert(AllocSetIsValid(context));
1556
1557 /*
1558 * For now, we say "empty" only if the context is new or just reset. We
1559 * could examine the freelists to determine if all space has been freed,
1560 * but it's not really worth the trouble for present uses of this
1561 * functionality.
1562 */
1563 if (context->isReset)
1564 return true;
1565 return false;
1566}

References AllocSetIsValid, Assert, and MemoryContextData::isReset.

◆ AllocSetRealloc()

void * AllocSetRealloc ( void *  pointer,
Size  size,
int  flags 
)

Definition at line 1218 of file aset.c.

1219{
1220 AllocBlock block;
1221 AllocSet set;
1224 int fidx;
1225
1226 /* Allow access to the chunk header. */
1228
1230 {
1231 /*
1232 * The chunk must have been allocated as a single-chunk block. Use
1233 * realloc() to make the containing block bigger, or smaller, with
1234 * minimum space wastage.
1235 */
1237 Size chksize;
1238 Size blksize;
1240
1242
1243 /*
1244 * Try to verify that we have a sane block pointer: the block header
1245 * should reference an aset and the freeptr should match the endptr.
1246 */
1247 if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1248 elog(ERROR, "could not find block containing chunk %p", chunk);
1249
1250 set = block->aset;
1251
1252 /* only check size in paths where the limits could be hit */
1253 MemoryContextCheckSize((MemoryContext) set, size, flags);
1254
1255 oldchksize = block->endptr - (char *) pointer;
1256
1257#ifdef MEMORY_CONTEXT_CHECKING
1258 /* Test for someone scribbling on unused space in chunk */
1259 Assert(chunk->requested_size < oldchksize);
1260 if (!sentinel_ok(pointer, chunk->requested_size))
1261 elog(WARNING, "detected write past chunk end in %s %p",
1262 set->header.name, chunk);
1263#endif
1264
1265#ifdef MEMORY_CONTEXT_CHECKING
1266 /* ensure there's always space for the sentinel byte */
1267 chksize = MAXALIGN(size + 1);
1268#else
1269 chksize = MAXALIGN(size);
1270#endif
1271
1272 /* Do the realloc */
1274 oldblksize = block->endptr - ((char *) block);
1275
1276 newblock = (AllocBlock) realloc(block, blksize);
1277 if (newblock == NULL)
1278 {
1279 /* Disallow access to the chunk header. */
1281 return MemoryContextAllocationFailure(&set->header, size, flags);
1282 }
1283
1284 /*
1285 * Move the block-header vchunk explicitly. (mcxt.c will take care of
1286 * moving the vchunk for the user data.)
1287 */
1289 block = newblock;
1290
1291 /* updated separately, not to underflow when (oldblksize > blksize) */
1293 set->header.mem_allocated += blksize;
1294
1295 block->freeptr = block->endptr = ((char *) block) + blksize;
1296
1297 /* Update pointers since block has likely been moved */
1298 chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1299 pointer = MemoryChunkGetPointer(chunk);
1300 if (block->prev)
1301 block->prev->next = block;
1302 else
1303 set->blocks = block;
1304 if (block->next)
1305 block->next->prev = block;
1306
1307#ifdef MEMORY_CONTEXT_CHECKING
1308#ifdef RANDOMIZE_ALLOCATED_MEMORY
1309
1310 /*
1311 * We can only randomize the extra space if we know the prior request.
1312 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1313 */
1314 if (size > chunk->requested_size)
1315 randomize_mem((char *) pointer + chunk->requested_size,
1316 size - chunk->requested_size);
1317#else
1318
1319 /*
1320 * If this is an increase, realloc() will have marked any
1321 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1322 * also need to adjust trailing bytes from the old allocation (from
1323 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1324 * Make sure not to mark too many bytes in case chunk->requested_size
1325 * < size < oldchksize.
1326 */
1327#ifdef USE_VALGRIND
1328 if (Min(size, oldchksize) > chunk->requested_size)
1329 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1330 Min(size, oldchksize) - chunk->requested_size);
1331#endif
1332#endif
1333
1334 chunk->requested_size = size;
1335 /* set mark to catch clobber of "unused" space */
1336 Assert(size < chksize);
1337 set_sentinel(pointer, size);
1338#else /* !MEMORY_CONTEXT_CHECKING */
1339
1340 /*
1341 * We may need to adjust marking of bytes from the old allocation as
1342 * some of them may be marked NOACCESS. We don't know how much of the
1343 * old chunk size was the requested size; it could have been as small
1344 * as one byte. We have to be conservative and just mark the entire
1345 * old portion DEFINED. Make sure not to mark memory beyond the new
1346 * allocation in case it's smaller than the old one.
1347 */
1348 VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1349#endif
1350
1351 /* Ensure any padding bytes are marked NOACCESS. */
1352 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1353
1354 /* Disallow access to the chunk header. */
1356
1357 return pointer;
1358 }
1359
1360 block = MemoryChunkGetBlock(chunk);
1361
1362 /*
1363 * In this path, for speed reasons we just Assert that the referenced
1364 * block is good. We can also Assert that the value field is sane. Future
1365 * field experience may show that these Asserts had better become regular
1366 * runtime test-and-elog checks.
1367 */
1368 Assert(AllocBlockIsValid(block));
1369 set = block->aset;
1370
1374
1375#ifdef MEMORY_CONTEXT_CHECKING
1376 /* Test for someone scribbling on unused space in chunk */
1377 if (chunk->requested_size < oldchksize)
1378 if (!sentinel_ok(pointer, chunk->requested_size))
1379 elog(WARNING, "detected write past chunk end in %s %p",
1380 set->header.name, chunk);
1381#endif
1382
1383 /*
1384 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1385 * allocated area already is >= the new size. (In particular, we will
1386 * fall out here if the requested size is a decrease.)
1387 */
1388 if (oldchksize >= size)
1389 {
1390#ifdef MEMORY_CONTEXT_CHECKING
1391 Size oldrequest = chunk->requested_size;
1392
1393#ifdef RANDOMIZE_ALLOCATED_MEMORY
1394 /* We can only fill the extra space if we know the prior request */
1395 if (size > oldrequest)
1396 randomize_mem((char *) pointer + oldrequest,
1397 size - oldrequest);
1398#endif
1399
1400 chunk->requested_size = size;
1401
1402 /*
1403 * If this is an increase, mark any newly-available part UNDEFINED.
1404 * Otherwise, mark the obsolete part NOACCESS.
1405 */
1406 if (size > oldrequest)
1407 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1408 size - oldrequest);
1409 else
1410 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1411 oldchksize - size);
1412
1413 /* set mark to catch clobber of "unused" space */
1414 if (size < oldchksize)
1415 set_sentinel(pointer, size);
1416#else /* !MEMORY_CONTEXT_CHECKING */
1417
1418 /*
1419 * We don't have the information to determine whether we're growing
1420 * the old request or shrinking it, so we conservatively mark the
1421 * entire new allocation DEFINED.
1422 */
1424 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1425#endif
1426
1427 /* Disallow access to the chunk header. */
1429
1430 return pointer;
1431 }
1432 else
1433 {
1434 /*
1435 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1436 * allocate a new chunk and copy the data. Since we know the existing
1437 * data isn't huge, this won't involve any great memcpy expense, so
1438 * it's not worth being smarter. (At one time we tried to avoid
1439 * memcpy when it was possible to enlarge the chunk in-place, but that
1440 * turns out to misbehave unpleasantly for repeated cycles of
1441 * palloc/repalloc/pfree: the eventually freed chunks go into the
1442 * wrong freelist for the next initial palloc request, and so we leak
1443 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1444 */
1446 Size oldsize;
1447
1448 /* allocate new chunk (this also checks size is valid) */
1449 newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1450
1451 /* leave immediately if request was not completed */
1452 if (newPointer == NULL)
1453 {
1454 /* Disallow access to the chunk header. */
1456 return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1457 }
1458
1459 /*
1460 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1461 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1462 * definedness from the old allocation to the new. If we know the old
1463 * allocation, copy just that much. Otherwise, make the entire old
1464 * chunk defined to avoid errors as we copy the currently-NOACCESS
1465 * trailing bytes.
1466 */
1468#ifdef MEMORY_CONTEXT_CHECKING
1469 oldsize = chunk->requested_size;
1470#else
1473#endif
1474
1475 /* transfer existing data (certain to fit) */
1476 memcpy(newPointer, pointer, oldsize);
1477
1478 /* free old chunk */
1479 AllocSetFree(pointer);
1480
1481 return newPointer;
1482 }
1483}

References ALLOC_BLOCKHDRSZ, ALLOC_CHUNKHDRSZ, AllocBlockIsValid, AllocSetAlloc(), AllocSetFree(), AllocBlockData::aset, Assert, elog, AllocBlockData::endptr, ERROR, ExternalChunkGetBlock, fb(), FreeListIdxIsValid, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, AllocSetContext::header, MAXALIGN, MemoryContextData::mem_allocated, MemoryChunkGetBlock(), MemoryChunkGetPointer, MemoryChunkGetValue(), MemoryChunkIsExternal(), MemoryContextAllocationFailure(), MemoryContextCheckSize(), Min, MemoryContextData::name, AllocBlockData::next, PointerGetMemoryChunk, AllocBlockData::prev, realloc, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MAKE_MEM_UNDEFINED, VALGRIND_MEMPOOL_CHANGE, and WARNING.

◆ AllocSetReset()

void AllocSetReset ( MemoryContext  context)

Definition at line 546 of file aset.c.

547{
548 AllocSet set = (AllocSet) context;
549 AllocBlock block;
551
553
554#ifdef MEMORY_CONTEXT_CHECKING
555 /* Check for corruption and leaks before freeing */
556 AllocSetCheck(context);
557#endif
558
559 /* Remember keeper block size for Assert below */
560 keepersize = KeeperBlock(set)->endptr - ((char *) set);
561
562 /* Clear chunk freelists */
563 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
564
565 block = set->blocks;
566
567 /* New blocks list will be just the keeper block */
568 set->blocks = KeeperBlock(set);
569
570 while (block != NULL)
571 {
572 AllocBlock next = block->next;
573
574 if (IsKeeperBlock(set, block))
575 {
576 /* Reset the block, but don't return it to malloc */
577 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
578
579#ifdef CLOBBER_FREED_MEMORY
581#else
582 /* wipe_mem() would have done this */
584#endif
585 block->freeptr = datastart;
586 block->prev = NULL;
587 block->next = NULL;
588 }
589 else
590 {
591 /* Normal case, release the block */
592 context->mem_allocated -= block->endptr - ((char *) block);
593
594#ifdef CLOBBER_FREED_MEMORY
595 wipe_mem(block, block->freeptr - ((char *) block));
596#endif
597
598 /*
599 * We need to free the block header's vchunk explicitly, although
600 * the user-data vchunks within will go away in the TRIM below.
601 * Otherwise Valgrind complains about leaked allocations.
602 */
603 VALGRIND_MEMPOOL_FREE(set, block);
604
605 free(block);
606 }
607 block = next;
608 }
609
610 Assert(context->mem_allocated == keepersize);
611
612 /*
613 * Instruct Valgrind to throw away all the vchunks associated with this
614 * context, except for the one covering the AllocSetContext and
615 * keeper-block header. This gets rid of the vchunks for whatever user
616 * data is getting discarded by the context reset.
617 */
619
620 /* Reset block size allocation sequence, too */
621 set->nextBlockSize = set->initBlockSize;
622}

References ALLOC_BLOCKHDRSZ, AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), FIRST_BLOCKHDRSZ, free, AllocSetContext::freelist, AllocBlockData::freeptr, AllocSetContext::initBlockSize, IsKeeperBlock, KeeperBlock, MemoryContextData::mem_allocated, MemSetAligned, next, AllocBlockData::next, AllocSetContext::nextBlockSize, PG_USED_FOR_ASSERTS_ONLY, AllocBlockData::prev, VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_FREE, and VALGRIND_MEMPOOL_TRIM.

◆ AllocSetStats()

void AllocSetStats ( MemoryContext  context,
MemoryStatsPrintFunc  printfunc,
void *  passthru,
MemoryContextCounters *  totals,
bool  print_to_stderr 
)

Definition at line 1578 of file aset.c.

1581{
1582 AllocSet set = (AllocSet) context;
1583 Size nblocks = 0;
1584 Size freechunks = 0;
1585 Size totalspace;
1586 Size freespace = 0;
1587 AllocBlock block;
1588 int fidx;
1589
1590 Assert(AllocSetIsValid(set));
1591
1592 /* Include context header in totalspace */
1593 totalspace = MAXALIGN(sizeof(AllocSetContext));
1594
1595 for (block = set->blocks; block != NULL; block = block->next)
1596 {
1597 nblocks++;
1598 totalspace += block->endptr - ((char *) block);
1599 freespace += block->endptr - block->freeptr;
1600 }
1601 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1602 {
1604 MemoryChunk *chunk = set->freelist[fidx];
1605
1606 while (chunk != NULL)
1607 {
1609
1610 /* Allow access to the chunk header. */
1614
1615 freechunks++;
1616 freespace += chksz + ALLOC_CHUNKHDRSZ;
1617
1619 chunk = link->next;
1621 }
1622 }
1623
1624 if (printfunc)
1625 {
1626 char stats_string[200];
1627
1629 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1630 totalspace, nblocks, freespace, freechunks,
1631 totalspace - freespace);
1633 }
1634
1635 if (totals)
1636 {
1637 totals->nblocks += nblocks;
1638 totals->freechunks += freechunks;
1639 totals->totalspace += totalspace;
1640 totals->freespace += freespace;
1641 }
1642}

References ALLOC_CHUNKHDRSZ, ALLOCSET_NUM_FREELISTS, AllocSetIsValid, Assert, AllocSetContext::blocks, AllocBlockData::endptr, fb(), AllocSetContext::freelist, AllocBlockData::freeptr, GetChunkSizeFromFreeListIdx, GetFreeListLink, MAXALIGN, MemoryChunkGetValue(), AllocBlockData::next, snprintf, VALGRIND_MAKE_MEM_DEFINED, and VALGRIND_MAKE_MEM_NOACCESS.

◆ StaticAssertDecl()

Variable Documentation

◆ context_freelists

AllocSetFreeList context_freelists[2]
static
Initial value:
=
{
{
0, NULL
},
{
0, NULL
}
}

Definition at line 257 of file aset.c.

258{
259 {
260 0, NULL
261 },
262 {
263 0, NULL
264 }
265};

Referenced by AllocSetContextCreateInternal(), and AllocSetDelete().