/* PostgreSQL Source Code (git master) — aset.c */
/*-------------------------------------------------------------------------
 *
 * aset.c
 *    Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *  This is a new (Feb. 05, 1999) implementation of the allocation set
 *  routines. AllocSet...() does not use OrderedSet...() any more.
 *  Instead it manages allocations in a block pool by itself, combining
 *  many small allocations in a few bigger blocks. AllocSetFree() normally
 *  doesn't free() memory really. It just adds the free'd area to some
 *  list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *  at once on AllocSetReset(), which happens when the memory context gets
 *  destroyed.
 *              Jan Wieck
 *
 *  Performance improvement from Tom Lane, 8/99: for extremely large request
 *  sizes, we do want to be able to give the memory back to free() as soon
 *  as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *  freelist entries that might never be usable.  This is specially needed
 *  when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *  the previous instances of the block were guaranteed to be wasted until
 *  AllocSetReset() under the old way.
 *
 *  Further improvement 12/00: as the code stood, request sizes in the
 *  midrange between "small" and "large" were handled very inefficiently,
 *  because any sufficiently large free chunk would be used to satisfy a
 *  request, even if it was much larger than necessary.  This led to more
 *  and more wasted space in allocated chunks over time.  To fix, get rid
 *  of the midrange behavior: we now handle only "small" power-of-2-size
 *  chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *  the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "port/pg_bitutils.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/memutils_internal.h"
#include "utils/memutils_memorychunk.h"

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.  This 8-byte
 * minimum also allows us to store a pointer to the next freelist item within
 * the chunk of memory itself.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(MemoryChunk)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

115 /*
116  * AllocFreeListLink
117  * When pfreeing memory, if we maintain a freelist for the given chunk's
118  * size then we use a AllocFreeListLink to point to the current item in
119  * the AllocSetContext's freelist and then set the given freelist element
120  * to point to the chunk being freed.
121  */
122 typedef struct AllocFreeListLink
123 {
126 
127 /*
128  * Obtain a AllocFreeListLink for the given chunk. Allocation sizes are
129  * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
130  * itself to store the freelist link.
131  */
132 #define GetFreeListLink(chkptr) \
133  (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
134 
135 /* Validate a freelist index retrieved from a chunk header */
136 #define FreeListIdxIsValid(fidx) \
137  ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
138 
139 /* Determine the size of the chunk based on the freelist index */
140 #define GetChunkSizeFromFreeListIdx(fidx) \
141  ((((Size) 1) << ALLOC_MINBITS) << (fidx))
142 
143 /*
144  * AllocSetContext is our standard implementation of MemoryContext.
145  *
146  * Note: header.isReset means there is nothing for AllocSetReset to do.
147  * This is different from the aset being physically empty (empty blocks list)
148  * because we will still have a keeper block. It's also different from the set
149  * being logically empty, because we don't attempt to detect pfree'ing the
150  * last active chunk.
151  */
152 typedef struct AllocSetContext
153 {
154  MemoryContextData header; /* Standard memory-context fields */
155  /* Info about storage allocated in this context: */
156  AllocBlock blocks; /* head of list of blocks in this set */
157  MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
158  /* Allocation parameters for this context: */
159  Size initBlockSize; /* initial block size */
160  Size maxBlockSize; /* maximum block size */
161  Size nextBlockSize; /* next block size to allocate */
162  Size allocChunkLimit; /* effective chunk size limit */
163  AllocBlock keeper; /* keep this block over resets */
164  /* freelist this context could be put in, or -1 if not a candidate: */
165  int freeListIndex; /* index in context_freelists[], or -1 */
167 
169 
170 /*
171  * AllocBlock
172  * An AllocBlock is the unit of memory that is obtained by aset.c
173  * from malloc(). It contains one or more MemoryChunks, which are
174  * the units requested by palloc() and freed by pfree(). MemoryChunks
175  * cannot be returned to malloc() individually, instead they are put
176  * on freelists by pfree() and re-used by the next palloc() that has
177  * a matching request size.
178  *
179  * AllocBlockData is the header data for a block --- the usable space
180  * within the block begins at the next alignment boundary.
181  */
182 typedef struct AllocBlockData
183 {
184  AllocSet aset; /* aset that owns this block */
185  AllocBlock prev; /* prev block in aset's blocks list, if any */
186  AllocBlock next; /* next block in aset's blocks list, if any */
187  char *freeptr; /* start of free space in this block */
188  char *endptr; /* end of space in this block */
190 
/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) \
	(PointerIsValid(set) && IsA(set, AllocSetContext))

/*
 * AllocBlockIsValid
 *		True iff block is valid block of allocation set.
 */
#define AllocBlockIsValid(block) \
	(PointerIsValid(block) && AllocSetIsValid((block)->aset))

/*
 * We always store external chunks on a dedicated block.  This makes fetching
 * the block from an external chunk easy since it's always the first and only
 * chunk on the block.
 */
#define ExternalChunkGetBlock(chunk) \
	(AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)

219 /*
220  * Rather than repeatedly creating and deleting memory contexts, we keep some
221  * freed contexts in freelists so that we can hand them out again with little
222  * work. Before putting a context in a freelist, we reset it so that it has
223  * only its initial malloc chunk and no others. To be a candidate for a
224  * freelist, a context must have the same minContextSize/initBlockSize as
225  * other contexts in the list; but its maxBlockSize is irrelevant since that
226  * doesn't affect the size of the initial chunk.
227  *
228  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
229  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
230  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
231  *
232  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
233  * hopes of improving locality of reference. But if there get to be too
234  * many contexts in the list, we'd prefer to drop the most-recently-created
235  * contexts in hopes of keeping the process memory map compact.
236  * We approximate that by simply deleting all existing entries when the list
237  * overflows, on the assumption that queries that allocate a lot of contexts
238  * will probably free them in more or less reverse order of allocation.
239  *
240  * Contexts in a freelist are chained via their nextchild pointers.
241  */
242 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
243 
244 typedef struct AllocSetFreeList
245 {
246  int num_free; /* current list length */
247  AllocSetContext *first_free; /* list header */
249 
250 /* context_freelists[0] is for default params, [1] for small params */
252 {
253  {
254  0, NULL
255  },
256  {
257  0, NULL
258  }
259 };
260 
261 
262 /* ----------
263  * AllocSetFreeIndex -
264  *
265  * Depending on the size of an allocation compute which freechunk
266  * list of the alloc set it belongs to. Caller must have verified
267  * that size <= ALLOC_CHUNK_LIMIT.
268  * ----------
269  */
270 static inline int
272 {
273  int idx;
274 
275  if (size > (1 << ALLOC_MINBITS))
276  {
277  /*----------
278  * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
279  * This is the same as
280  * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
281  * or equivalently
282  * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
283  *
284  * However, for platforms without intrinsic support, we duplicate the
285  * logic here, allowing an additional optimization. It's reasonable
286  * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
287  * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
288  * the last two bytes.
289  *
290  * Yes, this function is enough of a hot-spot to make it worth this
291  * much trouble.
292  *----------
293  */
294 #ifdef HAVE_BITSCAN_REVERSE
295  idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
296 #else
297  uint32 t,
298  tsize;
299 
300  /* Statically assert that we only have a 16-bit input value. */
302  "ALLOC_CHUNK_LIMIT must be less than 64kB");
303 
304  tsize = size - 1;
305  t = tsize >> 8;
306  idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
307  idx -= ALLOC_MINBITS - 1;
308 #endif
309 
311  }
312  else
313  idx = 0;
314 
315  return idx;
316 }
317 
318 
319 /*
320  * Public routines
321  */
322 
323 
324 /*
325  * AllocSetContextCreateInternal
326  * Create a new AllocSet context.
327  *
328  * parent: parent context, or NULL if top-level context
329  * name: name of context (must be statically allocated)
330  * minContextSize: minimum context size
331  * initBlockSize: initial allocation block size
332  * maxBlockSize: maximum allocation block size
333  *
334  * Most callers should abstract the context size parameters using a macro
335  * such as ALLOCSET_DEFAULT_SIZES.
336  *
337  * Note: don't call this directly; go through the wrapper macro
338  * AllocSetContextCreate.
339  */
342  const char *name,
343  Size minContextSize,
344  Size initBlockSize,
345  Size maxBlockSize)
346 {
347  int freeListIndex;
348  Size firstBlockSize;
349  AllocSet set;
350  AllocBlock block;
351 
352  /* ensure MemoryChunk's size is properly maxaligned */
354  "sizeof(MemoryChunk) is not maxaligned");
355  /* check we have enough space to store the freelist link */
357  "sizeof(AllocFreeListLink) larger than minimum allocation size");
358 
359  /*
360  * First, validate allocation parameters. Once these were regular runtime
361  * tests and elog's, but in practice Asserts seem sufficient because
362  * nobody varies their parameters at runtime. We somewhat arbitrarily
363  * enforce a minimum 1K block size. We restrict the maximum block size to
364  * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
365  * regards to addressing the offset between the chunk and the block that
366  * the chunk is stored on. We would be unable to store the offset between
367  * the chunk and block for any chunks that were beyond
368  * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
369  * larger than this.
370  */
371  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
372  initBlockSize >= 1024);
373  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
374  maxBlockSize >= initBlockSize &&
375  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
376  Assert(minContextSize == 0 ||
377  (minContextSize == MAXALIGN(minContextSize) &&
378  minContextSize >= 1024 &&
379  minContextSize <= maxBlockSize));
380  Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
381 
382  /*
383  * Check whether the parameters match either available freelist. We do
384  * not need to demand a match of maxBlockSize.
385  */
386  if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
387  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
388  freeListIndex = 0;
389  else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
390  initBlockSize == ALLOCSET_SMALL_INITSIZE)
391  freeListIndex = 1;
392  else
393  freeListIndex = -1;
394 
395  /*
396  * If a suitable freelist entry exists, just recycle that context.
397  */
398  if (freeListIndex >= 0)
399  {
400  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
401 
402  if (freelist->first_free != NULL)
403  {
404  /* Remove entry from freelist */
405  set = freelist->first_free;
406  freelist->first_free = (AllocSet) set->header.nextchild;
407  freelist->num_free--;
408 
409  /* Update its maxBlockSize; everything else should be OK */
410  set->maxBlockSize = maxBlockSize;
411 
412  /* Reinitialize its header, installing correct name and parent */
414  T_AllocSetContext,
415  MCTX_ASET_ID,
416  parent,
417  name);
418 
419  ((MemoryContext) set)->mem_allocated =
420  set->keeper->endptr - ((char *) set);
421 
422  return (MemoryContext) set;
423  }
424  }
425 
426  /* Determine size of initial block */
427  firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
429  if (minContextSize != 0)
430  firstBlockSize = Max(firstBlockSize, minContextSize);
431  else
432  firstBlockSize = Max(firstBlockSize, initBlockSize);
433 
434  /*
435  * Allocate the initial block. Unlike other aset.c blocks, it starts with
436  * the context header and its block header follows that.
437  */
438  set = (AllocSet) malloc(firstBlockSize);
439  if (set == NULL)
440  {
441  if (TopMemoryContext)
443  ereport(ERROR,
444  (errcode(ERRCODE_OUT_OF_MEMORY),
445  errmsg("out of memory"),
446  errdetail("Failed while creating memory context \"%s\".",
447  name)));
448  }
449 
450  /*
451  * Avoid writing code that can fail between here and MemoryContextCreate;
452  * we'd leak the header/initial block if we ereport in this stretch.
453  */
454 
455  /* Fill in the initial block's block header */
456  block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
457  block->aset = set;
458  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
459  block->endptr = ((char *) set) + firstBlockSize;
460  block->prev = NULL;
461  block->next = NULL;
462 
463  /* Mark unallocated space NOACCESS; leave the block header alone. */
464  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
465 
466  /* Remember block as part of block list */
467  set->blocks = block;
468  /* Mark block as not to be released at reset time */
469  set->keeper = block;
470 
471  /* Finish filling in aset-specific parts of the context header */
472  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
473 
474  set->initBlockSize = initBlockSize;
475  set->maxBlockSize = maxBlockSize;
476  set->nextBlockSize = initBlockSize;
477  set->freeListIndex = freeListIndex;
478 
479  /*
480  * Compute the allocation chunk size limit for this context. It can't be
481  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
482  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
483  * even a significant fraction of it, should be treated as large chunks
484  * too. For the typical case of maxBlockSize a power of 2, the chunk size
485  * limit will be at most 1/8th maxBlockSize, so that given a stream of
486  * requests that are all the maximum chunk size we will waste at most
487  * 1/8th of the allocated space.
488  *
489  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
490  */
492  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
493 
494  /*
495  * Determine the maximum size that a chunk can be before we allocate an
496  * entire AllocBlock dedicated for that chunk. We set the absolute limit
497  * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
498  * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
499  * sized block. (We opt to keep allocChunkLimit a power-of-2 value
500  * primarily for legacy reasons rather than calculating it so that exactly
501  * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
502  */
504  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
505  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
506  set->allocChunkLimit >>= 1;
507 
508  /* Finally, do the type-independent part of context creation */
510  T_AllocSetContext,
511  MCTX_ASET_ID,
512  parent,
513  name);
514 
515  ((MemoryContext) set)->mem_allocated = firstBlockSize;
516 
517  return (MemoryContext) set;
518 }
519 
520 /*
521  * AllocSetReset
522  * Frees all memory which is allocated in the given set.
523  *
524  * Actually, this routine has some discretion about what to do.
525  * It should mark all allocated chunks freed, but it need not necessarily
526  * give back all the resources the set owns. Our actual implementation is
527  * that we give back all but the "keeper" block (which we must keep, since
528  * it shares a malloc chunk with the context header). In this way, we don't
529  * thrash malloc() when a context is repeatedly reset after small allocations,
530  * which is typical behavior for per-tuple contexts.
531  */
532 void
534 {
535  AllocSet set = (AllocSet) context;
536  AllocBlock block;
537  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
538 
539  Assert(AllocSetIsValid(set));
540 
541 #ifdef MEMORY_CONTEXT_CHECKING
542  /* Check for corruption and leaks before freeing */
543  AllocSetCheck(context);
544 #endif
545 
546  /* Remember keeper block size for Assert below */
547  keepersize = set->keeper->endptr - ((char *) set);
548 
549  /* Clear chunk freelists */
550  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
551 
552  block = set->blocks;
553 
554  /* New blocks list will be just the keeper block */
555  set->blocks = set->keeper;
556 
557  while (block != NULL)
558  {
559  AllocBlock next = block->next;
560 
561  if (block == set->keeper)
562  {
563  /* Reset the block, but don't return it to malloc */
564  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
565 
566 #ifdef CLOBBER_FREED_MEMORY
567  wipe_mem(datastart, block->freeptr - datastart);
568 #else
569  /* wipe_mem() would have done this */
570  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
571 #endif
572  block->freeptr = datastart;
573  block->prev = NULL;
574  block->next = NULL;
575  }
576  else
577  {
578  /* Normal case, release the block */
579  context->mem_allocated -= block->endptr - ((char *) block);
580 
581 #ifdef CLOBBER_FREED_MEMORY
582  wipe_mem(block, block->freeptr - ((char *) block));
583 #endif
584  free(block);
585  }
586  block = next;
587  }
588 
589  Assert(context->mem_allocated == keepersize);
590 
591  /* Reset block size allocation sequence, too */
592  set->nextBlockSize = set->initBlockSize;
593 }
594 
595 /*
596  * AllocSetDelete
597  * Frees all memory which is allocated in the given set,
598  * in preparation for deletion of the set.
599  *
600  * Unlike AllocSetReset, this *must* free all resources of the set.
601  */
602 void
604 {
605  AllocSet set = (AllocSet) context;
606  AllocBlock block = set->blocks;
607  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
608 
609  Assert(AllocSetIsValid(set));
610 
611 #ifdef MEMORY_CONTEXT_CHECKING
612  /* Check for corruption and leaks before freeing */
613  AllocSetCheck(context);
614 #endif
615 
616  /* Remember keeper block size for Assert below */
617  keepersize = set->keeper->endptr - ((char *) set);
618 
619  /*
620  * If the context is a candidate for a freelist, put it into that freelist
621  * instead of destroying it.
622  */
623  if (set->freeListIndex >= 0)
624  {
626 
627  /*
628  * Reset the context, if it needs it, so that we aren't hanging on to
629  * more than the initial malloc chunk.
630  */
631  if (!context->isReset)
632  MemoryContextResetOnly(context);
633 
634  /*
635  * If the freelist is full, just discard what's already in it. See
636  * comments with context_freelists[].
637  */
638  if (freelist->num_free >= MAX_FREE_CONTEXTS)
639  {
640  while (freelist->first_free != NULL)
641  {
642  AllocSetContext *oldset = freelist->first_free;
643 
644  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
645  freelist->num_free--;
646 
647  /* All that remains is to free the header/initial block */
648  free(oldset);
649  }
650  Assert(freelist->num_free == 0);
651  }
652 
653  /* Now add the just-deleted context to the freelist. */
654  set->header.nextchild = (MemoryContext) freelist->first_free;
655  freelist->first_free = set;
656  freelist->num_free++;
657 
658  return;
659  }
660 
661  /* Free all blocks, except the keeper which is part of context header */
662  while (block != NULL)
663  {
664  AllocBlock next = block->next;
665 
666  if (block != set->keeper)
667  context->mem_allocated -= block->endptr - ((char *) block);
668 
669 #ifdef CLOBBER_FREED_MEMORY
670  wipe_mem(block, block->freeptr - ((char *) block));
671 #endif
672 
673  if (block != set->keeper)
674  free(block);
675 
676  block = next;
677  }
678 
679  Assert(context->mem_allocated == keepersize);
680 
681  /* Finally, free the context header, including the keeper block */
682  free(set);
683 }
684 
685 /*
686  * AllocSetAlloc
687  * Returns pointer to allocated memory of given size or NULL if
688  * request could not be completed; memory is added to the set.
689  *
690  * No request may exceed:
691  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
692  * All callers use a much-lower limit.
693  *
694  * Note: when using valgrind, it doesn't matter how the returned allocation
695  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
696  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
697  */
698 void *
700 {
701  AllocSet set = (AllocSet) context;
702  AllocBlock block;
703  MemoryChunk *chunk;
704  int fidx;
705  Size chunk_size;
706  Size blksize;
707 
708  Assert(AllocSetIsValid(set));
709 
710  /*
711  * If requested size exceeds maximum for chunks, allocate an entire block
712  * for this request.
713  */
714  if (size > set->allocChunkLimit)
715  {
716 #ifdef MEMORY_CONTEXT_CHECKING
717  /* ensure there's always space for the sentinel byte */
718  chunk_size = MAXALIGN(size + 1);
719 #else
720  chunk_size = MAXALIGN(size);
721 #endif
722 
723  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
724  block = (AllocBlock) malloc(blksize);
725  if (block == NULL)
726  return NULL;
727 
728  context->mem_allocated += blksize;
729 
730  block->aset = set;
731  block->freeptr = block->endptr = ((char *) block) + blksize;
732 
733  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
734 
735  /* mark the MemoryChunk as externally managed */
737 
738 #ifdef MEMORY_CONTEXT_CHECKING
739  chunk->requested_size = size;
740  /* set mark to catch clobber of "unused" space */
741  Assert(size < chunk_size);
742  set_sentinel(MemoryChunkGetPointer(chunk), size);
743 #endif
744 #ifdef RANDOMIZE_ALLOCATED_MEMORY
745  /* fill the allocated space with junk */
746  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
747 #endif
748 
749  /*
750  * Stick the new block underneath the active allocation block, if any,
751  * so that we don't lose the use of the space remaining therein.
752  */
753  if (set->blocks != NULL)
754  {
755  block->prev = set->blocks;
756  block->next = set->blocks->next;
757  if (block->next)
758  block->next->prev = block;
759  set->blocks->next = block;
760  }
761  else
762  {
763  block->prev = NULL;
764  block->next = NULL;
765  set->blocks = block;
766  }
767 
768  /* Ensure any padding bytes are marked NOACCESS. */
769  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
770  chunk_size - size);
771 
772  /* Disallow access to the chunk header. */
774 
775  return MemoryChunkGetPointer(chunk);
776  }
777 
778  /*
779  * Request is small enough to be treated as a chunk. Look in the
780  * corresponding free list to see if there is a free chunk we could reuse.
781  * If one is found, remove it from the free list, make it again a member
782  * of the alloc set and return its data address.
783  *
784  * Note that we don't attempt to ensure there's space for the sentinel
785  * byte here. We expect a large proportion of allocations to be for sizes
786  * which are already a power of 2. If we were to always make space for a
787  * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
788  * doubling the memory requirements for such allocations.
789  */
790  fidx = AllocSetFreeIndex(size);
791  chunk = set->freelist[fidx];
792  if (chunk != NULL)
793  {
795 
796  /* Allow access to the chunk header. */
798 
799  Assert(fidx == MemoryChunkGetValue(chunk));
800 
801  /* pop this chunk off the freelist */
803  set->freelist[fidx] = link->next;
805 
806 #ifdef MEMORY_CONTEXT_CHECKING
807  chunk->requested_size = size;
808  /* set mark to catch clobber of "unused" space */
809  if (size < GetChunkSizeFromFreeListIdx(fidx))
810  set_sentinel(MemoryChunkGetPointer(chunk), size);
811 #endif
812 #ifdef RANDOMIZE_ALLOCATED_MEMORY
813  /* fill the allocated space with junk */
814  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
815 #endif
816 
817  /* Ensure any padding bytes are marked NOACCESS. */
818  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
819  GetChunkSizeFromFreeListIdx(fidx) - size);
820 
821  /* Disallow access to the chunk header. */
823 
824  return MemoryChunkGetPointer(chunk);
825  }
826 
827  /*
828  * Choose the actual chunk size to allocate.
829  */
830  chunk_size = GetChunkSizeFromFreeListIdx(fidx);
831  Assert(chunk_size >= size);
832 
833  /*
834  * If there is enough room in the active allocation block, we will put the
835  * chunk into that block. Else must start a new one.
836  */
837  if ((block = set->blocks) != NULL)
838  {
839  Size availspace = block->endptr - block->freeptr;
840 
841  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
842  {
843  /*
844  * The existing active (top) block does not have enough room for
845  * the requested allocation, but it might still have a useful
846  * amount of space in it. Once we push it down in the block list,
847  * we'll never try to allocate more space from it. So, before we
848  * do that, carve up its free space into chunks that we can put on
849  * the set's freelists.
850  *
851  * Because we can only get here when there's less than
852  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
853  * more than ALLOCSET_NUM_FREELISTS-1 times.
854  */
855  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
856  {
858  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
859  int a_fidx = AllocSetFreeIndex(availchunk);
860 
861  /*
862  * In most cases, we'll get back the index of the next larger
863  * freelist than the one we need to put this chunk on. The
864  * exception is when availchunk is exactly a power of 2.
865  */
866  if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
867  {
868  a_fidx--;
869  Assert(a_fidx >= 0);
870  availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
871  }
872 
873  chunk = (MemoryChunk *) (block->freeptr);
874 
875  /* Prepare to initialize the chunk header. */
877  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
878  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
879 
880  /* store the freelist index in the value field */
881  MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
882 #ifdef MEMORY_CONTEXT_CHECKING
883  chunk->requested_size = InvalidAllocSize; /* mark it free */
884 #endif
885  /* push this chunk onto the free list */
886  link = GetFreeListLink(chunk);
887 
889  link->next = set->freelist[a_fidx];
891 
892  set->freelist[a_fidx] = chunk;
893  }
894  /* Mark that we need to create a new block */
895  block = NULL;
896  }
897  }
898 
899  /*
900  * Time to create a new regular (multi-chunk) block?
901  */
902  if (block == NULL)
903  {
904  Size required_size;
905 
906  /*
907  * The first such block has size initBlockSize, and we double the
908  * space in each succeeding block, but not more than maxBlockSize.
909  */
910  blksize = set->nextBlockSize;
911  set->nextBlockSize <<= 1;
912  if (set->nextBlockSize > set->maxBlockSize)
913  set->nextBlockSize = set->maxBlockSize;
914 
915  /*
916  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
917  * space... but try to keep it a power of 2.
918  */
919  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
920  while (blksize < required_size)
921  blksize <<= 1;
922 
923  /* Try to allocate it */
924  block = (AllocBlock) malloc(blksize);
925 
926  /*
927  * We could be asking for pretty big blocks here, so cope if malloc
928  * fails. But give up if there's less than 1 MB or so available...
929  */
930  while (block == NULL && blksize > 1024 * 1024)
931  {
932  blksize >>= 1;
933  if (blksize < required_size)
934  break;
935  block = (AllocBlock) malloc(blksize);
936  }
937 
938  if (block == NULL)
939  return NULL;
940 
941  context->mem_allocated += blksize;
942 
943  block->aset = set;
944  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
945  block->endptr = ((char *) block) + blksize;
946 
947  /* Mark unallocated space NOACCESS. */
949  blksize - ALLOC_BLOCKHDRSZ);
950 
951  block->prev = NULL;
952  block->next = set->blocks;
953  if (block->next)
954  block->next->prev = block;
955  set->blocks = block;
956  }
957 
958  /*
959  * OK, do the allocation
960  */
961  chunk = (MemoryChunk *) (block->freeptr);
962 
963  /* Prepare to initialize the chunk header. */
965 
966  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
967  Assert(block->freeptr <= block->endptr);
968 
969  /* store the free list index in the value field */
970  MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
971 
972 #ifdef MEMORY_CONTEXT_CHECKING
973  chunk->requested_size = size;
974  /* set mark to catch clobber of "unused" space */
975  if (size < chunk_size)
976  set_sentinel(MemoryChunkGetPointer(chunk), size);
977 #endif
978 #ifdef RANDOMIZE_ALLOCATED_MEMORY
979  /* fill the allocated space with junk */
980  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
981 #endif
982 
983  /* Ensure any padding bytes are marked NOACCESS. */
984  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
985  chunk_size - size);
986 
987  /* Disallow access to the chunk header. */
989 
990  return MemoryChunkGetPointer(chunk);
991 }
992 
993 /*
994  * AllocSetFree
995  * Frees allocated memory; memory is removed from the set.
996  */
/*
 * Two cases: an "external" chunk owns an entire dedicated block, which is
 * free()'d back to the OS immediately; an ordinary chunk is pushed onto the
 * set's freelist for its power-of-2 size class, for reuse by AllocSetAlloc().
 *
 * NOTE(review): this text is a doc-page extraction; the VALGRIND_* client
 * request calls (original lines 1004, 1076, 1078) and the declaration
 * "AllocFreeListLink *link;" (original line 1049) appear to have been
 * dropped by the extraction — 'link' is used below without a visible
 * declaration.  Confirm against the upstream file before editing.
 */
997 void
998 AllocSetFree(void *pointer)
999 {
1000  AllocSet set;
1001  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1002 
1003  /* Allow access to the chunk header. */
1005 
1006  if (MemoryChunkIsExternal(chunk))
1007  {
1008  /* Release single-chunk block. */
1009  AllocBlock block = ExternalChunkGetBlock(chunk);
1010 
1011  /*
1012  * Try to verify that we have a sane block pointer: the block header
1013  * should reference an aset and the freeptr should match the endptr.
1014  */
1015  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1016  elog(ERROR, "could not find block containing chunk %p", chunk);
1017 
1018  set = block->aset;
1019 
1020 #ifdef MEMORY_CONTEXT_CHECKING
1021  {
1022  /* Test for someone scribbling on unused space in chunk */
1023  Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1024  if (!sentinel_ok(pointer, chunk->requested_size))
1025  elog(WARNING, "detected write past chunk end in %s %p",
1026  set->header.name, chunk);
1027  }
1028 #endif
1029 
1030  /* OK, remove block from aset's list and free it */
1031  if (block->prev)
1032  block->prev->next = block->next;
1033  else
1034  set->blocks = block->next;
1035  if (block->next)
1036  block->next->prev = block->prev;
1037 
/* Accounting: the whole dedicated block leaves the context's total. */
1038  set->header.mem_allocated -= block->endptr - ((char *) block);
1039 
1040 #ifdef CLOBBER_FREED_MEMORY
1041  wipe_mem(block, block->freeptr - ((char *) block));
1042 #endif
1043  free(block);
1044  }
1045  else
1046  {
1047  AllocBlock block = MemoryChunkGetBlock(chunk);
1048  int fidx;
1050 
1051  /*
1052  * In this path, for speed reasons we just Assert that the referenced
1053  * block is good. We can also Assert that the value field is sane.
1054  * Future field experience may show that these Asserts had better
1055  * become regular runtime test-and-elog checks.
1056  */
1057  Assert(AllocBlockIsValid(block));
1058  set = block->aset;
1059 
1060  fidx = MemoryChunkGetValue(chunk);
1061  Assert(FreeListIdxIsValid(fidx));
1062  link = GetFreeListLink(chunk);
1063 
1064 #ifdef MEMORY_CONTEXT_CHECKING
1065  /* Test for someone scribbling on unused space in chunk */
1066  if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1067  if (!sentinel_ok(pointer, chunk->requested_size))
1068  elog(WARNING, "detected write past chunk end in %s %p",
1069  set->header.name, chunk);
1070 #endif
1071 
1072 #ifdef CLOBBER_FREED_MEMORY
1073  wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1074 #endif
1075  /* push this chunk onto the top of the free list */
1077  link->next = set->freelist[fidx];
1079  set->freelist[fidx] = chunk;
1080 
1081 #ifdef MEMORY_CONTEXT_CHECKING
1082 
1083  /*
1084  * Reset requested_size to InvalidAllocSize in chunks that are on free
1085  * list.
1086  */
1087  chunk->requested_size = InvalidAllocSize;
1088 #endif
1089  }
1090 }
1091 
1092 /*
1093  * AllocSetRealloc
1094  * Returns new pointer to allocated memory of given size or NULL if
1095  * request could not be completed; this memory is added to the set.
1096  * Memory associated with given pointer is copied into the new memory,
1097  * and the old memory is freed.
1098  *
1099  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1100  * makes our Valgrind client requests less-precise, hazarding false negatives.
1101  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1102  * request size.)
1103  */
/*
 * Three paths below: (1) external (single-chunk-block) chunks are resized
 * via realloc() of the whole block; (2) ordinary chunks whose size class
 * already covers the new size are returned in place; (3) otherwise we
 * brute-force: palloc a new chunk, memcpy, pfree the old one.
 *
 * NOTE(review): this text is a doc-page extraction; the VALGRIND_* client
 * request calls guarding the chunk header (original lines 1114, 1163, 1231,
 * 1304, 1331) appear to have been dropped by the extraction.  Confirm
 * against the upstream file before editing.
 */
1104 void *
1105 AllocSetRealloc(void *pointer, Size size)
1106 {
1107  AllocBlock block;
1108  AllocSet set;
1109  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1110  Size oldchksize;
1111  int fidx;
1112 
1113  /* Allow access to the chunk header. */
1115 
1116  if (MemoryChunkIsExternal(chunk))
1117  {
1118  /*
1119  * The chunk must have been allocated as a single-chunk block. Use
1120  * realloc() to make the containing block bigger, or smaller, with
1121  * minimum space wastage.
1122  */
1123  Size chksize;
1124  Size blksize;
1125  Size oldblksize;
1126 
1127  block = ExternalChunkGetBlock(chunk);
1128 
1129  /*
1130  * Try to verify that we have a sane block pointer: the block header
1131  * should reference an aset and the freeptr should match the endptr.
1132  */
1133  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1134  elog(ERROR, "could not find block containing chunk %p", chunk);
1135 
1136  set = block->aset;
1137 
1138  oldchksize = block->endptr - (char *) pointer;
1139 
1140 #ifdef MEMORY_CONTEXT_CHECKING
1141  /* Test for someone scribbling on unused space in chunk */
1142  Assert(chunk->requested_size < oldchksize);
1143  if (!sentinel_ok(pointer, chunk->requested_size))
1144  elog(WARNING, "detected write past chunk end in %s %p",
1145  set->header.name, chunk);
1146 #endif
1147 
1148 #ifdef MEMORY_CONTEXT_CHECKING
1149  /* ensure there's always space for the sentinel byte */
1150  chksize = MAXALIGN(size + 1);
1151 #else
1152  chksize = MAXALIGN(size);
1153 #endif
1154 
1155  /* Do the realloc */
1156  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1157  oldblksize = block->endptr - ((char *) block);
1158 
/* On failure, realloc() leaves the old block intact, so no cleanup needed. */
1159  block = (AllocBlock) realloc(block, blksize);
1160  if (block == NULL)
1161  {
1162  /* Disallow access to the chunk header. */
1164  return NULL;
1165  }
1166 
1167  /* updated separately, not to underflow when (oldblksize > blksize) */
1168  set->header.mem_allocated -= oldblksize;
1169  set->header.mem_allocated += blksize;
1170 
1171  block->freeptr = block->endptr = ((char *) block) + blksize;
1172 
1173  /* Update pointers since block has likely been moved */
1174  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1175  pointer = MemoryChunkGetPointer(chunk);
1176  if (block->prev)
1177  block->prev->next = block;
1178  else
1179  set->blocks = block;
1180  if (block->next)
1181  block->next->prev = block;
1182 
1183 #ifdef MEMORY_CONTEXT_CHECKING
1184 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1185 
1186  /*
1187  * We can only randomize the extra space if we know the prior request.
1188  * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1189  */
1190  if (size > chunk->requested_size)
1191  randomize_mem((char *) pointer + chunk->requested_size,
1192  size - chunk->requested_size);
1193 #else
1194 
1195  /*
1196  * If this is an increase, realloc() will have marked any
1197  * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1198  * also need to adjust trailing bytes from the old allocation (from
1199  * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1200  * Make sure not to mark too many bytes in case chunk->requested_size
1201  * < size < oldchksize.
1202  */
1203 #ifdef USE_VALGRIND
1204  if (Min(size, oldchksize) > chunk->requested_size)
1205  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1206  Min(size, oldchksize) - chunk->requested_size);
1207 #endif
1208 #endif
1209 
1210  chunk->requested_size = size;
1211  /* set mark to catch clobber of "unused" space */
1212  Assert(size < chksize);
1213  set_sentinel(pointer, size);
1214 #else /* !MEMORY_CONTEXT_CHECKING */
1215 
1216  /*
1217  * We may need to adjust marking of bytes from the old allocation as
1218  * some of them may be marked NOACCESS. We don't know how much of the
1219  * old chunk size was the requested size; it could have been as small
1220  * as one byte. We have to be conservative and just mark the entire
1221  * old portion DEFINED. Make sure not to mark memory beyond the new
1222  * allocation in case it's smaller than the old one.
1223  */
1224  VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1225 #endif
1226 
1227  /* Ensure any padding bytes are marked NOACCESS. */
1228  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1229 
1230  /* Disallow access to the chunk header . */
1232 
1233  return pointer;
1234  }
1235 
1236  block = MemoryChunkGetBlock(chunk);
1237 
1238  /*
1239  * In this path, for speed reasons we just Assert that the referenced
1240  * block is good. We can also Assert that the value field is sane. Future
1241  * field experience may show that these Asserts had better become regular
1242  * runtime test-and-elog checks.
1243  */
1244  Assert(AllocBlockIsValid(block));
1245  set = block->aset;
1246 
1247  fidx = MemoryChunkGetValue(chunk);
1248  Assert(FreeListIdxIsValid(fidx));
1249  oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1250 
1251 #ifdef MEMORY_CONTEXT_CHECKING
1252  /* Test for someone scribbling on unused space in chunk */
1253  if (chunk->requested_size < oldchksize)
1254  if (!sentinel_ok(pointer, chunk->requested_size))
1255  elog(WARNING, "detected write past chunk end in %s %p",
1256  set->header.name, chunk);
1257 #endif
1258 
1259  /*
1260  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1261  * allocated area already is >= the new size. (In particular, we will
1262  * fall out here if the requested size is a decrease.)
1263  */
1264  if (oldchksize >= size)
1265  {
1266 #ifdef MEMORY_CONTEXT_CHECKING
1267  Size oldrequest = chunk->requested_size;
1268 
1269 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1270  /* We can only fill the extra space if we know the prior request */
1271  if (size > oldrequest)
1272  randomize_mem((char *) pointer + oldrequest,
1273  size - oldrequest);
1274 #endif
1275 
1276  chunk->requested_size = size;
1277 
1278  /*
1279  * If this is an increase, mark any newly-available part UNDEFINED.
1280  * Otherwise, mark the obsolete part NOACCESS.
1281  */
1282  if (size > oldrequest)
1283  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1284  size - oldrequest);
1285  else
1286  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1287  oldchksize - size);
1288 
1289  /* set mark to catch clobber of "unused" space */
1290  if (size < oldchksize)
1291  set_sentinel(pointer, size);
1292 #else /* !MEMORY_CONTEXT_CHECKING */
1293 
1294  /*
1295  * We don't have the information to determine whether we're growing
1296  * the old request or shrinking it, so we conservatively mark the
1297  * entire new allocation DEFINED.
1298  */
1299  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1300  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1301 #endif
1302 
1303  /* Disallow access to the chunk header. */
1305 
1306  return pointer;
1307  }
1308  else
1309  {
1310  /*
1311  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1312  * allocate a new chunk and copy the data. Since we know the existing
1313  * data isn't huge, this won't involve any great memcpy expense, so
1314  * it's not worth being smarter. (At one time we tried to avoid
1315  * memcpy when it was possible to enlarge the chunk in-place, but that
1316  * turns out to misbehave unpleasantly for repeated cycles of
1317  * palloc/repalloc/pfree: the eventually freed chunks go into the
1318  * wrong freelist for the next initial palloc request, and so we leak
1319  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1320  */
1321  AllocPointer newPointer;
1322  Size oldsize;
1323 
1324  /* allocate new chunk */
1325  newPointer = AllocSetAlloc((MemoryContext) set, size);
1326 
1327  /* leave immediately if request was not completed */
1328  if (newPointer == NULL)
1329  {
1330  /* Disallow access to the chunk header. */
1332  return NULL;
1333  }
1334 
1335  /*
1336  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1337  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1338  * definedness from the old allocation to the new. If we know the old
1339  * allocation, copy just that much. Otherwise, make the entire old
1340  * chunk defined to avoid errors as we copy the currently-NOACCESS
1341  * trailing bytes.
1342  */
1343  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1344 #ifdef MEMORY_CONTEXT_CHECKING
1345  oldsize = chunk->requested_size;
1346 #else
1347  oldsize = oldchksize;
1348  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1349 #endif
1350 
1351  /* transfer existing data (certain to fit) */
1352  memcpy(newPointer, pointer, oldsize);
1353 
1354  /* free old chunk */
1355  AllocSetFree(pointer);
1356 
1357  return newPointer;
1358  }
1359 }
1360 
1361 /*
1362  * AllocSetGetChunkContext
1363  * Return the MemoryContext that 'pointer' belongs to.
1364  */
/*
 * Resolves the owning block (dedicated block for external chunks, the
 * header-encoded block otherwise) and returns its aset's context header.
 *
 * NOTE(review): this text is a doc-page extraction; the function signature
 * line ("MemoryContext AllocSetGetChunkContext(void *pointer)", original
 * lines 1365-1366) and the VALGRIND_* calls (original lines 1373, 1381)
 * appear to have been dropped by the extraction.
 */
1367 {
1368  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1369  AllocBlock block;
1370  AllocSet set;
1371 
1372  /* Allow access to the chunk header. */
1374 
1375  if (MemoryChunkIsExternal(chunk))
1376  block = ExternalChunkGetBlock(chunk);
1377  else
1378  block = (AllocBlock) MemoryChunkGetBlock(chunk);
1379 
1380  /* Disallow access to the chunk header. */
1382 
1383  Assert(AllocBlockIsValid(block));
1384  set = block->aset;
1385 
1386  return &set->header;
1387 }
1388 
1389 /*
1390  * AllocSetGetChunkSpace
1391  * Given a currently-allocated chunk, determine the total space
1392  * it occupies (including all memory-allocation overhead).
1393  */
/*
 * External chunks occupy their whole dedicated block (measured from the
 * chunk header to endptr); ordinary chunks occupy their freelist size class
 * plus the chunk header.
 *
 * NOTE(review): this text is a doc-page extraction; the function name line
 * ("AllocSetGetChunkSpace(void *pointer)", original line 1395), the
 * VALGRIND_* calls (original lines 1401, 1408, 1419), and the final return
 * statement (original line 1421, presumably
 * "return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;") appear to
 * have been dropped by the extraction — confirm against the upstream file.
 */
1394 Size
1396 {
1397  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1398  int fidx;
1399 
1400  /* Allow access to the chunk header. */
1402 
1403  if (MemoryChunkIsExternal(chunk))
1404  {
1405  AllocBlock block = ExternalChunkGetBlock(chunk);
1406 
1407  /* Disallow access to the chunk header. */
1409 
1410  Assert(AllocBlockIsValid(block));
1411 
1412  return block->endptr - (char *) chunk;
1413  }
1414 
1415  fidx = MemoryChunkGetValue(chunk);
1416  Assert(FreeListIdxIsValid(fidx));
1417 
1418  /* Disallow access to the chunk header. */
1420 
1422 }
1423 
1424 /*
1425  * AllocSetIsEmpty
1426  * Is an allocset empty of any allocated space?
1427  */
1428 bool
1430 {
1431  Assert(AllocSetIsValid(context));
1432 
1433  /*
1434  * For now, we say "empty" only if the context is new or just reset. We
1435  * could examine the freelists to determine if all space has been freed,
1436  * but it's not really worth the trouble for present uses of this
1437  * functionality.
1438  */
1439  if (context->isReset)
1440  return true;
1441  return false;
1442 }
1443 
1444 /*
1445  * AllocSetStats
1446  * Compute stats about memory consumption of an allocset.
1447  *
1448  * printfunc: if not NULL, pass a human-readable stats string to this.
1449  * passthru: pass this pointer through to printfunc.
1450  * totals: if not NULL, add stats about this context into *totals.
1451  * print_to_stderr: print stats to stderr if true, elog otherwise.
1452  */
/*
 * Walks all blocks (counting total and unallocated tail space), then walks
 * every freelist (counting free chunks and their space).  "used" in the
 * report is totalspace - freespace.
 *
 * NOTE(review): this text is a doc-page extraction; the declaration
 * "AllocFreeListLink *link ..." (original line 1484) and the VALGRIND_*
 * chunk-header guards (original lines 1487, 1490, 1496) appear to have
 * been dropped by the extraction — 'link' is used below without a visible
 * declaration.
 */
1453 void
1455  MemoryStatsPrintFunc printfunc, void *passthru,
1456  MemoryContextCounters *totals, bool print_to_stderr)
1457 {
1458  AllocSet set = (AllocSet) context;
1459  Size nblocks = 0;
1460  Size freechunks = 0;
1461  Size totalspace;
1462  Size freespace = 0;
1463  AllocBlock block;
1464  int fidx;
1465 
1466  Assert(AllocSetIsValid(set));
1467 
1468  /* Include context header in totalspace */
1469  totalspace = MAXALIGN(sizeof(AllocSetContext));
1470 
1471  for (block = set->blocks; block != NULL; block = block->next)
1472  {
1473  nblocks++;
1474  totalspace += block->endptr - ((char *) block);
1475  freespace += block->endptr - block->freeptr;
1476  }
1477  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1478  {
1479  Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1480  MemoryChunk *chunk = set->freelist[fidx];
1481 
1482  while (chunk != NULL)
1483  {
1485 
1486  /* Allow access to the chunk header. */
1488  Assert(MemoryChunkGetValue(chunk) == fidx);
1490 
1491  freechunks++;
1492  freespace += chksz + ALLOC_CHUNKHDRSZ;
1493 
1495  chunk = link->next;
1497  }
1498  }
1499 
1500  if (printfunc)
1501  {
1502  char stats_string[200];
1503 
1504  snprintf(stats_string, sizeof(stats_string),
1505  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1506  totalspace, nblocks, freespace, freechunks,
1507  totalspace - freespace);
1508  printfunc(context, passthru, stats_string, print_to_stderr);
1509  }
1510 
1511  if (totals)
1512  {
1513  totals->nblocks += nblocks;
1514  totals->freechunks += freechunks;
1515  totals->totalspace += totalspace;
1516  totals->freespace += freespace;
1517  }
1518 }
1519 
1520 
1521 #ifdef MEMORY_CONTEXT_CHECKING
1522 
1523 /*
1524  * AllocSetCheck
1525  * Walk through chunks and check consistency of memory.
1526  *
1527  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1528  * find yourself in an infinite loop when trouble occurs, because this
1529  * routine will be entered again when elog cleanup tries to release memory!
1530  */
/*
 * Invariants verified per block: sane header (aset backlink, prev link,
 * freeptr within [block start, endptr]); only the keeper block may be empty;
 * each chunk's requested size <= its aligned size; sentinel byte intact for
 * partially-used chunks; chunk sizes sum exactly to the used portion of the
 * block; external chunks sit alone on dedicated blocks.  Finally the summed
 * block space must equal context->mem_allocated.
 *
 * NOTE(review): this text is a doc-page extraction; the VALGRIND_* chunk
 * header guards (original lines 1585, 1637) appear to have been dropped by
 * the extraction.
 */
1531 void
1532 AllocSetCheck(MemoryContext context)
1533 {
1534  AllocSet set = (AllocSet) context;
1535  const char *name = set->header.name;
1536  AllocBlock prevblock;
1537  AllocBlock block;
1538  Size total_allocated = 0;
1539 
1540  for (prevblock = NULL, block = set->blocks;
1541  block != NULL;
1542  prevblock = block, block = block->next)
1543  {
1544  char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1545  long blk_used = block->freeptr - bpoz;
1546  long blk_data = 0;
1547  long nchunks = 0;
1548  bool has_external_chunk = false;
1549 
/* The keeper block is carved out of the context header allocation itself. */
1550  if (set->keeper == block)
1551  total_allocated += block->endptr - ((char *) set);
1552  else
1553  total_allocated += block->endptr - ((char *) block);
1554 
1555  /*
1556  * Empty block - empty can be keeper-block only
1557  */
1558  if (!blk_used)
1559  {
1560  if (set->keeper != block)
1561  elog(WARNING, "problem in alloc set %s: empty block %p",
1562  name, block);
1563  }
1564 
1565  /*
1566  * Check block header fields
1567  */
1568  if (block->aset != set ||
1569  block->prev != prevblock ||
1570  block->freeptr < bpoz ||
1571  block->freeptr > block->endptr)
1572  elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1573  name, block);
1574 
1575  /*
1576  * Chunk walker
1577  */
1578  while (bpoz < block->freeptr)
1579  {
1580  MemoryChunk *chunk = (MemoryChunk *) bpoz;
1581  Size chsize,
1582  dsize;
1583 
1584  /* Allow access to the chunk header. */
1586 
1587  if (MemoryChunkIsExternal(chunk))
1588  {
1589  chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1590  has_external_chunk = true;
1591 
1592  /* make sure this chunk consumes the entire block */
1593  if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
1594  elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1595  name, chunk, block);
1596  }
1597  else
1598  {
1599  int fidx = MemoryChunkGetValue(chunk);
1600 
1601  if (!FreeListIdxIsValid(fidx))
1602  elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1603  name, chunk, block);
1604 
1605  chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1606 
1607  /*
1608  * Check the stored block offset correctly references this
1609  * block.
1610  */
1611  if (block != MemoryChunkGetBlock(chunk))
1612  elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1613  name, chunk, block);
1614  }
/* dsize == InvalidAllocSize marks a chunk sitting on a freelist. */
1615  dsize = chunk->requested_size; /* real data */
1616 
1617  /* an allocated chunk's requested size must be <= the chsize */
1618  if (dsize != InvalidAllocSize && dsize > chsize)
1619  elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1620  name, chunk, block);
1621 
1622  /* chsize must not be smaller than the first freelist's size */
1623  if (chsize < (1 << ALLOC_MINBITS))
1624  elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1625  name, chsize, chunk, block);
1626 
1627  /*
1628  * Check for overwrite of padding space in an allocated chunk.
1629  */
1630  if (dsize != InvalidAllocSize && dsize < chsize &&
1631  !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1632  elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1633  name, block, chunk);
1634 
1635  /* if chunk is allocated, disallow access to the chunk header */
1636  if (dsize != InvalidAllocSize)
1638 
1639  blk_data += chsize;
1640  nchunks++;
1641 
1642  bpoz += ALLOC_CHUNKHDRSZ + chsize;
1643  }
1644 
1645  if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1646  elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1647  name, block);
1648 
1649  if (has_external_chunk && nchunks > 1)
1650  elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1651  name, block);
1652  }
1653 
1654  Assert(total_allocated == context->mem_allocated);
1655 }
1656 
1657 #endif /* MEMORY_CONTEXT_CHECKING */
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
void AllocSetReset(MemoryContext context)
Definition: aset.c:533
#define AllocSetIsValid(set)
Definition: aset.c:201
#define AllocBlockIsValid(block)
Definition: aset.c:208
#define GetFreeListLink(chkptr)
Definition: aset.c:132
#define FreeListIdxIsValid(fidx)
Definition: aset.c:136
Size AllocSetGetChunkSpace(void *pointer)
Definition: aset.c:1395
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:105
MemoryContext AllocSetGetChunkContext(void *pointer)
Definition: aset.c:1366
void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition: aset.c:1454
#define GetChunkSizeFromFreeListIdx(fidx)
Definition: aset.c:140
#define ALLOC_MINBITS
Definition: aset.c:83
struct AllocBlockData * AllocBlock
Definition: aset.c:107
#define MAX_FREE_CONTEXTS
Definition: aset.c:242
static int AllocSetFreeIndex(Size size)
Definition: aset.c:271
bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1429
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:104
struct AllocBlockData AllocBlockData
void * AllocSetRealloc(void *pointer, Size size)
Definition: aset.c:1105
void * AllocPointer
Definition: aset.c:113
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:84
struct AllocSetContext AllocSetContext
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:87
void AllocSetFree(void *pointer)
Definition: aset.c:998
void AllocSetDelete(MemoryContext context)
Definition: aset.c:603
struct AllocSetFreeList AllocSetFreeList
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:85
struct AllocFreeListLink AllocFreeListLink
static AllocSetFreeList context_freelists[2]
Definition: aset.c:251
#define ExternalChunkGetBlock(chunk)
Definition: aset.c:216
void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:699
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:341
AllocSetContext * AllocSet
Definition: aset.c:168
static int32 next
Definition: blutils.c:219
unsigned int uint32
Definition: c.h:490
#define Min(x, y)
Definition: c.h:988
#define MAXALIGN(LEN)
Definition: c.h:795
#define Max(x, y)
Definition: c.h:982
#define MemSetAligned(start, val, len)
Definition: c.h:1034
#define StaticAssertDecl(condition, errmessage)
Definition: c.h:920
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:922
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:166
size_t Size
Definition: c.h:589
int errdetail(const char *fmt,...)
Definition: elog.c:1202
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define WARNING
Definition: elog.h:36
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
const char * name
Definition: encode.c:571
#define realloc(a, b)
Definition: header.h:60
#define free(a)
Definition: header.h:65
#define malloc(a)
Definition: header.h:50
Assert(fmt[strlen(fmt) - 1] !='\n')
void MemoryContextCreate(MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
Definition: mcxt.c:973
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:699
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:349
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition: memnodes.h:54
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:160
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:150
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:49
#define InvalidAllocSize
Definition: memutils.h:47
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:180
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:161
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:151
@ MCTX_ASET_ID
#define MEMORYCHUNK_MAX_BLOCKOFFSET
static Size MemoryChunkGetValue(MemoryChunk *chunk)
#define MemoryChunkGetPointer(c)
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void MemoryChunkSetHdrMaskExternal(MemoryChunk *chunk, MemoryContextMethodID methodid)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
static int pg_leftmost_one_pos32(uint32 word)
Definition: pg_bitutils.h:41
PGDLLIMPORT const uint8 pg_leftmost_one_pos[256]
Definition: pg_bitutils.c:34
#define snprintf
Definition: port.h:238
AllocBlock prev
Definition: aset.c:185
AllocSet aset
Definition: aset.c:184
char * freeptr
Definition: aset.c:187
AllocBlock next
Definition: aset.c:186
char * endptr
Definition: aset.c:188
MemoryContextData header
Definition: aset.c:154
int freeListIndex
Definition: aset.c:165
AllocBlock blocks
Definition: aset.c:156
Size maxBlockSize
Definition: aset.c:160
AllocBlock keeper
Definition: aset.c:163
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:157
Size initBlockSize
Definition: aset.c:159
Size nextBlockSize
Definition: aset.c:161
Size allocChunkLimit
Definition: aset.c:162
int num_free
Definition: aset.c:246
AllocSetContext * first_free
Definition: aset.c:247
MemoryContext nextchild
Definition: memnodes.h:92
Size mem_allocated
Definition: memnodes.h:87
const char * name
Definition: memnodes.h:93