/*
 * NOTE(extraction): the three lines that preceded the file header here were
 * doxygen source-browser navigation chrome ("PostgreSQL Source Code git
 * master" / "aset.c" / "Go to the documentation of this file."), not part of
 * the source file.  Replaced with this comment so the file remains valid C.
 */
1 /*-------------------------------------------------------------------------
2  *
3  * aset.c
4  * Allocation set definitions.
5  *
6  * AllocSet is our standard implementation of the abstract MemoryContext
7  * type.
8  *
9  *
10  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
11  * Portions Copyright (c) 1994, Regents of the University of California
12  *
13  * IDENTIFICATION
14  * src/backend/utils/mmgr/aset.c
15  *
16  * NOTE:
17  * This is a new (Feb. 05, 1999) implementation of the allocation set
18  * routines. AllocSet...() does not use OrderedSet...() any more.
19  * Instead it manages allocations in a block pool by itself, combining
20  * many small allocations in a few bigger blocks. AllocSetFree() normally
21  * doesn't free() memory really. It just add's the free'd area to some
22  * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23  * at once on AllocSetReset(), which happens when the memory context gets
24  * destroyed.
25  * Jan Wieck
26  *
27  * Performance improvement from Tom Lane, 8/99: for extremely large request
28  * sizes, we do want to be able to give the memory back to free() as soon
29  * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30  * freelist entries that might never be usable. This is specially needed
31  * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32  * the previous instances of the block were guaranteed to be wasted until
33  * AllocSetReset() under the old way.
34  *
35  * Further improvement 12/00: as the code stood, request sizes in the
36  * midrange between "small" and "large" were handled very inefficiently,
37  * because any sufficiently large free chunk would be used to satisfy a
38  * request, even if it was much larger than necessary. This led to more
39  * and more wasted space in allocated chunks over time. To fix, get rid
40  * of the midrange behavior: we now handle only "small" power-of-2-size
41  * chunks as chunks. Anything "large" is passed off to malloc(). Change
42  * the number of freelists to change the small/large boundary.
43  *
44  *-------------------------------------------------------------------------
45  */
46 
#include "postgres.h"

#include "port/pg_bitutils.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/memutils_memorychunk.h"
#include "utils/memutils_internal.h"
54 
55 /*--------------------
56  * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57  * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58  *
59  * Note that all chunks in the freelists have power-of-2 sizes. This
60  * improves recyclability: we may waste some space, but the wasted space
61  * should stay pretty constant as requests are made and released.
62  *
63  * A request too large for the last freelist is handled by allocating a
64  * dedicated block from malloc(). The block still has a block header and
65  * chunk header, but when the chunk is freed we'll return the whole block
66  * to malloc(), not put it on our freelists.
67  *
68  * CAUTION: ALLOC_MINBITS must be large enough so that
69  * 1<<ALLOC_MINBITS is at least MAXALIGN,
70  * or we may fail to align the smallest chunks adequately.
71  * 8-byte alignment is enough on all currently known machines. This 8-byte
72  * minimum also allows us to store a pointer to the next freelist item within
73  * the chunk of memory itself.
74  *
75  * With the current parameters, request sizes up to 8K are treated as chunks,
76  * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77  * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78  * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79  * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80  *--------------------
81  */
82 
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(MemoryChunk)
106 
107 typedef struct AllocBlockData *AllocBlock; /* forward reference */
108 
109 /*
110  * AllocPointer
111  * Aligned pointer which may be a member of an allocation set.
112  */
113 typedef void *AllocPointer;
114 
115 /*
116  * AllocFreeListLink
117  * When pfreeing memory, if we maintain a freelist for the given chunk's
118  * size then we use a AllocFreeListLink to point to the current item in
119  * the AllocSetContext's freelist and then set the given freelist element
120  * to point to the chunk being freed.
121  */
122 typedef struct AllocFreeListLink
123 {
126 
127 /*
128  * Obtain a AllocFreeListLink for the given chunk. Allocation sizes are
129  * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
130  * itself to store the freelist link.
131  */
132 #define GetFreeListLink(chkptr) \
133  (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
134 
135 /* Validate a freelist index retrieved from a chunk header */
136 #define FreeListIdxIsValid(fidx) \
137  ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
138 
139 /* Determine the size of the chunk based on the freelist index */
140 #define GetChunkSizeFromFreeListIdx(fidx) \
141  ((((Size) 1) << ALLOC_MINBITS) << (fidx))
142 
143 /*
144  * AllocSetContext is our standard implementation of MemoryContext.
145  *
146  * Note: header.isReset means there is nothing for AllocSetReset to do.
147  * This is different from the aset being physically empty (empty blocks list)
148  * because we will still have a keeper block. It's also different from the set
149  * being logically empty, because we don't attempt to detect pfree'ing the
150  * last active chunk.
151  */
152 typedef struct AllocSetContext
153 {
154  MemoryContextData header; /* Standard memory-context fields */
155  /* Info about storage allocated in this context: */
156  AllocBlock blocks; /* head of list of blocks in this set */
157  MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
158  /* Allocation parameters for this context: */
159  Size initBlockSize; /* initial block size */
160  Size maxBlockSize; /* maximum block size */
161  Size nextBlockSize; /* next block size to allocate */
162  Size allocChunkLimit; /* effective chunk size limit */
163  AllocBlock keeper; /* keep this block over resets */
164  /* freelist this context could be put in, or -1 if not a candidate: */
165  int freeListIndex; /* index in context_freelists[], or -1 */
167 
169 
170 /*
171  * AllocBlock
172  * An AllocBlock is the unit of memory that is obtained by aset.c
173  * from malloc(). It contains one or more MemoryChunks, which are
174  * the units requested by palloc() and freed by pfree(). MemoryChunks
175  * cannot be returned to malloc() individually, instead they are put
176  * on freelists by pfree() and re-used by the next palloc() that has
177  * a matching request size.
178  *
179  * AllocBlockData is the header data for a block --- the usable space
180  * within the block begins at the next alignment boundary.
181  */
182 typedef struct AllocBlockData
183 {
184  AllocSet aset; /* aset that owns this block */
185  AllocBlock prev; /* prev block in aset's blocks list, if any */
186  AllocBlock next; /* next block in aset's blocks list, if any */
187  char *freeptr; /* start of free space in this block */
188  char *endptr; /* end of space in this block */
190 
/*
 * Only the "hdrmask" field should be accessed outside this module.
 * We keep the rest of an allocated chunk's header marked NOACCESS when using
 * valgrind.  But note that chunk headers that are in a freelist are kept
 * accessible, for simplicity.
 */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(MemoryChunk, hdrmask)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) \
	(PointerIsValid(set) && IsA(set, AllocSetContext))

/*
 * AllocBlockIsValid
 *		True iff block is valid block of allocation set.
 */
#define AllocBlockIsValid(block) \
	(PointerIsValid(block) && AllocSetIsValid((block)->aset))

/*
 * We always store external chunks on a dedicated block.  This makes fetching
 * the block from an external chunk easy since it's always the first and only
 * chunk on the block.
 */
#define ExternalChunkGetBlock(chunk) \
	(AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
226 
227 /*
228  * Rather than repeatedly creating and deleting memory contexts, we keep some
229  * freed contexts in freelists so that we can hand them out again with little
230  * work. Before putting a context in a freelist, we reset it so that it has
231  * only its initial malloc chunk and no others. To be a candidate for a
232  * freelist, a context must have the same minContextSize/initBlockSize as
233  * other contexts in the list; but its maxBlockSize is irrelevant since that
234  * doesn't affect the size of the initial chunk.
235  *
236  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
237  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
238  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
239  *
240  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
241  * hopes of improving locality of reference. But if there get to be too
242  * many contexts in the list, we'd prefer to drop the most-recently-created
243  * contexts in hopes of keeping the process memory map compact.
244  * We approximate that by simply deleting all existing entries when the list
245  * overflows, on the assumption that queries that allocate a lot of contexts
246  * will probably free them in more or less reverse order of allocation.
247  *
248  * Contexts in a freelist are chained via their nextchild pointers.
249  */
250 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
251 
252 typedef struct AllocSetFreeList
253 {
254  int num_free; /* current list length */
255  AllocSetContext *first_free; /* list header */
257 
258 /* context_freelists[0] is for default params, [1] for small params */
260 {
261  {
262  0, NULL
263  },
264  {
265  0, NULL
266  }
267 };
268 
269 
270 /* ----------
271  * AllocSetFreeIndex -
272  *
273  * Depending on the size of an allocation compute which freechunk
274  * list of the alloc set it belongs to. Caller must have verified
275  * that size <= ALLOC_CHUNK_LIMIT.
276  * ----------
277  */
278 static inline int
280 {
281  int idx;
282 
283  if (size > (1 << ALLOC_MINBITS))
284  {
285  /*----------
286  * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
287  * This is the same as
288  * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
289  * or equivalently
290  * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
291  *
292  * However, rather than just calling that function, we duplicate the
293  * logic here, allowing an additional optimization. It's reasonable
294  * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
295  * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
296  * the last two bytes.
297  *
298  * Yes, this function is enough of a hot-spot to make it worth this
299  * much trouble.
300  *----------
301  */
302 #ifdef HAVE__BUILTIN_CLZ
303  idx = 31 - __builtin_clz((uint32) size - 1) - ALLOC_MINBITS + 1;
304 #else
305  uint32 t,
306  tsize;
307 
308  /* Statically assert that we only have a 16-bit input value. */
310  "ALLOC_CHUNK_LIMIT must be less than 64kB");
311 
312  tsize = size - 1;
313  t = tsize >> 8;
314  idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
315  idx -= ALLOC_MINBITS - 1;
316 #endif
317 
319  }
320  else
321  idx = 0;
322 
323  return idx;
324 }
325 
326 
327 /*
328  * Public routines
329  */
330 
331 
332 /*
333  * AllocSetContextCreateInternal
334  * Create a new AllocSet context.
335  *
336  * parent: parent context, or NULL if top-level context
337  * name: name of context (must be statically allocated)
338  * minContextSize: minimum context size
339  * initBlockSize: initial allocation block size
340  * maxBlockSize: maximum allocation block size
341  *
342  * Most callers should abstract the context size parameters using a macro
343  * such as ALLOCSET_DEFAULT_SIZES.
344  *
345  * Note: don't call this directly; go through the wrapper macro
346  * AllocSetContextCreate.
347  */
350  const char *name,
351  Size minContextSize,
352  Size initBlockSize,
353  Size maxBlockSize)
354 {
355  int freeListIndex;
356  Size firstBlockSize;
357  AllocSet set;
358  AllocBlock block;
359 
360  /* ensure MemoryChunk's size is properly maxaligned */
362  "sizeof(MemoryChunk) is not maxaligned");
363  /* check we have enough space to store the freelist link */
365  "sizeof(AllocFreeListLink) larger than minimum allocation size");
366 
367  /*
368  * First, validate allocation parameters. Once these were regular runtime
369  * tests and elog's, but in practice Asserts seem sufficient because
370  * nobody varies their parameters at runtime. We somewhat arbitrarily
371  * enforce a minimum 1K block size. We restrict the maximum block size to
372  * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
373  * regards to addressing the offset between the chunk and the block that
374  * the chunk is stored on. We would be unable to store the offset between
375  * the chunk and block for any chunks that were beyond
376  * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
377  * larger than this.
378  */
379  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
380  initBlockSize >= 1024);
381  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
382  maxBlockSize >= initBlockSize &&
383  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
384  Assert(minContextSize == 0 ||
385  (minContextSize == MAXALIGN(minContextSize) &&
386  minContextSize >= 1024 &&
387  minContextSize <= maxBlockSize));
388  Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
389 
390  /*
391  * Check whether the parameters match either available freelist. We do
392  * not need to demand a match of maxBlockSize.
393  */
394  if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
395  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
396  freeListIndex = 0;
397  else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
398  initBlockSize == ALLOCSET_SMALL_INITSIZE)
399  freeListIndex = 1;
400  else
401  freeListIndex = -1;
402 
403  /*
404  * If a suitable freelist entry exists, just recycle that context.
405  */
406  if (freeListIndex >= 0)
407  {
408  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
409 
410  if (freelist->first_free != NULL)
411  {
412  /* Remove entry from freelist */
413  set = freelist->first_free;
414  freelist->first_free = (AllocSet) set->header.nextchild;
415  freelist->num_free--;
416 
417  /* Update its maxBlockSize; everything else should be OK */
418  set->maxBlockSize = maxBlockSize;
419 
420  /* Reinitialize its header, installing correct name and parent */
422  T_AllocSetContext,
423  MCTX_ASET_ID,
424  parent,
425  name);
426 
427  ((MemoryContext) set)->mem_allocated =
428  set->keeper->endptr - ((char *) set);
429 
430  return (MemoryContext) set;
431  }
432  }
433 
434  /* Determine size of initial block */
435  firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
437  if (minContextSize != 0)
438  firstBlockSize = Max(firstBlockSize, minContextSize);
439  else
440  firstBlockSize = Max(firstBlockSize, initBlockSize);
441 
442  /*
443  * Allocate the initial block. Unlike other aset.c blocks, it starts with
444  * the context header and its block header follows that.
445  */
446  set = (AllocSet) malloc(firstBlockSize);
447  if (set == NULL)
448  {
449  if (TopMemoryContext)
451  ereport(ERROR,
452  (errcode(ERRCODE_OUT_OF_MEMORY),
453  errmsg("out of memory"),
454  errdetail("Failed while creating memory context \"%s\".",
455  name)));
456  }
457 
458  /*
459  * Avoid writing code that can fail between here and MemoryContextCreate;
460  * we'd leak the header/initial block if we ereport in this stretch.
461  */
462 
463  /* Fill in the initial block's block header */
464  block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
465  block->aset = set;
466  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
467  block->endptr = ((char *) set) + firstBlockSize;
468  block->prev = NULL;
469  block->next = NULL;
470 
471  /* Mark unallocated space NOACCESS; leave the block header alone. */
472  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
473 
474  /* Remember block as part of block list */
475  set->blocks = block;
476  /* Mark block as not to be released at reset time */
477  set->keeper = block;
478 
479  /* Finish filling in aset-specific parts of the context header */
480  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
481 
482  set->initBlockSize = initBlockSize;
483  set->maxBlockSize = maxBlockSize;
484  set->nextBlockSize = initBlockSize;
485  set->freeListIndex = freeListIndex;
486 
487  /*
488  * Compute the allocation chunk size limit for this context. It can't be
489  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
490  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
491  * even a significant fraction of it, should be treated as large chunks
492  * too. For the typical case of maxBlockSize a power of 2, the chunk size
493  * limit will be at most 1/8th maxBlockSize, so that given a stream of
494  * requests that are all the maximum chunk size we will waste at most
495  * 1/8th of the allocated space.
496  *
497  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
498  */
500  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
501 
502  /*
503  * Determine the maximum size that a chunk can be before we allocate an
504  * entire AllocBlock dedicated for that chunk. We set the absolute limit
505  * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
506  * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
507  * sized block. (We opt to keep allocChunkLimit a power-of-2 value
508  * primarily for legacy reasons rather than calculating it so that exactly
509  * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
510  */
512  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
513  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
514  set->allocChunkLimit >>= 1;
515 
516  /* Finally, do the type-independent part of context creation */
518  T_AllocSetContext,
519  MCTX_ASET_ID,
520  parent,
521  name);
522 
523  ((MemoryContext) set)->mem_allocated = firstBlockSize;
524 
525  return (MemoryContext) set;
526 }
527 
528 /*
529  * AllocSetReset
530  * Frees all memory which is allocated in the given set.
531  *
532  * Actually, this routine has some discretion about what to do.
533  * It should mark all allocated chunks freed, but it need not necessarily
534  * give back all the resources the set owns. Our actual implementation is
535  * that we give back all but the "keeper" block (which we must keep, since
536  * it shares a malloc chunk with the context header). In this way, we don't
537  * thrash malloc() when a context is repeatedly reset after small allocations,
538  * which is typical behavior for per-tuple contexts.
539  */
540 void
542 {
543  AllocSet set = (AllocSet) context;
544  AllocBlock block;
545  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
546 
547  Assert(AllocSetIsValid(set));
548 
549 #ifdef MEMORY_CONTEXT_CHECKING
550  /* Check for corruption and leaks before freeing */
551  AllocSetCheck(context);
552 #endif
553 
554  /* Remember keeper block size for Assert below */
555  keepersize = set->keeper->endptr - ((char *) set);
556 
557  /* Clear chunk freelists */
558  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
559 
560  block = set->blocks;
561 
562  /* New blocks list will be just the keeper block */
563  set->blocks = set->keeper;
564 
565  while (block != NULL)
566  {
567  AllocBlock next = block->next;
568 
569  if (block == set->keeper)
570  {
571  /* Reset the block, but don't return it to malloc */
572  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
573 
574 #ifdef CLOBBER_FREED_MEMORY
575  wipe_mem(datastart, block->freeptr - datastart);
576 #else
577  /* wipe_mem() would have done this */
578  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
579 #endif
580  block->freeptr = datastart;
581  block->prev = NULL;
582  block->next = NULL;
583  }
584  else
585  {
586  /* Normal case, release the block */
587  context->mem_allocated -= block->endptr - ((char *) block);
588 
589 #ifdef CLOBBER_FREED_MEMORY
590  wipe_mem(block, block->freeptr - ((char *) block));
591 #endif
592  free(block);
593  }
594  block = next;
595  }
596 
597  Assert(context->mem_allocated == keepersize);
598 
599  /* Reset block size allocation sequence, too */
600  set->nextBlockSize = set->initBlockSize;
601 }
602 
603 /*
604  * AllocSetDelete
605  * Frees all memory which is allocated in the given set,
606  * in preparation for deletion of the set.
607  *
608  * Unlike AllocSetReset, this *must* free all resources of the set.
609  */
610 void
612 {
613  AllocSet set = (AllocSet) context;
614  AllocBlock block = set->blocks;
615  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
616 
617  Assert(AllocSetIsValid(set));
618 
619 #ifdef MEMORY_CONTEXT_CHECKING
620  /* Check for corruption and leaks before freeing */
621  AllocSetCheck(context);
622 #endif
623 
624  /* Remember keeper block size for Assert below */
625  keepersize = set->keeper->endptr - ((char *) set);
626 
627  /*
628  * If the context is a candidate for a freelist, put it into that freelist
629  * instead of destroying it.
630  */
631  if (set->freeListIndex >= 0)
632  {
634 
635  /*
636  * Reset the context, if it needs it, so that we aren't hanging on to
637  * more than the initial malloc chunk.
638  */
639  if (!context->isReset)
640  MemoryContextResetOnly(context);
641 
642  /*
643  * If the freelist is full, just discard what's already in it. See
644  * comments with context_freelists[].
645  */
646  if (freelist->num_free >= MAX_FREE_CONTEXTS)
647  {
648  while (freelist->first_free != NULL)
649  {
650  AllocSetContext *oldset = freelist->first_free;
651 
652  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
653  freelist->num_free--;
654 
655  /* All that remains is to free the header/initial block */
656  free(oldset);
657  }
658  Assert(freelist->num_free == 0);
659  }
660 
661  /* Now add the just-deleted context to the freelist. */
662  set->header.nextchild = (MemoryContext) freelist->first_free;
663  freelist->first_free = set;
664  freelist->num_free++;
665 
666  return;
667  }
668 
669  /* Free all blocks, except the keeper which is part of context header */
670  while (block != NULL)
671  {
672  AllocBlock next = block->next;
673 
674  if (block != set->keeper)
675  context->mem_allocated -= block->endptr - ((char *) block);
676 
677 #ifdef CLOBBER_FREED_MEMORY
678  wipe_mem(block, block->freeptr - ((char *) block));
679 #endif
680 
681  if (block != set->keeper)
682  free(block);
683 
684  block = next;
685  }
686 
687  Assert(context->mem_allocated == keepersize);
688 
689  /* Finally, free the context header, including the keeper block */
690  free(set);
691 }
692 
693 /*
694  * AllocSetAlloc
695  * Returns pointer to allocated memory of given size or NULL if
696  * request could not be completed; memory is added to the set.
697  *
698  * No request may exceed:
699  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
700  * All callers use a much-lower limit.
701  *
702  * Note: when using valgrind, it doesn't matter how the returned allocation
703  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
704  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
705  */
706 void *
708 {
709  AllocSet set = (AllocSet) context;
710  AllocBlock block;
711  MemoryChunk *chunk;
712  int fidx;
713  Size chunk_size;
714  Size blksize;
715 
716  Assert(AllocSetIsValid(set));
717 
718  /*
719  * If requested size exceeds maximum for chunks, allocate an entire block
720  * for this request.
721  */
722  if (size > set->allocChunkLimit)
723  {
724 #ifdef MEMORY_CONTEXT_CHECKING
725  /* ensure there's always space for the sentinel byte */
726  chunk_size = MAXALIGN(size + 1);
727 #else
728  chunk_size = MAXALIGN(size);
729 #endif
730 
731  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
732  block = (AllocBlock) malloc(blksize);
733  if (block == NULL)
734  return NULL;
735 
736  context->mem_allocated += blksize;
737 
738  block->aset = set;
739  block->freeptr = block->endptr = ((char *) block) + blksize;
740 
741  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
742 
743  /* mark the MemoryChunk as externally managed */
745 
746 #ifdef MEMORY_CONTEXT_CHECKING
747  chunk->requested_size = size;
748  /* set mark to catch clobber of "unused" space */
749  Assert(size < chunk_size);
750  set_sentinel(MemoryChunkGetPointer(chunk), size);
751 #endif
752 #ifdef RANDOMIZE_ALLOCATED_MEMORY
753  /* fill the allocated space with junk */
754  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
755 #endif
756 
757  /*
758  * Stick the new block underneath the active allocation block, if any,
759  * so that we don't lose the use of the space remaining therein.
760  */
761  if (set->blocks != NULL)
762  {
763  block->prev = set->blocks;
764  block->next = set->blocks->next;
765  if (block->next)
766  block->next->prev = block;
767  set->blocks->next = block;
768  }
769  else
770  {
771  block->prev = NULL;
772  block->next = NULL;
773  set->blocks = block;
774  }
775 
776  /* Ensure any padding bytes are marked NOACCESS. */
777  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
778  chunk_size - size);
779 
780  /* Disallow external access to private part of chunk header. */
782 
783  return MemoryChunkGetPointer(chunk);
784  }
785 
786  /*
787  * Request is small enough to be treated as a chunk. Look in the
788  * corresponding free list to see if there is a free chunk we could reuse.
789  * If one is found, remove it from the free list, make it again a member
790  * of the alloc set and return its data address.
791  *
792  * Note that we don't attempt to ensure there's space for the sentinel
793  * byte here. We expect a large proportion of allocations to be for sizes
794  * which are already a power of 2. If we were to always make space for a
795  * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
796  * doubling the memory requirements for such allocations.
797  */
798  fidx = AllocSetFreeIndex(size);
799  chunk = set->freelist[fidx];
800  if (chunk != NULL)
801  {
803 
804  Assert(fidx == MemoryChunkGetValue(chunk));
805 
806  /* pop this chunk off the freelist */
808  set->freelist[fidx] = link->next;
810 
811 #ifdef MEMORY_CONTEXT_CHECKING
812  chunk->requested_size = size;
813  /* set mark to catch clobber of "unused" space */
814  if (size < GetChunkSizeFromFreeListIdx(fidx))
815  set_sentinel(MemoryChunkGetPointer(chunk), size);
816 #endif
817 #ifdef RANDOMIZE_ALLOCATED_MEMORY
818  /* fill the allocated space with junk */
819  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
820 #endif
821 
822  /* Ensure any padding bytes are marked NOACCESS. */
823  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
824  GetChunkSizeFromFreeListIdx(fidx) - size);
825 
826  /* Disallow external access to private part of chunk header. */
828 
829  return MemoryChunkGetPointer(chunk);
830  }
831 
832  /*
833  * Choose the actual chunk size to allocate.
834  */
835  chunk_size = GetChunkSizeFromFreeListIdx(fidx);
836  Assert(chunk_size >= size);
837 
838  /*
839  * If there is enough room in the active allocation block, we will put the
840  * chunk into that block. Else must start a new one.
841  */
842  if ((block = set->blocks) != NULL)
843  {
844  Size availspace = block->endptr - block->freeptr;
845 
846  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
847  {
848  /*
849  * The existing active (top) block does not have enough room for
850  * the requested allocation, but it might still have a useful
851  * amount of space in it. Once we push it down in the block list,
852  * we'll never try to allocate more space from it. So, before we
853  * do that, carve up its free space into chunks that we can put on
854  * the set's freelists.
855  *
856  * Because we can only get here when there's less than
857  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
858  * more than ALLOCSET_NUM_FREELISTS-1 times.
859  */
860  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
861  {
863  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
864  int a_fidx = AllocSetFreeIndex(availchunk);
865 
866  /*
867  * In most cases, we'll get back the index of the next larger
868  * freelist than the one we need to put this chunk on. The
869  * exception is when availchunk is exactly a power of 2.
870  */
871  if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
872  {
873  a_fidx--;
874  Assert(a_fidx >= 0);
875  availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
876  }
877 
878  chunk = (MemoryChunk *) (block->freeptr);
879 
880  /* Prepare to initialize the chunk header. */
882  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
883  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
884 
885  /* store the freelist index in the value field */
886  MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
887 #ifdef MEMORY_CONTEXT_CHECKING
888  chunk->requested_size = InvalidAllocSize; /* mark it free */
889 #endif
890  /* push this chunk onto the free list */
891  link = GetFreeListLink(chunk);
892 
894  link->next = set->freelist[a_fidx];
896 
897  set->freelist[a_fidx] = chunk;
898  }
899  /* Mark that we need to create a new block */
900  block = NULL;
901  }
902  }
903 
904  /*
905  * Time to create a new regular (multi-chunk) block?
906  */
907  if (block == NULL)
908  {
909  Size required_size;
910 
911  /*
912  * The first such block has size initBlockSize, and we double the
913  * space in each succeeding block, but not more than maxBlockSize.
914  */
915  blksize = set->nextBlockSize;
916  set->nextBlockSize <<= 1;
917  if (set->nextBlockSize > set->maxBlockSize)
918  set->nextBlockSize = set->maxBlockSize;
919 
920  /*
921  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
922  * space... but try to keep it a power of 2.
923  */
924  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
925  while (blksize < required_size)
926  blksize <<= 1;
927 
928  /* Try to allocate it */
929  block = (AllocBlock) malloc(blksize);
930 
931  /*
932  * We could be asking for pretty big blocks here, so cope if malloc
933  * fails. But give up if there's less than 1 MB or so available...
934  */
935  while (block == NULL && blksize > 1024 * 1024)
936  {
937  blksize >>= 1;
938  if (blksize < required_size)
939  break;
940  block = (AllocBlock) malloc(blksize);
941  }
942 
943  if (block == NULL)
944  return NULL;
945 
946  context->mem_allocated += blksize;
947 
948  block->aset = set;
949  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
950  block->endptr = ((char *) block) + blksize;
951 
952  /* Mark unallocated space NOACCESS. */
954  blksize - ALLOC_BLOCKHDRSZ);
955 
956  block->prev = NULL;
957  block->next = set->blocks;
958  if (block->next)
959  block->next->prev = block;
960  set->blocks = block;
961  }
962 
963  /*
964  * OK, do the allocation
965  */
966  chunk = (MemoryChunk *) (block->freeptr);
967 
968  /* Prepare to initialize the chunk header. */
970 
971  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
972  Assert(block->freeptr <= block->endptr);
973 
974  /* store the free list index in the value field */
975  MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
976 
977 #ifdef MEMORY_CONTEXT_CHECKING
978  chunk->requested_size = size;
979  /* set mark to catch clobber of "unused" space */
980  if (size < chunk_size)
981  set_sentinel(MemoryChunkGetPointer(chunk), size);
982 #endif
983 #ifdef RANDOMIZE_ALLOCATED_MEMORY
984  /* fill the allocated space with junk */
985  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
986 #endif
987 
988  /* Ensure any padding bytes are marked NOACCESS. */
989  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
990  chunk_size - size);
991 
992  /* Disallow external access to private part of chunk header. */
994 
995  return MemoryChunkGetPointer(chunk);
996 }
997 
998 /*
999  * AllocSetFree
1000  * Frees allocated memory; memory is removed from the set.
1001  */
1002 void
1003 AllocSetFree(void *pointer)
1004 {
1005  AllocSet set;
1006  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1007 
1008  /* Allow access to private part of chunk header. */
1010 
1011  if (MemoryChunkIsExternal(chunk))
1012  {
1013  /* Release single-chunk block. */
1014  AllocBlock block = ExternalChunkGetBlock(chunk);
1015 
1016  /*
1017  * Try to verify that we have a sane block pointer: the block header
1018  * should reference an aset and the freeptr should match the endptr.
1019  */
1020  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1021  elog(ERROR, "could not find block containing chunk %p", chunk);
1022 
1023  set = block->aset;
1024 
1025 #ifdef MEMORY_CONTEXT_CHECKING
1026  {
1027  Size chunk_size = block->endptr - (char *) pointer;
1028 
1029  /* Test for someone scribbling on unused space in chunk */
1030  Assert(chunk->requested_size < chunk_size);
1031  if (!sentinel_ok(pointer, chunk->requested_size))
1032  elog(WARNING, "detected write past chunk end in %s %p",
1033  set->header.name, chunk);
1034  }
1035 #endif
1036 
1037  /* OK, remove block from aset's list and free it */
1038  if (block->prev)
1039  block->prev->next = block->next;
1040  else
1041  set->blocks = block->next;
1042  if (block->next)
1043  block->next->prev = block->prev;
1044 
1045  set->header.mem_allocated -= block->endptr - ((char *) block);
1046 
1047 #ifdef CLOBBER_FREED_MEMORY
1048  wipe_mem(block, block->freeptr - ((char *) block));
1049 #endif
1050  free(block);
1051  }
1052  else
1053  {
1054  AllocBlock block = MemoryChunkGetBlock(chunk);
1055  int fidx;
1057 
1058  /*
1059  * In this path, for speed reasons we just Assert that the referenced
1060  * block is good. We can also Assert that the value field is sane.
1061  * Future field experience may show that these Asserts had better
1062  * become regular runtime test-and-elog checks.
1063  */
1064  Assert(AllocBlockIsValid(block));
1065  set = block->aset;
1066 
1067  fidx = MemoryChunkGetValue(chunk);
1068  Assert(FreeListIdxIsValid(fidx));
1069  link = GetFreeListLink(chunk);
1070 
1071 #ifdef MEMORY_CONTEXT_CHECKING
1072  /* Test for someone scribbling on unused space in chunk */
1073  if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1074  if (!sentinel_ok(pointer, chunk->requested_size))
1075  elog(WARNING, "detected write past chunk end in %s %p",
1076  set->header.name, chunk);
1077 #endif
1078 
1079 #ifdef CLOBBER_FREED_MEMORY
1080  wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1081 #endif
1082  /* push this chunk onto the top of the free list */
1084  link->next = set->freelist[fidx];
1086  set->freelist[fidx] = chunk;
1087 
1088 #ifdef MEMORY_CONTEXT_CHECKING
1089 
1090  /*
1091  * Reset requested_size to InvalidAllocSize in chunks that are on free
1092  * list.
1093  */
1094  chunk->requested_size = InvalidAllocSize;
1095 #endif
1096  }
1097 }
1098 
1099 /*
1100  * AllocSetRealloc
1101  * Returns new pointer to allocated memory of given size or NULL if
1102  * request could not be completed; this memory is added to the set.
1103  * Memory associated with given pointer is copied into the new memory,
1104  * and the old memory is freed.
1105  *
1106  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1107  * makes our Valgrind client requests less-precise, hazarding false negatives.
1108  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1109  * request size.)
1110  */
1111 void *
1112 AllocSetRealloc(void *pointer, Size size)
1113 {
1114  AllocBlock block;
1115  AllocSet set;
1116  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1117  Size oldsize;
1118  int fidx;
1119 
1120  /* Allow access to private part of chunk header. */
1122 
1123  if (MemoryChunkIsExternal(chunk))
1124  {
1125  /*
1126  * The chunk must have been allocated as a single-chunk block. Use
1127  * realloc() to make the containing block bigger, or smaller, with
1128  * minimum space wastage.
1129  */
1130  Size chksize;
1131  Size blksize;
1132  Size oldblksize;
1133 
1134  block = ExternalChunkGetBlock(chunk);
1135 
1136  /*
1137  * Try to verify that we have a sane block pointer: the block header
1138  * should reference an aset and the freeptr should match the endptr.
1139  */
1140  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1141  elog(ERROR, "could not find block containing chunk %p", chunk);
1142 
1143  set = block->aset;
1144 
1145  oldsize = block->endptr - (char *) pointer;
1146 
1147 #ifdef MEMORY_CONTEXT_CHECKING
1148  /* Test for someone scribbling on unused space in chunk */
1149  Assert(chunk->requested_size < oldsize);
1150  if (!sentinel_ok(pointer, chunk->requested_size))
1151  elog(WARNING, "detected write past chunk end in %s %p",
1152  set->header.name, chunk);
1153 #endif
1154 
1155 #ifdef MEMORY_CONTEXT_CHECKING
1156  /* ensure there's always space for the sentinel byte */
1157  chksize = MAXALIGN(size + 1);
1158 #else
1159  chksize = MAXALIGN(size);
1160 #endif
1161 
1162  /* Do the realloc */
1163  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1164  oldblksize = block->endptr - ((char *) block);
1165 
1166  block = (AllocBlock) realloc(block, blksize);
1167  if (block == NULL)
1168  {
1169  /* Disallow external access to private part of chunk header. */
1171  return NULL;
1172  }
1173 
1174  /* updated separately, not to underflow when (oldblksize > blksize) */
1175  set->header.mem_allocated -= oldblksize;
1176  set->header.mem_allocated += blksize;
1177 
1178  block->freeptr = block->endptr = ((char *) block) + blksize;
1179 
1180  /* Update pointers since block has likely been moved */
1181  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1182  pointer = MemoryChunkGetPointer(chunk);
1183  if (block->prev)
1184  block->prev->next = block;
1185  else
1186  set->blocks = block;
1187  if (block->next)
1188  block->next->prev = block;
1189 
1190 #ifdef MEMORY_CONTEXT_CHECKING
1191 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1192  /* We can only fill the extra space if we know the prior request */
1193  if (size > chunk->requested_size)
1194  randomize_mem((char *) pointer + chunk->requested_size,
1195  size - chunk->requested_size);
1196 #endif
1197 
1198  /*
1199  * realloc() (or randomize_mem()) will have left any newly-allocated
1200  * part UNDEFINED, but we may need to adjust trailing bytes from the
1201  * old allocation.
1202  */
1203 #ifdef USE_VALGRIND
1204  if (oldsize > chunk->requested_size)
1205  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1206  oldsize - chunk->requested_size);
1207 #endif
1208 
1209  chunk->requested_size = size;
1210  /* set mark to catch clobber of "unused" space */
1211  Assert(size < chksize);
1212  set_sentinel(pointer, size);
1213 #else /* !MEMORY_CONTEXT_CHECKING */
1214 
1215  /*
1216  * We don't know how much of the old chunk size was the actual
1217  * allocation; it could have been as small as one byte. We have to be
1218  * conservative and just mark the entire old portion DEFINED.
1219  */
1220  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1221 #endif
1222 
1223  /* Ensure any padding bytes are marked NOACCESS. */
1224  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1225 
1226  /* Disallow external access to private part of chunk header. */
1228 
1229  return pointer;
1230  }
1231 
1232  block = MemoryChunkGetBlock(chunk);
1233 
1234  /*
1235  * In this path, for speed reasons we just Assert that the referenced
1236  * block is good. We can also Assert that the value field is sane. Future
1237  * field experience may show that these Asserts had better become regular
1238  * runtime test-and-elog checks.
1239  */
1240  Assert(AllocBlockIsValid(block));
1241  set = block->aset;
1242 
1243  fidx = MemoryChunkGetValue(chunk);
1244  Assert(FreeListIdxIsValid(fidx));
1245  oldsize = GetChunkSizeFromFreeListIdx(fidx);
1246 
1247 #ifdef MEMORY_CONTEXT_CHECKING
1248  /* Test for someone scribbling on unused space in chunk */
1249  if (chunk->requested_size < oldsize)
1250  if (!sentinel_ok(pointer, chunk->requested_size))
1251  elog(WARNING, "detected write past chunk end in %s %p",
1252  set->header.name, chunk);
1253 #endif
1254 
1255  /*
1256  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1257  * allocated area already is >= the new size. (In particular, we will
1258  * fall out here if the requested size is a decrease.)
1259  */
1260  if (oldsize >= size)
1261  {
1262 #ifdef MEMORY_CONTEXT_CHECKING
1263  Size oldrequest = chunk->requested_size;
1264 
1265 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1266  /* We can only fill the extra space if we know the prior request */
1267  if (size > oldrequest)
1268  randomize_mem((char *) pointer + oldrequest,
1269  size - oldrequest);
1270 #endif
1271 
1272  chunk->requested_size = size;
1273 
1274  /*
1275  * If this is an increase, mark any newly-available part UNDEFINED.
1276  * Otherwise, mark the obsolete part NOACCESS.
1277  */
1278  if (size > oldrequest)
1279  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1280  size - oldrequest);
1281  else
1282  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1283  oldsize - size);
1284 
1285  /* set mark to catch clobber of "unused" space */
1286  if (size < oldsize)
1287  set_sentinel(pointer, size);
1288 #else /* !MEMORY_CONTEXT_CHECKING */
1289 
1290  /*
1291  * We don't have the information to determine whether we're growing
1292  * the old request or shrinking it, so we conservatively mark the
1293  * entire new allocation DEFINED.
1294  */
1295  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
1296  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1297 #endif
1298 
1299  /* Disallow external access to private part of chunk header. */
1301 
1302  return pointer;
1303  }
1304  else
1305  {
1306  /*
1307  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1308  * allocate a new chunk and copy the data. Since we know the existing
1309  * data isn't huge, this won't involve any great memcpy expense, so
1310  * it's not worth being smarter. (At one time we tried to avoid
1311  * memcpy when it was possible to enlarge the chunk in-place, but that
1312  * turns out to misbehave unpleasantly for repeated cycles of
1313  * palloc/repalloc/pfree: the eventually freed chunks go into the
1314  * wrong freelist for the next initial palloc request, and so we leak
1315  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1316  */
1317  AllocPointer newPointer;
1318 
1319  /* allocate new chunk */
1320  newPointer = AllocSetAlloc((MemoryContext) set, size);
1321 
1322  /* leave immediately if request was not completed */
1323  if (newPointer == NULL)
1324  {
1325  /* Disallow external access to private part of chunk header. */
1327  return NULL;
1328  }
1329 
1330  /*
1331  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1332  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1333  * definedness from the old allocation to the new. If we know the old
1334  * allocation, copy just that much. Otherwise, make the entire old
1335  * chunk defined to avoid errors as we copy the currently-NOACCESS
1336  * trailing bytes.
1337  */
1338  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1339 #ifdef MEMORY_CONTEXT_CHECKING
1340  oldsize = chunk->requested_size;
1341 #else
1342  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1343 #endif
1344 
1345  /* transfer existing data (certain to fit) */
1346  memcpy(newPointer, pointer, oldsize);
1347 
1348  /* free old chunk */
1349  AllocSetFree(pointer);
1350 
1351  return newPointer;
1352  }
1353 }
1354 
1355 /*
1356  * AllocSetGetChunkContext
1357  * Return the MemoryContext that 'pointer' belongs to.
1358  */
1361 {
1362  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1363  AllocBlock block;
1364  AllocSet set;
1365 
1366  if (MemoryChunkIsExternal(chunk))
1367  block = ExternalChunkGetBlock(chunk);
1368  else
1369  block = (AllocBlock) MemoryChunkGetBlock(chunk);
1370 
1371  Assert(AllocBlockIsValid(block));
1372  set = block->aset;
1373 
1374  return &set->header;
1375 }
1376 
1377 /*
1378  * AllocSetGetChunkSpace
1379  * Given a currently-allocated chunk, determine the total space
1380  * it occupies (including all memory-allocation overhead).
1381  */
1382 Size
1384 {
1385  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1386  int fidx;
1387 
1388  if (MemoryChunkIsExternal(chunk))
1389  {
1390  AllocBlock block = ExternalChunkGetBlock(chunk);
1391 
1392  Assert(AllocBlockIsValid(block));
1393  return block->endptr - (char *) chunk;
1394  }
1395 
1396  fidx = MemoryChunkGetValue(chunk);
1397  Assert(FreeListIdxIsValid(fidx));
1399 }
1400 
1401 /*
1402  * AllocSetIsEmpty
1403  * Is an allocset empty of any allocated space?
1404  */
1405 bool
1407 {
1408  Assert(AllocSetIsValid(context));
1409 
1410  /*
1411  * For now, we say "empty" only if the context is new or just reset. We
1412  * could examine the freelists to determine if all space has been freed,
1413  * but it's not really worth the trouble for present uses of this
1414  * functionality.
1415  */
1416  if (context->isReset)
1417  return true;
1418  return false;
1419 }
1420 
1421 /*
1422  * AllocSetStats
1423  * Compute stats about memory consumption of an allocset.
1424  *
1425  * printfunc: if not NULL, pass a human-readable stats string to this.
1426  * passthru: pass this pointer through to printfunc.
1427  * totals: if not NULL, add stats about this context into *totals.
1428  * print_to_stderr: print stats to stderr if true, elog otherwise.
1429  */
1430 void
1432  MemoryStatsPrintFunc printfunc, void *passthru,
1433  MemoryContextCounters *totals, bool print_to_stderr)
1434 {
1435  AllocSet set = (AllocSet) context;
1436  Size nblocks = 0;
1437  Size freechunks = 0;
1438  Size totalspace;
1439  Size freespace = 0;
1440  AllocBlock block;
1441  int fidx;
1442 
1443  Assert(AllocSetIsValid(set));
1444 
1445  /* Include context header in totalspace */
1446  totalspace = MAXALIGN(sizeof(AllocSetContext));
1447 
1448  for (block = set->blocks; block != NULL; block = block->next)
1449  {
1450  nblocks++;
1451  totalspace += block->endptr - ((char *) block);
1452  freespace += block->endptr - block->freeptr;
1453  }
1454  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1455  {
1456  Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1457  MemoryChunk *chunk = set->freelist[fidx];
1458 
1459  while (chunk != NULL)
1460  {
1462 
1463  Assert(MemoryChunkGetValue(chunk) == fidx);
1464 
1465  freechunks++;
1466  freespace += chksz + ALLOC_CHUNKHDRSZ;
1467 
1469  chunk = link->next;
1471  }
1472  }
1473 
1474  if (printfunc)
1475  {
1476  char stats_string[200];
1477 
1478  snprintf(stats_string, sizeof(stats_string),
1479  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1480  totalspace, nblocks, freespace, freechunks,
1481  totalspace - freespace);
1482  printfunc(context, passthru, stats_string, print_to_stderr);
1483  }
1484 
1485  if (totals)
1486  {
1487  totals->nblocks += nblocks;
1488  totals->freechunks += freechunks;
1489  totals->totalspace += totalspace;
1490  totals->freespace += freespace;
1491  }
1492 }
1493 
1494 
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	const char *name = set->header.name;
	AllocBlock	prevblock;
	AllocBlock	block;
	Size		total_allocated = 0;

	for (prevblock = NULL, block = set->blocks;
		 block != NULL;
		 prevblock = block, block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;
		long		nchunks = 0;
		bool		has_external_chunk = false;

		/* the keeper block lives inside the context header allocation */
		if (set->keeper == block)
			total_allocated += block->endptr - ((char *) set);
		else
			total_allocated += block->endptr - ((char *) block);

		/*
		 * Empty block - empty can be keeper-block only
		 */
		if (!blk_used)
		{
			if (set->keeper != block)
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Check block header fields
		 */
		if (block->aset != set ||
			block->prev != prevblock ||
			block->freeptr < bpoz ||
			block->freeptr > block->endptr)
			elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
				 name, block);

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			MemoryChunk *chunk = (MemoryChunk *) bpoz;
			Size		chsize,
						dsize;

			/* Allow access to private part of chunk header. */
			VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);

			if (MemoryChunkIsExternal(chunk))
			{
				chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
				has_external_chunk = true;

				/* make sure this chunk consumes the entire block */
				if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
					elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
						 name, chunk, block);
			}
			else
			{
				int			fidx = MemoryChunkGetValue(chunk);

				if (!FreeListIdxIsValid(fidx))
					elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
						 name, chunk, block);

				chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */

				/*
				 * Check the stored block offset correctly references this
				 * block.
				 */
				if (block != MemoryChunkGetBlock(chunk))
					elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
						 name, chunk, block);
			}
			dsize = chunk->requested_size;	/* real data */

			/* an allocated chunk's requested size must be <= the chsize */
			if (dsize != InvalidAllocSize && dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);

			/* chsize must not be smaller than the first freelist's size */
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/*
			 * Check for overwrite of padding space in an allocated chunk.
			 */
			if (dsize != InvalidAllocSize && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * If chunk is allocated, disallow external access to private
			 * part of chunk header.
			 */
			if (dsize != InvalidAllocSize)
				VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);

		if (has_external_chunk && nchunks > 1)
			elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
				 name, block);
	}

	Assert(total_allocated == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
void AllocSetReset(MemoryContext context)
Definition: aset.c:541
#define AllocSetIsValid(set)
Definition: aset.c:209
#define AllocBlockIsValid(block)
Definition: aset.c:216
#define GetFreeListLink(chkptr)
Definition: aset.c:132
#define FreeListIdxIsValid(fidx)
Definition: aset.c:136
Size AllocSetGetChunkSpace(void *pointer)
Definition: aset.c:1383
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:105
MemoryContext AllocSetGetChunkContext(void *pointer)
Definition: aset.c:1360
void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition: aset.c:1431
#define GetChunkSizeFromFreeListIdx(fidx)
Definition: aset.c:140
#define ALLOC_MINBITS
Definition: aset.c:83
struct AllocBlockData * AllocBlock
Definition: aset.c:107
#define MAX_FREE_CONTEXTS
Definition: aset.c:250
static int AllocSetFreeIndex(Size size)
Definition: aset.c:279
bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1406
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:104
struct AllocBlockData AllocBlockData
void * AllocSetRealloc(void *pointer, Size size)
Definition: aset.c:1112
void * AllocPointer
Definition: aset.c:113
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:84
struct AllocSetContext AllocSetContext
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:87
void AllocSetFree(void *pointer)
Definition: aset.c:1003
void AllocSetDelete(MemoryContext context)
Definition: aset.c:611
struct AllocSetFreeList AllocSetFreeList
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:85
struct AllocFreeListLink AllocFreeListLink
static AllocSetFreeList context_freelists[2]
Definition: aset.c:259
#define ALLOCCHUNK_PRIVATE_LEN
Definition: aset.c:197
#define ExternalChunkGetBlock(chunk)
Definition: aset.c:224
void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:707
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:349
AllocSetContext * AllocSet
Definition: aset.c:168
static int32 next
Definition: blutils.c:219
unsigned int uint32
Definition: c.h:442
#define MAXALIGN(LEN)
Definition: c.h:747
#define Max(x, y)
Definition: c.h:931
#define MemSetAligned(start, val, len)
Definition: c.h:983
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:869
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:166
size_t Size
Definition: c.h:541
int errdetail(const char *fmt,...)
Definition: elog.c:1202
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define WARNING
Definition: elog.h:36
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
const char * name
Definition: encode.c:561
#define realloc(a, b)
Definition: header.h:60
#define free(a)
Definition: header.h:65
#define malloc(a)
Definition: header.h:50
Assert(fmt[strlen(fmt) - 1] !='\n')
void MemoryContextCreate(MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
Definition: mcxt.c:946
MemoryContext TopMemoryContext
Definition: mcxt.c:130
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:672
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:322
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition: memnodes.h:54
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:160
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:150
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:49
#define InvalidAllocSize
Definition: memutils.h:47
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:180
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:161
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:151
@ MCTX_ASET_ID
#define MEMORYCHUNK_MAX_BLOCKOFFSET
static Size MemoryChunkGetValue(MemoryChunk *chunk)
#define MemoryChunkGetPointer(c)
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void MemoryChunkSetHdrMaskExternal(MemoryChunk *chunk, MemoryContextMethodID methodid)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
PGDLLIMPORT const uint8 pg_leftmost_one_pos[256]
Definition: pg_bitutils.c:34
#define snprintf
Definition: port.h:238
AllocBlock prev
Definition: aset.c:185
AllocSet aset
Definition: aset.c:184
char * freeptr
Definition: aset.c:187
AllocBlock next
Definition: aset.c:186
char * endptr
Definition: aset.c:188
MemoryContextData header
Definition: aset.c:154
int freeListIndex
Definition: aset.c:165
AllocBlock blocks
Definition: aset.c:156
Size maxBlockSize
Definition: aset.c:160
AllocBlock keeper
Definition: aset.c:163
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:157
Size initBlockSize
Definition: aset.c:159
Size nextBlockSize
Definition: aset.c:161
Size allocChunkLimit
Definition: aset.c:162
int num_free
Definition: aset.c:254
AllocSetContext * first_free
Definition: aset.c:255
MemoryContext nextchild
Definition: memnodes.h:92
Size mem_allocated
Definition: memnodes.h:87
const char * name
Definition: memnodes.h:93