PostgreSQL Source Code  git master
aset.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * aset.c
4  * Allocation set definitions.
5  *
6  * AllocSet is our standard implementation of the abstract MemoryContext
7  * type.
8  *
9  *
10  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
11  * Portions Copyright (c) 1994, Regents of the University of California
12  *
13  * IDENTIFICATION
14  * src/backend/utils/mmgr/aset.c
15  *
16  * NOTE:
17  * This is a new (Feb. 05, 1999) implementation of the allocation set
18  * routines. AllocSet...() does not use OrderedSet...() any more.
19  * Instead it manages allocations in a block pool by itself, combining
20  * many small allocations in a few bigger blocks. AllocSetFree() normally
21  * doesn't free() memory really. It just adds the free'd area to some
22  * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23  * at once on AllocSetReset(), which happens when the memory context gets
24  * destroyed.
25  * Jan Wieck
26  *
27  * Performance improvement from Tom Lane, 8/99: for extremely large request
28  * sizes, we do want to be able to give the memory back to free() as soon
29  * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30  * freelist entries that might never be usable. This is especially needed
31  * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32  * the previous instances of the block were guaranteed to be wasted until
33  * AllocSetReset() under the old way.
34  *
35  * Further improvement 12/00: as the code stood, request sizes in the
36  * midrange between "small" and "large" were handled very inefficiently,
37  * because any sufficiently large free chunk would be used to satisfy a
38  * request, even if it was much larger than necessary. This led to more
39  * and more wasted space in allocated chunks over time. To fix, get rid
40  * of the midrange behavior: we now handle only "small" power-of-2-size
41  * chunks as chunks. Anything "large" is passed off to malloc(). Change
42  * the number of freelists to change the small/large boundary.
43  *
44  *-------------------------------------------------------------------------
45  */
46 
47 #include "postgres.h"
48 
49 #include "port/pg_bitutils.h"
50 #include "utils/memdebug.h"
51 #include "utils/memutils.h"
54 
55 /*--------------------
56  * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57  * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58  *
59  * Note that all chunks in the freelists have power-of-2 sizes. This
60  * improves recyclability: we may waste some space, but the wasted space
61  * should stay pretty constant as requests are made and released.
62  *
63  * A request too large for the last freelist is handled by allocating a
64  * dedicated block from malloc(). The block still has a block header and
65  * chunk header, but when the chunk is freed we'll return the whole block
66  * to malloc(), not put it on our freelists.
67  *
68  * CAUTION: ALLOC_MINBITS must be large enough so that
69  * 1<<ALLOC_MINBITS is at least MAXALIGN,
70  * or we may fail to align the smallest chunks adequately.
71  * 8-byte alignment is enough on all currently known machines. This 8-byte
72  * minimum also allows us to store a pointer to the next freelist item within
73  * the chunk of memory itself.
74  *
75  * With the current parameters, request sizes up to 8K are treated as chunks,
76  * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77  * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78  * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79  * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80  *--------------------
81  */
82 
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
/* Size of largest chunk that we use a fixed size for (8 kB) */
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
#define ALLOC_CHUNK_FRACTION	4
89 
/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(MemoryChunk)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
114 
115 /*
116  * AllocFreeListLink
117  * When pfreeing memory, if we maintain a freelist for the given chunk's
118  * size then we use a AllocFreeListLink to point to the current item in
119  * the AllocSetContext's freelist and then set the given freelist element
120  * to point to the chunk being freed.
121  */
122 typedef struct AllocFreeListLink
123 {
126 
127 /*
128  * Obtain a AllocFreeListLink for the given chunk. Allocation sizes are
129  * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
130  * itself to store the freelist link.
131  */
132 #define GetFreeListLink(chkptr) \
133  (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
134 
/* Validate a freelist index retrieved from a chunk header */
#define FreeListIdxIsValid(fidx) \
	((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)

/* Determine the size of the chunk based on the freelist index */
#define GetChunkSizeFromFreeListIdx(fidx) \
	((((Size) 1) << ALLOC_MINBITS) << (fidx))
142 
143 /*
144  * AllocSetContext is our standard implementation of MemoryContext.
145  *
146  * Note: header.isReset means there is nothing for AllocSetReset to do.
147  * This is different from the aset being physically empty (empty blocks list)
148  * because we will still have a keeper block. It's also different from the set
149  * being logically empty, because we don't attempt to detect pfree'ing the
150  * last active chunk.
151  */
152 typedef struct AllocSetContext
153 {
154  MemoryContextData header; /* Standard memory-context fields */
155  /* Info about storage allocated in this context: */
156  AllocBlock blocks; /* head of list of blocks in this set */
157  MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
158  /* Allocation parameters for this context: */
159  uint32 initBlockSize; /* initial block size */
160  uint32 maxBlockSize; /* maximum block size */
161  uint32 nextBlockSize; /* next block size to allocate */
162  uint32 allocChunkLimit; /* effective chunk size limit */
163  /* freelist this context could be put in, or -1 if not a candidate: */
164  int freeListIndex; /* index in context_freelists[], or -1 */
166 
168 
169 /*
170  * AllocBlock
171  * An AllocBlock is the unit of memory that is obtained by aset.c
172  * from malloc(). It contains one or more MemoryChunks, which are
173  * the units requested by palloc() and freed by pfree(). MemoryChunks
174  * cannot be returned to malloc() individually, instead they are put
175  * on freelists by pfree() and re-used by the next palloc() that has
176  * a matching request size.
177  *
178  * AllocBlockData is the header data for a block --- the usable space
179  * within the block begins at the next alignment boundary.
180  */
181 typedef struct AllocBlockData
182 {
183  AllocSet aset; /* aset that owns this block */
184  AllocBlock prev; /* prev block in aset's blocks list, if any */
185  AllocBlock next; /* next block in aset's blocks list, if any */
186  char *freeptr; /* start of free space in this block */
187  char *endptr; /* end of space in this block */
189 
/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) \
	(PointerIsValid(set) && IsA(set, AllocSetContext))

/*
 * AllocBlockIsValid
 *		True iff block is valid block of allocation set.
 */
#define AllocBlockIsValid(block) \
	(PointerIsValid(block) && AllocSetIsValid((block)->aset))

/*
 * We always store external chunks on a dedicated block.  This makes fetching
 * the block from an external chunk easy since it's always the first and only
 * chunk on the block.
 *
 * Note: the argument is parenthesized in the expansion to keep the macro
 * safe for non-trivial argument expressions.
 */
#define ExternalChunkGetBlock(chunk) \
	(AllocBlock) ((char *) (chunk) - ALLOC_BLOCKHDRSZ)
/*
 * Rather than repeatedly creating and deleting memory contexts, we keep some
 * freed contexts in freelists so that we can hand them out again with little
 * work.  Before putting a context in a freelist, we reset it so that it has
 * only its initial malloc chunk and no others.  To be a candidate for a
 * freelist, a context must have the same minContextSize/initBlockSize as
 * other contexts in the list; but its maxBlockSize is irrelevant since that
 * doesn't affect the size of the initial chunk.
 *
 * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
 * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
 * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
 *
 * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
 * hopes of improving locality of reference.  But if there get to be too
 * many contexts in the list, we'd prefer to drop the most-recently-created
 * contexts in hopes of keeping the process memory map compact.
 * We approximate that by simply deleting all existing entries when the list
 * overflows, on the assumption that queries that allocate a lot of contexts
 * will probably free them in more or less reverse order of allocation.
 *
 * Contexts in a freelist are chained via their nextchild pointers.
 */
#define MAX_FREE_CONTEXTS 100	/* arbitrary limit on freelist length */

/* Obtain the keeper block for an allocation set */
#define KeeperBlock(set) \
	((AllocBlock) (((char *) (set)) + MAXALIGN(sizeof(AllocSetContext))))

/* Check if the block is the keeper block of the given allocation set */
#define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
249 
250 typedef struct AllocSetFreeList
251 {
252  int num_free; /* current list length */
253  AllocSetContext *first_free; /* list header */
255 
256 /* context_freelists[0] is for default params, [1] for small params */
258 {
259  {
260  0, NULL
261  },
262  {
263  0, NULL
264  }
265 };
266 
267 
268 /* ----------
269  * AllocSetFreeIndex -
270  *
271  * Depending on the size of an allocation compute which freechunk
272  * list of the alloc set it belongs to. Caller must have verified
273  * that size <= ALLOC_CHUNK_LIMIT.
274  * ----------
275  */
276 static inline int
278 {
279  int idx;
280 
281  if (size > (1 << ALLOC_MINBITS))
282  {
283  /*----------
284  * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
285  * This is the same as
286  * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
287  * or equivalently
288  * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
289  *
290  * However, for platforms without intrinsic support, we duplicate the
291  * logic here, allowing an additional optimization. It's reasonable
292  * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
293  * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
294  * the last two bytes.
295  *
296  * Yes, this function is enough of a hot-spot to make it worth this
297  * much trouble.
298  *----------
299  */
300 #ifdef HAVE_BITSCAN_REVERSE
301  idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
302 #else
303  uint32 t,
304  tsize;
305 
306  /* Statically assert that we only have a 16-bit input value. */
308  "ALLOC_CHUNK_LIMIT must be less than 64kB");
309 
310  tsize = size - 1;
311  t = tsize >> 8;
312  idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
313  idx -= ALLOC_MINBITS - 1;
314 #endif
315 
317  }
318  else
319  idx = 0;
320 
321  return idx;
322 }
323 
324 
325 /*
326  * Public routines
327  */
328 
329 
330 /*
331  * AllocSetContextCreateInternal
332  * Create a new AllocSet context.
333  *
334  * parent: parent context, or NULL if top-level context
335  * name: name of context (must be statically allocated)
336  * minContextSize: minimum context size
337  * initBlockSize: initial allocation block size
338  * maxBlockSize: maximum allocation block size
339  *
340  * Most callers should abstract the context size parameters using a macro
341  * such as ALLOCSET_DEFAULT_SIZES.
342  *
343  * Note: don't call this directly; go through the wrapper macro
344  * AllocSetContextCreate.
345  */
348  const char *name,
349  Size minContextSize,
350  Size initBlockSize,
351  Size maxBlockSize)
352 {
353  int freeListIndex;
354  Size firstBlockSize;
355  AllocSet set;
356  AllocBlock block;
357 
358  /* ensure MemoryChunk's size is properly maxaligned */
360  "sizeof(MemoryChunk) is not maxaligned");
361  /* check we have enough space to store the freelist link */
363  "sizeof(AllocFreeListLink) larger than minimum allocation size");
364 
365  /*
366  * First, validate allocation parameters. Once these were regular runtime
367  * tests and elog's, but in practice Asserts seem sufficient because
368  * nobody varies their parameters at runtime. We somewhat arbitrarily
369  * enforce a minimum 1K block size. We restrict the maximum block size to
370  * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
371  * regards to addressing the offset between the chunk and the block that
372  * the chunk is stored on. We would be unable to store the offset between
373  * the chunk and block for any chunks that were beyond
374  * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
375  * larger than this.
376  */
377  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
378  initBlockSize >= 1024);
379  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
380  maxBlockSize >= initBlockSize &&
381  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
382  Assert(minContextSize == 0 ||
383  (minContextSize == MAXALIGN(minContextSize) &&
384  minContextSize >= 1024 &&
385  minContextSize <= maxBlockSize));
386  Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
387 
388  /*
389  * Check whether the parameters match either available freelist. We do
390  * not need to demand a match of maxBlockSize.
391  */
392  if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
393  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
394  freeListIndex = 0;
395  else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
396  initBlockSize == ALLOCSET_SMALL_INITSIZE)
397  freeListIndex = 1;
398  else
399  freeListIndex = -1;
400 
401  /*
402  * If a suitable freelist entry exists, just recycle that context.
403  */
404  if (freeListIndex >= 0)
405  {
406  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
407 
408  if (freelist->first_free != NULL)
409  {
410  /* Remove entry from freelist */
411  set = freelist->first_free;
412  freelist->first_free = (AllocSet) set->header.nextchild;
413  freelist->num_free--;
414 
415  /* Update its maxBlockSize; everything else should be OK */
416  set->maxBlockSize = maxBlockSize;
417 
418  /* Reinitialize its header, installing correct name and parent */
420  T_AllocSetContext,
421  MCTX_ASET_ID,
422  parent,
423  name);
424 
425  ((MemoryContext) set)->mem_allocated =
426  KeeperBlock(set)->endptr - ((char *) set);
427 
428  return (MemoryContext) set;
429  }
430  }
431 
432  /* Determine size of initial block */
433  firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
435  if (minContextSize != 0)
436  firstBlockSize = Max(firstBlockSize, minContextSize);
437  else
438  firstBlockSize = Max(firstBlockSize, initBlockSize);
439 
440  /*
441  * Allocate the initial block. Unlike other aset.c blocks, it starts with
442  * the context header and its block header follows that.
443  */
444  set = (AllocSet) malloc(firstBlockSize);
445  if (set == NULL)
446  {
447  if (TopMemoryContext)
449  ereport(ERROR,
450  (errcode(ERRCODE_OUT_OF_MEMORY),
451  errmsg("out of memory"),
452  errdetail("Failed while creating memory context \"%s\".",
453  name)));
454  }
455 
456  /*
457  * Avoid writing code that can fail between here and MemoryContextCreate;
458  * we'd leak the header/initial block if we ereport in this stretch.
459  */
460 
461  /* Fill in the initial block's block header */
462  block = KeeperBlock(set);
463  block->aset = set;
464  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
465  block->endptr = ((char *) set) + firstBlockSize;
466  block->prev = NULL;
467  block->next = NULL;
468 
469  /* Mark unallocated space NOACCESS; leave the block header alone. */
470  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
471 
472  /* Remember block as part of block list */
473  set->blocks = block;
474 
475  /* Finish filling in aset-specific parts of the context header */
476  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
477 
478  set->initBlockSize = (uint32) initBlockSize;
479  set->maxBlockSize = (uint32) maxBlockSize;
480  set->nextBlockSize = (uint32) initBlockSize;
481  set->freeListIndex = freeListIndex;
482 
483  /*
484  * Compute the allocation chunk size limit for this context. It can't be
485  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
486  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
487  * even a significant fraction of it, should be treated as large chunks
488  * too. For the typical case of maxBlockSize a power of 2, the chunk size
489  * limit will be at most 1/8th maxBlockSize, so that given a stream of
490  * requests that are all the maximum chunk size we will waste at most
491  * 1/8th of the allocated space.
492  *
493  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
494  */
496  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
497 
498  /*
499  * Determine the maximum size that a chunk can be before we allocate an
500  * entire AllocBlock dedicated for that chunk. We set the absolute limit
501  * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
502  * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
503  * sized block. (We opt to keep allocChunkLimit a power-of-2 value
504  * primarily for legacy reasons rather than calculating it so that exactly
505  * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
506  */
508  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
509  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
510  set->allocChunkLimit >>= 1;
511 
512  /* Finally, do the type-independent part of context creation */
514  T_AllocSetContext,
515  MCTX_ASET_ID,
516  parent,
517  name);
518 
519  ((MemoryContext) set)->mem_allocated = firstBlockSize;
520 
521  return (MemoryContext) set;
522 }
523 
524 /*
525  * AllocSetReset
526  * Frees all memory which is allocated in the given set.
527  *
528  * Actually, this routine has some discretion about what to do.
529  * It should mark all allocated chunks freed, but it need not necessarily
530  * give back all the resources the set owns. Our actual implementation is
531  * that we give back all but the "keeper" block (which we must keep, since
532  * it shares a malloc chunk with the context header). In this way, we don't
533  * thrash malloc() when a context is repeatedly reset after small allocations,
534  * which is typical behavior for per-tuple contexts.
535  */
536 void
538 {
539  AllocSet set = (AllocSet) context;
540  AllocBlock block;
541  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
542 
543  Assert(AllocSetIsValid(set));
544 
545 #ifdef MEMORY_CONTEXT_CHECKING
546  /* Check for corruption and leaks before freeing */
547  AllocSetCheck(context);
548 #endif
549 
550  /* Remember keeper block size for Assert below */
551  keepersize = KeeperBlock(set)->endptr - ((char *) set);
552 
553  /* Clear chunk freelists */
554  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
555 
556  block = set->blocks;
557 
558  /* New blocks list will be just the keeper block */
559  set->blocks = KeeperBlock(set);
560 
561  while (block != NULL)
562  {
563  AllocBlock next = block->next;
564 
565  if (IsKeeperBlock(set, block))
566  {
567  /* Reset the block, but don't return it to malloc */
568  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
569 
570 #ifdef CLOBBER_FREED_MEMORY
571  wipe_mem(datastart, block->freeptr - datastart);
572 #else
573  /* wipe_mem() would have done this */
574  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
575 #endif
576  block->freeptr = datastart;
577  block->prev = NULL;
578  block->next = NULL;
579  }
580  else
581  {
582  /* Normal case, release the block */
583  context->mem_allocated -= block->endptr - ((char *) block);
584 
585 #ifdef CLOBBER_FREED_MEMORY
586  wipe_mem(block, block->freeptr - ((char *) block));
587 #endif
588  free(block);
589  }
590  block = next;
591  }
592 
593  Assert(context->mem_allocated == keepersize);
594 
595  /* Reset block size allocation sequence, too */
596  set->nextBlockSize = set->initBlockSize;
597 }
598 
599 /*
600  * AllocSetDelete
601  * Frees all memory which is allocated in the given set,
602  * in preparation for deletion of the set.
603  *
604  * Unlike AllocSetReset, this *must* free all resources of the set.
605  */
606 void
608 {
609  AllocSet set = (AllocSet) context;
610  AllocBlock block = set->blocks;
611  Size keepersize PG_USED_FOR_ASSERTS_ONLY;
612 
613  Assert(AllocSetIsValid(set));
614 
615 #ifdef MEMORY_CONTEXT_CHECKING
616  /* Check for corruption and leaks before freeing */
617  AllocSetCheck(context);
618 #endif
619 
620  /* Remember keeper block size for Assert below */
621  keepersize = KeeperBlock(set)->endptr - ((char *) set);
622 
623  /*
624  * If the context is a candidate for a freelist, put it into that freelist
625  * instead of destroying it.
626  */
627  if (set->freeListIndex >= 0)
628  {
630 
631  /*
632  * Reset the context, if it needs it, so that we aren't hanging on to
633  * more than the initial malloc chunk.
634  */
635  if (!context->isReset)
636  MemoryContextResetOnly(context);
637 
638  /*
639  * If the freelist is full, just discard what's already in it. See
640  * comments with context_freelists[].
641  */
642  if (freelist->num_free >= MAX_FREE_CONTEXTS)
643  {
644  while (freelist->first_free != NULL)
645  {
646  AllocSetContext *oldset = freelist->first_free;
647 
648  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
649  freelist->num_free--;
650 
651  /* All that remains is to free the header/initial block */
652  free(oldset);
653  }
654  Assert(freelist->num_free == 0);
655  }
656 
657  /* Now add the just-deleted context to the freelist. */
658  set->header.nextchild = (MemoryContext) freelist->first_free;
659  freelist->first_free = set;
660  freelist->num_free++;
661 
662  return;
663  }
664 
665  /* Free all blocks, except the keeper which is part of context header */
666  while (block != NULL)
667  {
668  AllocBlock next = block->next;
669 
670  if (!IsKeeperBlock(set, block))
671  context->mem_allocated -= block->endptr - ((char *) block);
672 
673 #ifdef CLOBBER_FREED_MEMORY
674  wipe_mem(block, block->freeptr - ((char *) block));
675 #endif
676 
677  if (!IsKeeperBlock(set, block))
678  free(block);
679 
680  block = next;
681  }
682 
683  Assert(context->mem_allocated == keepersize);
684 
685  /* Finally, free the context header, including the keeper block */
686  free(set);
687 }
688 
689 /*
690  * AllocSetAlloc
691  * Returns pointer to allocated memory of given size or NULL if
692  * request could not be completed; memory is added to the set.
693  *
694  * No request may exceed:
695  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
696  * All callers use a much-lower limit.
697  *
698  * Note: when using valgrind, it doesn't matter how the returned allocation
699  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
700  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
701  */
702 void *
704 {
705  AllocSet set = (AllocSet) context;
706  AllocBlock block;
707  MemoryChunk *chunk;
708  int fidx;
709  Size chunk_size;
710  Size blksize;
711 
712  Assert(AllocSetIsValid(set));
713 
714  /*
715  * If requested size exceeds maximum for chunks, allocate an entire block
716  * for this request.
717  */
718  if (size > set->allocChunkLimit)
719  {
720 #ifdef MEMORY_CONTEXT_CHECKING
721  /* ensure there's always space for the sentinel byte */
722  chunk_size = MAXALIGN(size + 1);
723 #else
724  chunk_size = MAXALIGN(size);
725 #endif
726 
727  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
728  block = (AllocBlock) malloc(blksize);
729  if (block == NULL)
730  return NULL;
731 
732  context->mem_allocated += blksize;
733 
734  block->aset = set;
735  block->freeptr = block->endptr = ((char *) block) + blksize;
736 
737  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
738 
739  /* mark the MemoryChunk as externally managed */
741 
742 #ifdef MEMORY_CONTEXT_CHECKING
743  chunk->requested_size = size;
744  /* set mark to catch clobber of "unused" space */
745  Assert(size < chunk_size);
746  set_sentinel(MemoryChunkGetPointer(chunk), size);
747 #endif
748 #ifdef RANDOMIZE_ALLOCATED_MEMORY
749  /* fill the allocated space with junk */
750  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
751 #endif
752 
753  /*
754  * Stick the new block underneath the active allocation block, if any,
755  * so that we don't lose the use of the space remaining therein.
756  */
757  if (set->blocks != NULL)
758  {
759  block->prev = set->blocks;
760  block->next = set->blocks->next;
761  if (block->next)
762  block->next->prev = block;
763  set->blocks->next = block;
764  }
765  else
766  {
767  block->prev = NULL;
768  block->next = NULL;
769  set->blocks = block;
770  }
771 
772  /* Ensure any padding bytes are marked NOACCESS. */
773  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
774  chunk_size - size);
775 
776  /* Disallow access to the chunk header. */
778 
779  return MemoryChunkGetPointer(chunk);
780  }
781 
782  /*
783  * Request is small enough to be treated as a chunk. Look in the
784  * corresponding free list to see if there is a free chunk we could reuse.
785  * If one is found, remove it from the free list, make it again a member
786  * of the alloc set and return its data address.
787  *
788  * Note that we don't attempt to ensure there's space for the sentinel
789  * byte here. We expect a large proportion of allocations to be for sizes
790  * which are already a power of 2. If we were to always make space for a
791  * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
792  * doubling the memory requirements for such allocations.
793  */
794  fidx = AllocSetFreeIndex(size);
795  chunk = set->freelist[fidx];
796  if (chunk != NULL)
797  {
799 
800  /* Allow access to the chunk header. */
802 
803  Assert(fidx == MemoryChunkGetValue(chunk));
804 
805  /* pop this chunk off the freelist */
807  set->freelist[fidx] = link->next;
809 
810 #ifdef MEMORY_CONTEXT_CHECKING
811  chunk->requested_size = size;
812  /* set mark to catch clobber of "unused" space */
813  if (size < GetChunkSizeFromFreeListIdx(fidx))
814  set_sentinel(MemoryChunkGetPointer(chunk), size);
815 #endif
816 #ifdef RANDOMIZE_ALLOCATED_MEMORY
817  /* fill the allocated space with junk */
818  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
819 #endif
820 
821  /* Ensure any padding bytes are marked NOACCESS. */
822  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
823  GetChunkSizeFromFreeListIdx(fidx) - size);
824 
825  /* Disallow access to the chunk header. */
827 
828  return MemoryChunkGetPointer(chunk);
829  }
830 
831  /*
832  * Choose the actual chunk size to allocate.
833  */
834  chunk_size = GetChunkSizeFromFreeListIdx(fidx);
835  Assert(chunk_size >= size);
836 
837  /*
838  * If there is enough room in the active allocation block, we will put the
839  * chunk into that block. Else must start a new one.
840  */
841  if ((block = set->blocks) != NULL)
842  {
843  Size availspace = block->endptr - block->freeptr;
844 
845  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
846  {
847  /*
848  * The existing active (top) block does not have enough room for
849  * the requested allocation, but it might still have a useful
850  * amount of space in it. Once we push it down in the block list,
851  * we'll never try to allocate more space from it. So, before we
852  * do that, carve up its free space into chunks that we can put on
853  * the set's freelists.
854  *
855  * Because we can only get here when there's less than
856  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
857  * more than ALLOCSET_NUM_FREELISTS-1 times.
858  */
859  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
860  {
862  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
863  int a_fidx = AllocSetFreeIndex(availchunk);
864 
865  /*
866  * In most cases, we'll get back the index of the next larger
867  * freelist than the one we need to put this chunk on. The
868  * exception is when availchunk is exactly a power of 2.
869  */
870  if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
871  {
872  a_fidx--;
873  Assert(a_fidx >= 0);
874  availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
875  }
876 
877  chunk = (MemoryChunk *) (block->freeptr);
878 
879  /* Prepare to initialize the chunk header. */
881  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
882  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
883 
884  /* store the freelist index in the value field */
885  MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
886 #ifdef MEMORY_CONTEXT_CHECKING
887  chunk->requested_size = InvalidAllocSize; /* mark it free */
888 #endif
889  /* push this chunk onto the free list */
890  link = GetFreeListLink(chunk);
891 
893  link->next = set->freelist[a_fidx];
895 
896  set->freelist[a_fidx] = chunk;
897  }
898  /* Mark that we need to create a new block */
899  block = NULL;
900  }
901  }
902 
903  /*
904  * Time to create a new regular (multi-chunk) block?
905  */
906  if (block == NULL)
907  {
908  Size required_size;
909 
910  /*
911  * The first such block has size initBlockSize, and we double the
912  * space in each succeeding block, but not more than maxBlockSize.
913  */
914  blksize = set->nextBlockSize;
915  set->nextBlockSize <<= 1;
916  if (set->nextBlockSize > set->maxBlockSize)
917  set->nextBlockSize = set->maxBlockSize;
918 
919  /*
920  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
921  * space... but try to keep it a power of 2.
922  */
923  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
924  while (blksize < required_size)
925  blksize <<= 1;
926 
927  /* Try to allocate it */
928  block = (AllocBlock) malloc(blksize);
929 
930  /*
931  * We could be asking for pretty big blocks here, so cope if malloc
932  * fails. But give up if there's less than 1 MB or so available...
933  */
934  while (block == NULL && blksize > 1024 * 1024)
935  {
936  blksize >>= 1;
937  if (blksize < required_size)
938  break;
939  block = (AllocBlock) malloc(blksize);
940  }
941 
942  if (block == NULL)
943  return NULL;
944 
945  context->mem_allocated += blksize;
946 
947  block->aset = set;
948  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
949  block->endptr = ((char *) block) + blksize;
950 
951  /* Mark unallocated space NOACCESS. */
953  blksize - ALLOC_BLOCKHDRSZ);
954 
955  block->prev = NULL;
956  block->next = set->blocks;
957  if (block->next)
958  block->next->prev = block;
959  set->blocks = block;
960  }
961 
962  /*
963  * OK, do the allocation
964  */
965  chunk = (MemoryChunk *) (block->freeptr);
966 
967  /* Prepare to initialize the chunk header. */
969 
970  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
971  Assert(block->freeptr <= block->endptr);
972 
973  /* store the free list index in the value field */
974  MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
975 
976 #ifdef MEMORY_CONTEXT_CHECKING
977  chunk->requested_size = size;
978  /* set mark to catch clobber of "unused" space */
979  if (size < chunk_size)
980  set_sentinel(MemoryChunkGetPointer(chunk), size);
981 #endif
982 #ifdef RANDOMIZE_ALLOCATED_MEMORY
983  /* fill the allocated space with junk */
984  randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
985 #endif
986 
987  /* Ensure any padding bytes are marked NOACCESS. */
988  VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
989  chunk_size - size);
990 
991  /* Disallow access to the chunk header. */
993 
994  return MemoryChunkGetPointer(chunk);
995 }
996 
997 /*
998  * AllocSetFree
999  * Frees allocated memory; memory is removed from the set.
1000  */
1001 void
1002 AllocSetFree(void *pointer)
1003 {
1004  AllocSet set;
1005  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1006 
1007  /* Allow access to the chunk header. */
1009 
1010  if (MemoryChunkIsExternal(chunk))
1011  {
1012  /* Release single-chunk block. */
1013  AllocBlock block = ExternalChunkGetBlock(chunk);
1014 
1015  /*
1016  * Try to verify that we have a sane block pointer: the block header
1017  * should reference an aset and the freeptr should match the endptr.
1018  */
1019  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1020  elog(ERROR, "could not find block containing chunk %p", chunk);
1021 
1022  set = block->aset;
1023 
1024 #ifdef MEMORY_CONTEXT_CHECKING
1025  {
1026  /* Test for someone scribbling on unused space in chunk */
1027  Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1028  if (!sentinel_ok(pointer, chunk->requested_size))
1029  elog(WARNING, "detected write past chunk end in %s %p",
1030  set->header.name, chunk);
1031  }
1032 #endif
1033 
1034  /* OK, remove block from aset's list and free it */
1035  if (block->prev)
1036  block->prev->next = block->next;
1037  else
1038  set->blocks = block->next;
1039  if (block->next)
1040  block->next->prev = block->prev;
1041 
1042  set->header.mem_allocated -= block->endptr - ((char *) block);
1043 
1044 #ifdef CLOBBER_FREED_MEMORY
1045  wipe_mem(block, block->freeptr - ((char *) block));
1046 #endif
1047  free(block);
1048  }
1049  else
1050  {
1051  AllocBlock block = MemoryChunkGetBlock(chunk);
1052  int fidx;
1054 
1055  /*
1056  * In this path, for speed reasons we just Assert that the referenced
1057  * block is good. We can also Assert that the value field is sane.
1058  * Future field experience may show that these Asserts had better
1059  * become regular runtime test-and-elog checks.
1060  */
1061  Assert(AllocBlockIsValid(block));
1062  set = block->aset;
1063 
1064  fidx = MemoryChunkGetValue(chunk);
1065  Assert(FreeListIdxIsValid(fidx));
1066  link = GetFreeListLink(chunk);
1067 
1068 #ifdef MEMORY_CONTEXT_CHECKING
1069  /* Test for someone scribbling on unused space in chunk */
1070  if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1071  if (!sentinel_ok(pointer, chunk->requested_size))
1072  elog(WARNING, "detected write past chunk end in %s %p",
1073  set->header.name, chunk);
1074 #endif
1075 
1076 #ifdef CLOBBER_FREED_MEMORY
1077  wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1078 #endif
1079  /* push this chunk onto the top of the free list */
1081  link->next = set->freelist[fidx];
1083  set->freelist[fidx] = chunk;
1084 
1085 #ifdef MEMORY_CONTEXT_CHECKING
1086 
1087  /*
1088  * Reset requested_size to InvalidAllocSize in chunks that are on free
1089  * list.
1090  */
1091  chunk->requested_size = InvalidAllocSize;
1092 #endif
1093  }
1094 }
1095 
1096 /*
1097  * AllocSetRealloc
1098  * Returns new pointer to allocated memory of given size or NULL if
1099  * request could not be completed; this memory is added to the set.
1100  * Memory associated with given pointer is copied into the new memory,
1101  * and the old memory is freed.
1102  *
1103  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1104  * makes our Valgrind client requests less-precise, hazarding false negatives.
1105  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1106  * request size.)
1107  */
1108 void *
1109 AllocSetRealloc(void *pointer, Size size)
1110 {
1111  AllocBlock block;
1112  AllocSet set;
1113  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1114  Size oldchksize;
1115  int fidx;
1116 
1117  /* Allow access to the chunk header. */
1119 
1120  if (MemoryChunkIsExternal(chunk))
1121  {
1122  /*
1123  * The chunk must have been allocated as a single-chunk block. Use
1124  * realloc() to make the containing block bigger, or smaller, with
1125  * minimum space wastage.
1126  */
1127  Size chksize;
1128  Size blksize;
1129  Size oldblksize;
1130 
1131  block = ExternalChunkGetBlock(chunk);
1132 
1133  /*
1134  * Try to verify that we have a sane block pointer: the block header
1135  * should reference an aset and the freeptr should match the endptr.
1136  */
1137  if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
1138  elog(ERROR, "could not find block containing chunk %p", chunk);
1139 
1140  set = block->aset;
1141 
1142  oldchksize = block->endptr - (char *) pointer;
1143 
1144 #ifdef MEMORY_CONTEXT_CHECKING
1145  /* Test for someone scribbling on unused space in chunk */
1146  Assert(chunk->requested_size < oldchksize);
1147  if (!sentinel_ok(pointer, chunk->requested_size))
1148  elog(WARNING, "detected write past chunk end in %s %p",
1149  set->header.name, chunk);
1150 #endif
1151 
1152 #ifdef MEMORY_CONTEXT_CHECKING
1153  /* ensure there's always space for the sentinel byte */
1154  chksize = MAXALIGN(size + 1);
1155 #else
1156  chksize = MAXALIGN(size);
1157 #endif
1158 
1159  /* Do the realloc */
1160  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1161  oldblksize = block->endptr - ((char *) block);
1162 
1163  block = (AllocBlock) realloc(block, blksize);
1164  if (block == NULL)
1165  {
1166  /* Disallow access to the chunk header. */
1168  return NULL;
1169  }
1170 
1171  /* updated separately, not to underflow when (oldblksize > blksize) */
1172  set->header.mem_allocated -= oldblksize;
1173  set->header.mem_allocated += blksize;
1174 
1175  block->freeptr = block->endptr = ((char *) block) + blksize;
1176 
1177  /* Update pointers since block has likely been moved */
1178  chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1179  pointer = MemoryChunkGetPointer(chunk);
1180  if (block->prev)
1181  block->prev->next = block;
1182  else
1183  set->blocks = block;
1184  if (block->next)
1185  block->next->prev = block;
1186 
1187 #ifdef MEMORY_CONTEXT_CHECKING
1188 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1189 
1190  /*
1191  * We can only randomize the extra space if we know the prior request.
1192  * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1193  */
1194  if (size > chunk->requested_size)
1195  randomize_mem((char *) pointer + chunk->requested_size,
1196  size - chunk->requested_size);
1197 #else
1198 
1199  /*
1200  * If this is an increase, realloc() will have marked any
1201  * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1202  * also need to adjust trailing bytes from the old allocation (from
1203  * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1204  * Make sure not to mark too many bytes in case chunk->requested_size
1205  * < size < oldchksize.
1206  */
1207 #ifdef USE_VALGRIND
1208  if (Min(size, oldchksize) > chunk->requested_size)
1209  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1210  Min(size, oldchksize) - chunk->requested_size);
1211 #endif
1212 #endif
1213 
1214  chunk->requested_size = size;
1215  /* set mark to catch clobber of "unused" space */
1216  Assert(size < chksize);
1217  set_sentinel(pointer, size);
1218 #else /* !MEMORY_CONTEXT_CHECKING */
1219 
1220  /*
1221  * We may need to adjust marking of bytes from the old allocation as
1222  * some of them may be marked NOACCESS. We don't know how much of the
1223  * old chunk size was the requested size; it could have been as small
1224  * as one byte. We have to be conservative and just mark the entire
1225  * old portion DEFINED. Make sure not to mark memory beyond the new
1226  * allocation in case it's smaller than the old one.
1227  */
1228  VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1229 #endif
1230 
1231  /* Ensure any padding bytes are marked NOACCESS. */
1232  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1233 
1234  /* Disallow access to the chunk header . */
1236 
1237  return pointer;
1238  }
1239 
1240  block = MemoryChunkGetBlock(chunk);
1241 
1242  /*
1243  * In this path, for speed reasons we just Assert that the referenced
1244  * block is good. We can also Assert that the value field is sane. Future
1245  * field experience may show that these Asserts had better become regular
1246  * runtime test-and-elog checks.
1247  */
1248  Assert(AllocBlockIsValid(block));
1249  set = block->aset;
1250 
1251  fidx = MemoryChunkGetValue(chunk);
1252  Assert(FreeListIdxIsValid(fidx));
1253  oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1254 
1255 #ifdef MEMORY_CONTEXT_CHECKING
1256  /* Test for someone scribbling on unused space in chunk */
1257  if (chunk->requested_size < oldchksize)
1258  if (!sentinel_ok(pointer, chunk->requested_size))
1259  elog(WARNING, "detected write past chunk end in %s %p",
1260  set->header.name, chunk);
1261 #endif
1262 
1263  /*
1264  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1265  * allocated area already is >= the new size. (In particular, we will
1266  * fall out here if the requested size is a decrease.)
1267  */
1268  if (oldchksize >= size)
1269  {
1270 #ifdef MEMORY_CONTEXT_CHECKING
1271  Size oldrequest = chunk->requested_size;
1272 
1273 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1274  /* We can only fill the extra space if we know the prior request */
1275  if (size > oldrequest)
1276  randomize_mem((char *) pointer + oldrequest,
1277  size - oldrequest);
1278 #endif
1279 
1280  chunk->requested_size = size;
1281 
1282  /*
1283  * If this is an increase, mark any newly-available part UNDEFINED.
1284  * Otherwise, mark the obsolete part NOACCESS.
1285  */
1286  if (size > oldrequest)
1287  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1288  size - oldrequest);
1289  else
1290  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1291  oldchksize - size);
1292 
1293  /* set mark to catch clobber of "unused" space */
1294  if (size < oldchksize)
1295  set_sentinel(pointer, size);
1296 #else /* !MEMORY_CONTEXT_CHECKING */
1297 
1298  /*
1299  * We don't have the information to determine whether we're growing
1300  * the old request or shrinking it, so we conservatively mark the
1301  * entire new allocation DEFINED.
1302  */
1303  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1304  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1305 #endif
1306 
1307  /* Disallow access to the chunk header. */
1309 
1310  return pointer;
1311  }
1312  else
1313  {
1314  /*
1315  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1316  * allocate a new chunk and copy the data. Since we know the existing
1317  * data isn't huge, this won't involve any great memcpy expense, so
1318  * it's not worth being smarter. (At one time we tried to avoid
1319  * memcpy when it was possible to enlarge the chunk in-place, but that
1320  * turns out to misbehave unpleasantly for repeated cycles of
1321  * palloc/repalloc/pfree: the eventually freed chunks go into the
1322  * wrong freelist for the next initial palloc request, and so we leak
1323  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1324  */
1325  AllocPointer newPointer;
1326  Size oldsize;
1327 
1328  /* allocate new chunk */
1329  newPointer = AllocSetAlloc((MemoryContext) set, size);
1330 
1331  /* leave immediately if request was not completed */
1332  if (newPointer == NULL)
1333  {
1334  /* Disallow access to the chunk header. */
1336  return NULL;
1337  }
1338 
1339  /*
1340  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1341  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1342  * definedness from the old allocation to the new. If we know the old
1343  * allocation, copy just that much. Otherwise, make the entire old
1344  * chunk defined to avoid errors as we copy the currently-NOACCESS
1345  * trailing bytes.
1346  */
1347  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1348 #ifdef MEMORY_CONTEXT_CHECKING
1349  oldsize = chunk->requested_size;
1350 #else
1351  oldsize = oldchksize;
1352  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1353 #endif
1354 
1355  /* transfer existing data (certain to fit) */
1356  memcpy(newPointer, pointer, oldsize);
1357 
1358  /* free old chunk */
1359  AllocSetFree(pointer);
1360 
1361  return newPointer;
1362  }
1363 }
1364 
1365 /*
1366  * AllocSetGetChunkContext
1367  * Return the MemoryContext that 'pointer' belongs to.
1368  */
1371 {
1372  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1373  AllocBlock block;
1374  AllocSet set;
1375 
1376  /* Allow access to the chunk header. */
1378 
1379  if (MemoryChunkIsExternal(chunk))
1380  block = ExternalChunkGetBlock(chunk);
1381  else
1382  block = (AllocBlock) MemoryChunkGetBlock(chunk);
1383 
1384  /* Disallow access to the chunk header. */
1386 
1387  Assert(AllocBlockIsValid(block));
1388  set = block->aset;
1389 
1390  return &set->header;
1391 }
1392 
1393 /*
1394  * AllocSetGetChunkSpace
1395  * Given a currently-allocated chunk, determine the total space
1396  * it occupies (including all memory-allocation overhead).
1397  */
1398 Size
1400 {
1401  MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1402  int fidx;
1403 
1404  /* Allow access to the chunk header. */
1406 
1407  if (MemoryChunkIsExternal(chunk))
1408  {
1409  AllocBlock block = ExternalChunkGetBlock(chunk);
1410 
1411  /* Disallow access to the chunk header. */
1413 
1414  Assert(AllocBlockIsValid(block));
1415 
1416  return block->endptr - (char *) chunk;
1417  }
1418 
1419  fidx = MemoryChunkGetValue(chunk);
1420  Assert(FreeListIdxIsValid(fidx));
1421 
1422  /* Disallow access to the chunk header. */
1424 
1426 }
1427 
1428 /*
1429  * AllocSetIsEmpty
1430  * Is an allocset empty of any allocated space?
1431  */
1432 bool
1434 {
1435  Assert(AllocSetIsValid(context));
1436 
1437  /*
1438  * For now, we say "empty" only if the context is new or just reset. We
1439  * could examine the freelists to determine if all space has been freed,
1440  * but it's not really worth the trouble for present uses of this
1441  * functionality.
1442  */
1443  if (context->isReset)
1444  return true;
1445  return false;
1446 }
1447 
1448 /*
1449  * AllocSetStats
1450  * Compute stats about memory consumption of an allocset.
1451  *
1452  * printfunc: if not NULL, pass a human-readable stats string to this.
1453  * passthru: pass this pointer through to printfunc.
1454  * totals: if not NULL, add stats about this context into *totals.
1455  * print_to_stderr: print stats to stderr if true, elog otherwise.
1456  */
1457 void
1459  MemoryStatsPrintFunc printfunc, void *passthru,
1460  MemoryContextCounters *totals, bool print_to_stderr)
1461 {
1462  AllocSet set = (AllocSet) context;
1463  Size nblocks = 0;
1464  Size freechunks = 0;
1465  Size totalspace;
1466  Size freespace = 0;
1467  AllocBlock block;
1468  int fidx;
1469 
1470  Assert(AllocSetIsValid(set));
1471 
1472  /* Include context header in totalspace */
1473  totalspace = MAXALIGN(sizeof(AllocSetContext));
1474 
1475  for (block = set->blocks; block != NULL; block = block->next)
1476  {
1477  nblocks++;
1478  totalspace += block->endptr - ((char *) block);
1479  freespace += block->endptr - block->freeptr;
1480  }
1481  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1482  {
1483  Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1484  MemoryChunk *chunk = set->freelist[fidx];
1485 
1486  while (chunk != NULL)
1487  {
1489 
1490  /* Allow access to the chunk header. */
1492  Assert(MemoryChunkGetValue(chunk) == fidx);
1494 
1495  freechunks++;
1496  freespace += chksz + ALLOC_CHUNKHDRSZ;
1497 
1499  chunk = link->next;
1501  }
1502  }
1503 
1504  if (printfunc)
1505  {
1506  char stats_string[200];
1507 
1508  snprintf(stats_string, sizeof(stats_string),
1509  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1510  totalspace, nblocks, freespace, freechunks,
1511  totalspace - freespace);
1512  printfunc(context, passthru, stats_string, print_to_stderr);
1513  }
1514 
1515  if (totals)
1516  {
1517  totals->nblocks += nblocks;
1518  totals->freechunks += freechunks;
1519  totals->totalspace += totalspace;
1520  totals->freespace += freespace;
1521  }
1522 }
1523 
1524 
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	const char *name = set->header.name;
	AllocBlock	prevblock;
	AllocBlock	block;
	Size		total_allocated = 0;

	for (prevblock = NULL, block = set->blocks;
		 block != NULL;
		 prevblock = block, block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;
		long		nchunks = 0;
		bool		has_external_chunk = false;

		/* the keeper block is carved out of the context header allocation */
		if (IsKeeperBlock(set, block))
			total_allocated += block->endptr - ((char *) set);
		else
			total_allocated += block->endptr - ((char *) block);

		/*
		 * Empty block - empty can be keeper-block only
		 */
		if (!blk_used)
		{
			if (!IsKeeperBlock(set, block))
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Check block header fields
		 */
		if (block->aset != set ||
			block->prev != prevblock ||
			block->freeptr < bpoz ||
			block->freeptr > block->endptr)
			elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
				 name, block);

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			MemoryChunk *chunk = (MemoryChunk *) bpoz;
			Size		chsize,
						dsize;

			/* Allow access to the chunk header. */
			VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

			if (MemoryChunkIsExternal(chunk))
			{
				chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
				has_external_chunk = true;

				/* make sure this chunk consumes the entire block */
				if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
					elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
						 name, chunk, block);
			}
			else
			{
				int			fidx = MemoryChunkGetValue(chunk);

				if (!FreeListIdxIsValid(fidx))
					elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
						 name, chunk, block);

				chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */

				/*
				 * Check the stored block offset correctly references this
				 * block.
				 */
				if (block != MemoryChunkGetBlock(chunk))
					elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
						 name, chunk, block);
			}
			dsize = chunk->requested_size;	/* real data */

			/* an allocated chunk's requested size must be <= the chsize */
			if (dsize != InvalidAllocSize && dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);

			/* chsize must not be smaller than the first freelist's size */
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/*
			 * Check for overwrite of padding space in an allocated chunk.
			 */
			if (dsize != InvalidAllocSize && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			/* if chunk is allocated, disallow access to the chunk header */
			if (dsize != InvalidAllocSize)
				VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);

		if (has_external_chunk && nchunks > 1)
			elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
				 name, block);
	}

	Assert(total_allocated == context->mem_allocated);
}

#endif							/* MEMORY_CONTEXT_CHECKING */
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
void AllocSetReset(MemoryContext context)
Definition: aset.c:537
#define AllocSetIsValid(set)
Definition: aset.c:200
#define AllocBlockIsValid(block)
Definition: aset.c:207
#define IsKeeperBlock(set, block)
Definition: aset.c:248
#define GetFreeListLink(chkptr)
Definition: aset.c:132
#define FreeListIdxIsValid(fidx)
Definition: aset.c:136
Size AllocSetGetChunkSpace(void *pointer)
Definition: aset.c:1399
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:105
MemoryContext AllocSetGetChunkContext(void *pointer)
Definition: aset.c:1370
void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals, bool print_to_stderr)
Definition: aset.c:1458
#define KeeperBlock(set)
Definition: aset.c:244
#define GetChunkSizeFromFreeListIdx(fidx)
Definition: aset.c:140
#define ALLOC_MINBITS
Definition: aset.c:83
struct AllocBlockData * AllocBlock
Definition: aset.c:107
#define MAX_FREE_CONTEXTS
Definition: aset.c:241
static int AllocSetFreeIndex(Size size)
Definition: aset.c:277
bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1433
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:104
struct AllocBlockData AllocBlockData
void * AllocSetRealloc(void *pointer, Size size)
Definition: aset.c:1109
void * AllocPointer
Definition: aset.c:113
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:84
struct AllocSetContext AllocSetContext
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:87
void AllocSetFree(void *pointer)
Definition: aset.c:1002
void AllocSetDelete(MemoryContext context)
Definition: aset.c:607
struct AllocSetFreeList AllocSetFreeList
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:85
struct AllocFreeListLink AllocFreeListLink
static AllocSetFreeList context_freelists[2]
Definition: aset.c:257
#define ExternalChunkGetBlock(chunk)
Definition: aset.c:215
void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:703
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:347
AllocSetContext * AllocSet
Definition: aset.c:167
static int32 next
Definition: blutils.c:220
unsigned int uint32
Definition: c.h:495
#define Min(x, y)
Definition: c.h:993
#define MAXALIGN(LEN)
Definition: c.h:800
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:171
#define Max(x, y)
Definition: c.h:987
#define MemSetAligned(start, val, len)
Definition: c.h:1039
#define StaticAssertDecl(condition, errmessage)
Definition: c.h:925
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:927
size_t Size
Definition: c.h:594
int errdetail(const char *fmt,...)
Definition: elog.c:1202
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define WARNING
Definition: elog.h:36
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
#define realloc(a, b)
Definition: header.h:60
#define free(a)
Definition: header.h:65
#define malloc(a)
Definition: header.h:50
Assert(fmt[strlen(fmt) - 1] !='\n')
void MemoryContextCreate(MemoryContext node, NodeTag tag, MemoryContextMethodID method_id, MemoryContext parent, const char *name)
Definition: mcxt.c:973
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:699
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:349
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string, bool print_to_stderr)
Definition: memnodes.h:54
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:157
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:147
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:49
#define InvalidAllocSize
Definition: memutils.h:47
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:177
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:158
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:148
@ MCTX_ASET_ID
#define MEMORYCHUNK_MAX_BLOCKOFFSET
static Size MemoryChunkGetValue(MemoryChunk *chunk)
#define MemoryChunkGetPointer(c)
static bool MemoryChunkIsExternal(MemoryChunk *chunk)
static void MemoryChunkSetHdrMaskExternal(MemoryChunk *chunk, MemoryContextMethodID methodid)
static void * MemoryChunkGetBlock(MemoryChunk *chunk)
#define PointerGetMemoryChunk(p)
static void MemoryChunkSetHdrMask(MemoryChunk *chunk, void *block, Size value, MemoryContextMethodID methodid)
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
static int pg_leftmost_one_pos32(uint32 word)
Definition: pg_bitutils.h:41
PGDLLIMPORT const uint8 pg_leftmost_one_pos[256]
Definition: pg_bitutils.c:34
#define snprintf
Definition: port.h:238
AllocBlock prev
Definition: aset.c:184
AllocSet aset
Definition: aset.c:183
char * freeptr
Definition: aset.c:186
AllocBlock next
Definition: aset.c:185
char * endptr
Definition: aset.c:187
uint32 initBlockSize
Definition: aset.c:159
uint32 maxBlockSize
Definition: aset.c:160
uint32 allocChunkLimit
Definition: aset.c:162
MemoryContextData header
Definition: aset.c:154
int freeListIndex
Definition: aset.c:164
AllocBlock blocks
Definition: aset.c:156
uint32 nextBlockSize
Definition: aset.c:161
MemoryChunk * freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:157
int num_free
Definition: aset.c:252
AllocSetContext * first_free
Definition: aset.c:253
MemoryContext nextchild
Definition: memnodes.h:92
Size mem_allocated
Definition: memnodes.h:87
const char * name
Definition: memnodes.h:93
const char * name