/*
 * PostgreSQL source file: src/backend/utils/mmgr/aset.c
 * (stray doxygen page-navigation text removed)
 */
/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines. AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks. AllocSetFree() normally
 *	doesn't free() memory really. It just adds the freed area to some
 *	list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *				Jan Wieck
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is specially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	the previous instances of the block were guaranteed to be wasted until
 *	AllocSetReset() under the old way.
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */
46 
47 #include "postgres.h"
48 
49 #include "utils/memdebug.h"
50 #include "utils/memutils.h"
51 
52 /* Define this to detail debug alloc information */
53 /* #define HAVE_ALLOCINFO */
54 
/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
87 
/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;
107 
/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
113 
114 /*
115  * AllocSetContext is our standard implementation of MemoryContext.
116  *
117  * Note: header.isReset means there is nothing for AllocSetReset to do.
118  * This is different from the aset being physically empty (empty blocks list)
119  * because we will still have a keeper block. It's also different from the set
120  * being logically empty, because we don't attempt to detect pfree'ing the
121  * last active chunk.
122  */
123 typedef struct AllocSetContext
124 {
125  MemoryContextData header; /* Standard memory-context fields */
126  /* Info about storage allocated in this context: */
127  AllocBlock blocks; /* head of list of blocks in this set */
128  AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
129  /* Allocation parameters for this context: */
130  Size initBlockSize; /* initial block size */
131  Size maxBlockSize; /* maximum block size */
132  Size nextBlockSize; /* next block size to allocate */
133  Size allocChunkLimit; /* effective chunk size limit */
134  AllocBlock keeper; /* keep this block over resets */
135  /* freelist this context could be put in, or -1 if not a candidate: */
136  int freeListIndex; /* index in context_freelists[], or -1 */
138 
140 
141 /*
142  * AllocBlock
143  * An AllocBlock is the unit of memory that is obtained by aset.c
144  * from malloc(). It contains one or more AllocChunks, which are
145  * the units requested by palloc() and freed by pfree(). AllocChunks
146  * cannot be returned to malloc() individually, instead they are put
147  * on freelists by pfree() and re-used by the next palloc() that has
148  * a matching request size.
149  *
150  * AllocBlockData is the header data for a block --- the usable space
151  * within the block begins at the next alignment boundary.
152  */
153 typedef struct AllocBlockData
154 {
155  AllocSet aset; /* aset that owns this block */
156  AllocBlock prev; /* prev block in aset's blocks list, if any */
157  AllocBlock next; /* next block in aset's blocks list, if any */
158  char *freeptr; /* start of free space in this block */
159  char *endptr; /* end of space in this block */
161 
162 /*
163  * AllocChunk
164  * The prefix of each piece of memory in an AllocBlock
165  *
166  * Note: to meet the memory context APIs, the payload area of the chunk must
167  * be maxaligned, and the "aset" link must be immediately adjacent to the
168  * payload area (cf. GetMemoryChunkContext). We simplify matters for this
169  * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
170  * we can ensure things work by adding any required alignment padding before
171  * the "aset" field. There is a static assertion below that the alignment
172  * is done correctly.
173  */
174 typedef struct AllocChunkData
175 {
176  /* size is always the size of the usable space in the chunk */
178 #ifdef MEMORY_CONTEXT_CHECKING
179  /* when debugging memory usage, also store actual requested size */
180  /* this is zero in a free chunk */
181  Size requested_size;
182 
183 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
184 #else
185 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P)
186 #endif /* MEMORY_CONTEXT_CHECKING */
187 
188  /* ensure proper alignment by adding padding if needed */
189 #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
190  char padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
191 #endif
192 
193  /* aset is the owning aset if allocated, or the freelist link if free */
194  void *aset;
195  /* there must not be any padding to reach a MAXALIGN boundary here! */
197 
/*
 * Only the "aset" field should be accessed outside this module.
 * We keep the rest of an allocated chunk's header marked NOACCESS when using
 * valgrind.  But note that chunk headers that are in a freelist are kept
 * accessible, for simplicity.
 */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(AllocChunkData, aset)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
	((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
	((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
222 
223 /*
224  * Rather than repeatedly creating and deleting memory contexts, we keep some
225  * freed contexts in freelists so that we can hand them out again with little
226  * work. Before putting a context in a freelist, we reset it so that it has
227  * only its initial malloc chunk and no others. To be a candidate for a
228  * freelist, a context must have the same minContextSize/initBlockSize as
229  * other contexts in the list; but its maxBlockSize is irrelevant since that
230  * doesn't affect the size of the initial chunk.
231  *
232  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
233  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
234  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
235  *
236  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
237  * hopes of improving locality of reference. But if there get to be too
238  * many contexts in the list, we'd prefer to drop the most-recently-created
239  * contexts in hopes of keeping the process memory map compact.
240  * We approximate that by simply deleting all existing entries when the list
241  * overflows, on the assumption that queries that allocate a lot of contexts
242  * will probably free them in more or less reverse order of allocation.
243  *
244  * Contexts in a freelist are chained via their nextchild pointers.
245  */
246 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
247 
248 typedef struct AllocSetFreeList
249 {
250  int num_free; /* current list length */
251  AllocSetContext *first_free; /* list header */
253 
254 /* context_freelists[0] is for default params, [1] for small params */
256 {
257  {
258  0, NULL
259  },
260  {
261  0, NULL
262  }
263 };
264 
265 /*
266  * These functions implement the MemoryContext API for AllocSet contexts.
267  */
268 static void *AllocSetAlloc(MemoryContext context, Size size);
269 static void AllocSetFree(MemoryContext context, void *pointer);
270 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
271 static void AllocSetReset(MemoryContext context);
272 static void AllocSetDelete(MemoryContext context);
273 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
274 static bool AllocSetIsEmpty(MemoryContext context);
275 static void AllocSetStats(MemoryContext context,
276  MemoryStatsPrintFunc printfunc, void *passthru,
277  MemoryContextCounters *totals);
278 
279 #ifdef MEMORY_CONTEXT_CHECKING
280 static void AllocSetCheck(MemoryContext context);
281 #endif
282 
283 /*
284  * This is the virtual function table for AllocSet contexts.
285  */
288  AllocSetFree,
295 #ifdef MEMORY_CONTEXT_CHECKING
296  ,AllocSetCheck
297 #endif
298 };
299 
/*
 * Table for AllocSetFreeIndex: LogTable256[i] = ceil(log2(i)) + 1 for i > 0.
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};
311 
/* ----------
 * Debug macros: only emit per-chunk trace output when HAVE_ALLOCINFO is
 * defined (see the commented-out #define near the top of this file).
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
327 
328 /* ----------
329  * AllocSetFreeIndex -
330  *
331  * Depending on the size of an allocation compute which freechunk
332  * list of the alloc set it belongs to. Caller must have verified
333  * that size <= ALLOC_CHUNK_LIMIT.
334  * ----------
335  */
336 static inline int
338 {
339  int idx;
340  unsigned int t,
341  tsize;
342 
343  if (size > (1 << ALLOC_MINBITS))
344  {
345  tsize = (size - 1) >> ALLOC_MINBITS;
346 
347  /*
348  * At this point we need to obtain log2(tsize)+1, ie, the number of
349  * not-all-zero bits at the right. We used to do this with a
350  * shift-and-count loop, but this function is enough of a hotspot to
351  * justify micro-optimization effort. The best approach seems to be
352  * to use a lookup table. Note that this code assumes that
353  * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
354  * the tsize value.
355  */
356  t = tsize >> 8;
357  idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
358 
360  }
361  else
362  idx = 0;
363 
364  return idx;
365 }
366 
367 
368 /*
369  * Public routines
370  */
371 
372 
373 /*
374  * AllocSetContextCreateExtended
375  * Create a new AllocSet context.
376  *
377  * parent: parent context, or NULL if top-level context
378  * name: name of context (must be statically allocated)
379  * minContextSize: minimum context size
380  * initBlockSize: initial allocation block size
381  * maxBlockSize: maximum allocation block size
382  *
383  * Most callers should abstract the context size parameters using a macro
384  * such as ALLOCSET_DEFAULT_SIZES. (This is now *required* when going
385  * through the AllocSetContextCreate macro.)
386  */
389  const char *name,
390  Size minContextSize,
393 {
394  int freeListIndex;
395  Size firstBlockSize;
396  AllocSet set;
397  AllocBlock block;
398 
399  /* Assert we padded AllocChunkData properly */
401  "sizeof(AllocChunkData) is not maxaligned");
404  "padding calculation in AllocChunkData is wrong");
405 
406  /*
407  * First, validate allocation parameters. Once these were regular runtime
408  * test and elog's, but in practice Asserts seem sufficient because nobody
409  * varies their parameters at runtime. We somewhat arbitrarily enforce a
410  * minimum 1K block size.
411  */
412  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
413  initBlockSize >= 1024);
414  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
415  maxBlockSize >= initBlockSize &&
416  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
417  Assert(minContextSize == 0 ||
418  (minContextSize == MAXALIGN(minContextSize) &&
419  minContextSize >= 1024 &&
420  minContextSize <= maxBlockSize));
421 
422  /*
423  * Check whether the parameters match either available freelist. We do
424  * not need to demand a match of maxBlockSize.
425  */
426  if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
427  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
428  freeListIndex = 0;
429  else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
430  initBlockSize == ALLOCSET_SMALL_INITSIZE)
431  freeListIndex = 1;
432  else
433  freeListIndex = -1;
434 
435  /*
436  * If a suitable freelist entry exists, just recycle that context.
437  */
438  if (freeListIndex >= 0)
439  {
440  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
441 
442  if (freelist->first_free != NULL)
443  {
444  /* Remove entry from freelist */
445  set = freelist->first_free;
446  freelist->first_free = (AllocSet) set->header.nextchild;
447  freelist->num_free--;
448 
449  /* Update its maxBlockSize; everything else should be OK */
450  set->maxBlockSize = maxBlockSize;
451 
452  /* Reinitialize its header, installing correct name and parent */
456  parent,
457  name);
458 
459  return (MemoryContext) set;
460  }
461  }
462 
463  /* Determine size of initial block */
464  firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
466  if (minContextSize != 0)
467  firstBlockSize = Max(firstBlockSize, minContextSize);
468  else
469  firstBlockSize = Max(firstBlockSize, initBlockSize);
470 
471  /*
472  * Allocate the initial block. Unlike other aset.c blocks, it starts with
473  * the context header and its block header follows that.
474  */
475  set = (AllocSet) malloc(firstBlockSize);
476  if (set == NULL)
477  {
478  if (TopMemoryContext)
480  ereport(ERROR,
481  (errcode(ERRCODE_OUT_OF_MEMORY),
482  errmsg("out of memory"),
483  errdetail("Failed while creating memory context \"%s\".",
484  name)));
485  }
486 
487  /*
488  * Avoid writing code that can fail between here and MemoryContextCreate;
489  * we'd leak the header/initial block if we ereport in this stretch.
490  */
491 
492  /* Fill in the initial block's block header */
493  block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
494  block->aset = set;
495  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
496  block->endptr = ((char *) set) + firstBlockSize;
497  block->prev = NULL;
498  block->next = NULL;
499 
500  /* Mark unallocated space NOACCESS; leave the block header alone. */
501  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
502 
503  /* Remember block as part of block list */
504  set->blocks = block;
505  /* Mark block as not to be released at reset time */
506  set->keeper = block;
507 
508  /* Finish filling in aset-specific parts of the context header */
509  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
510 
511  set->initBlockSize = initBlockSize;
512  set->maxBlockSize = maxBlockSize;
513  set->nextBlockSize = initBlockSize;
514  set->freeListIndex = freeListIndex;
515 
516  /*
517  * Compute the allocation chunk size limit for this context. It can't be
518  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
519  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
520  * even a significant fraction of it, should be treated as large chunks
521  * too. For the typical case of maxBlockSize a power of 2, the chunk size
522  * limit will be at most 1/8th maxBlockSize, so that given a stream of
523  * requests that are all the maximum chunk size we will waste at most
524  * 1/8th of the allocated space.
525  *
526  * We have to have allocChunkLimit a power of two, because the requested
527  * and actually-allocated sizes of any chunk must be on the same side of
528  * the limit, else we get confused about whether the chunk is "big".
529  *
530  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
531  */
533  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
534 
535  set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
536  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
537  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
538  set->allocChunkLimit >>= 1;
539 
540  /* Finally, do the type-independent part of context creation */
543  &AllocSetMethods,
544  parent,
545  name);
546 
547  return (MemoryContext) set;
548 }
549 
550 /*
551  * AllocSetReset
552  * Frees all memory which is allocated in the given set.
553  *
554  * Actually, this routine has some discretion about what to do.
555  * It should mark all allocated chunks freed, but it need not necessarily
556  * give back all the resources the set owns. Our actual implementation is
557  * that we give back all but the "keeper" block (which we must keep, since
558  * it shares a malloc chunk with the context header). In this way, we don't
559  * thrash malloc() when a context is repeatedly reset after small allocations,
560  * which is typical behavior for per-tuple contexts.
561  */
562 static void
564 {
565  AllocSet set = (AllocSet) context;
566  AllocBlock block;
567 
569 
570 #ifdef MEMORY_CONTEXT_CHECKING
571  /* Check for corruption and leaks before freeing */
572  AllocSetCheck(context);
573 #endif
574 
575  /* Clear chunk freelists */
576  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
577 
578  block = set->blocks;
579 
580  /* New blocks list will be just the keeper block */
581  set->blocks = set->keeper;
582 
583  while (block != NULL)
584  {
585  AllocBlock next = block->next;
586 
587  if (block == set->keeper)
588  {
589  /* Reset the block, but don't return it to malloc */
590  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
591 
592 #ifdef CLOBBER_FREED_MEMORY
593  wipe_mem(datastart, block->freeptr - datastart);
594 #else
595  /* wipe_mem() would have done this */
596  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
597 #endif
598  block->freeptr = datastart;
599  block->prev = NULL;
600  block->next = NULL;
601  }
602  else
603  {
604  /* Normal case, release the block */
605 #ifdef CLOBBER_FREED_MEMORY
606  wipe_mem(block, block->freeptr - ((char *) block));
607 #endif
608  free(block);
609  }
610  block = next;
611  }
612 
613  /* Reset block size allocation sequence, too */
614  set->nextBlockSize = set->initBlockSize;
615 }
616 
617 /*
618  * AllocSetDelete
619  * Frees all memory which is allocated in the given set,
620  * in preparation for deletion of the set.
621  *
622  * Unlike AllocSetReset, this *must* free all resources of the set.
623  */
624 static void
626 {
627  AllocSet set = (AllocSet) context;
628  AllocBlock block = set->blocks;
629 
631 
632 #ifdef MEMORY_CONTEXT_CHECKING
633  /* Check for corruption and leaks before freeing */
634  AllocSetCheck(context);
635 #endif
636 
637  /*
638  * If the context is a candidate for a freelist, put it into that freelist
639  * instead of destroying it.
640  */
641  if (set->freeListIndex >= 0)
642  {
643  AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
644 
645  /*
646  * Reset the context, if it needs it, so that we aren't hanging on to
647  * more than the initial malloc chunk.
648  */
649  if (!context->isReset)
650  MemoryContextResetOnly(context);
651 
652  /*
653  * If the freelist is full, just discard what's already in it. See
654  * comments with context_freelists[].
655  */
656  if (freelist->num_free >= MAX_FREE_CONTEXTS)
657  {
658  while (freelist->first_free != NULL)
659  {
660  AllocSetContext *oldset = freelist->first_free;
661 
662  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
663  freelist->num_free--;
664 
665  /* All that remains is to free the header/initial block */
666  free(oldset);
667  }
668  Assert(freelist->num_free == 0);
669  }
670 
671  /* Now add the just-deleted context to the freelist. */
672  set->header.nextchild = (MemoryContext) freelist->first_free;
673  freelist->first_free = set;
674  freelist->num_free++;
675 
676  return;
677  }
678 
679  /* Free all blocks, except the keeper which is part of context header */
680  while (block != NULL)
681  {
682  AllocBlock next = block->next;
683 
684 #ifdef CLOBBER_FREED_MEMORY
685  wipe_mem(block, block->freeptr - ((char *) block));
686 #endif
687 
688  if (block != set->keeper)
689  free(block);
690 
691  block = next;
692  }
693 
694  /* Finally, free the context header, including the keeper block */
695  free(set);
696 }
697 
698 /*
699  * AllocSetAlloc
700  * Returns pointer to allocated memory of given size or NULL if
701  * request could not be completed; memory is added to the set.
702  *
703  * No request may exceed:
704  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
705  * All callers use a much-lower limit.
706  *
707  * Note: when using valgrind, it doesn't matter how the returned allocation
708  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
709  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
710  */
711 static void *
713 {
714  AllocSet set = (AllocSet) context;
715  AllocBlock block;
716  AllocChunk chunk;
717  int fidx;
718  Size chunk_size;
719  Size blksize;
720 
722 
723  /*
724  * If requested size exceeds maximum for chunks, allocate an entire block
725  * for this request.
726  */
727  if (size > set->allocChunkLimit)
728  {
729  chunk_size = MAXALIGN(size);
730  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
731  block = (AllocBlock) malloc(blksize);
732  if (block == NULL)
733  return NULL;
734  block->aset = set;
735  block->freeptr = block->endptr = ((char *) block) + blksize;
736 
737  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
738  chunk->aset = set;
739  chunk->size = chunk_size;
740 #ifdef MEMORY_CONTEXT_CHECKING
741  chunk->requested_size = size;
742  /* set mark to catch clobber of "unused" space */
743  if (size < chunk_size)
744  set_sentinel(AllocChunkGetPointer(chunk), size);
745 #endif
746 #ifdef RANDOMIZE_ALLOCATED_MEMORY
747  /* fill the allocated space with junk */
748  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
749 #endif
750 
751  /*
752  * Stick the new block underneath the active allocation block, if any,
753  * so that we don't lose the use of the space remaining therein.
754  */
755  if (set->blocks != NULL)
756  {
757  block->prev = set->blocks;
758  block->next = set->blocks->next;
759  if (block->next)
760  block->next->prev = block;
761  set->blocks->next = block;
762  }
763  else
764  {
765  block->prev = NULL;
766  block->next = NULL;
767  set->blocks = block;
768  }
769 
770  AllocAllocInfo(set, chunk);
771 
772  /* Ensure any padding bytes are marked NOACCESS. */
773  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
774  chunk_size - size);
775 
776  /* Disallow external access to private part of chunk header. */
778 
779  return AllocChunkGetPointer(chunk);
780  }
781 
782  /*
783  * Request is small enough to be treated as a chunk. Look in the
784  * corresponding free list to see if there is a free chunk we could reuse.
785  * If one is found, remove it from the free list, make it again a member
786  * of the alloc set and return its data address.
787  */
788  fidx = AllocSetFreeIndex(size);
789  chunk = set->freelist[fidx];
790  if (chunk != NULL)
791  {
792  Assert(chunk->size >= size);
793 
794  set->freelist[fidx] = (AllocChunk) chunk->aset;
795 
796  chunk->aset = (void *) set;
797 
798 #ifdef MEMORY_CONTEXT_CHECKING
799  chunk->requested_size = size;
800  /* set mark to catch clobber of "unused" space */
801  if (size < chunk->size)
802  set_sentinel(AllocChunkGetPointer(chunk), size);
803 #endif
804 #ifdef RANDOMIZE_ALLOCATED_MEMORY
805  /* fill the allocated space with junk */
806  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
807 #endif
808 
809  AllocAllocInfo(set, chunk);
810 
811  /* Ensure any padding bytes are marked NOACCESS. */
812  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
813  chunk->size - size);
814 
815  /* Disallow external access to private part of chunk header. */
817 
818  return AllocChunkGetPointer(chunk);
819  }
820 
821  /*
822  * Choose the actual chunk size to allocate.
823  */
824  chunk_size = (1 << ALLOC_MINBITS) << fidx;
825  Assert(chunk_size >= size);
826 
827  /*
828  * If there is enough room in the active allocation block, we will put the
829  * chunk into that block. Else must start a new one.
830  */
831  if ((block = set->blocks) != NULL)
832  {
833  Size availspace = block->endptr - block->freeptr;
834 
835  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
836  {
837  /*
838  * The existing active (top) block does not have enough room for
839  * the requested allocation, but it might still have a useful
840  * amount of space in it. Once we push it down in the block list,
841  * we'll never try to allocate more space from it. So, before we
842  * do that, carve up its free space into chunks that we can put on
843  * the set's freelists.
844  *
845  * Because we can only get here when there's less than
846  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
847  * more than ALLOCSET_NUM_FREELISTS-1 times.
848  */
849  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
850  {
851  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
852  int a_fidx = AllocSetFreeIndex(availchunk);
853 
854  /*
855  * In most cases, we'll get back the index of the next larger
856  * freelist than the one we need to put this chunk on. The
857  * exception is when availchunk is exactly a power of 2.
858  */
859  if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
860  {
861  a_fidx--;
862  Assert(a_fidx >= 0);
863  availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
864  }
865 
866  chunk = (AllocChunk) (block->freeptr);
867 
868  /* Prepare to initialize the chunk header. */
869  VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
870 
871  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
872  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
873 
874  chunk->size = availchunk;
875 #ifdef MEMORY_CONTEXT_CHECKING
876  chunk->requested_size = 0; /* mark it free */
877 #endif
878  chunk->aset = (void *) set->freelist[a_fidx];
879  set->freelist[a_fidx] = chunk;
880  }
881 
882  /* Mark that we need to create a new block */
883  block = NULL;
884  }
885  }
886 
887  /*
888  * Time to create a new regular (multi-chunk) block?
889  */
890  if (block == NULL)
891  {
892  Size required_size;
893 
894  /*
895  * The first such block has size initBlockSize, and we double the
896  * space in each succeeding block, but not more than maxBlockSize.
897  */
898  blksize = set->nextBlockSize;
899  set->nextBlockSize <<= 1;
900  if (set->nextBlockSize > set->maxBlockSize)
901  set->nextBlockSize = set->maxBlockSize;
902 
903  /*
904  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
905  * space... but try to keep it a power of 2.
906  */
907  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
908  while (blksize < required_size)
909  blksize <<= 1;
910 
911  /* Try to allocate it */
912  block = (AllocBlock) malloc(blksize);
913 
914  /*
915  * We could be asking for pretty big blocks here, so cope if malloc
916  * fails. But give up if there's less than a meg or so available...
917  */
918  while (block == NULL && blksize > 1024 * 1024)
919  {
920  blksize >>= 1;
921  if (blksize < required_size)
922  break;
923  block = (AllocBlock) malloc(blksize);
924  }
925 
926  if (block == NULL)
927  return NULL;
928 
929  block->aset = set;
930  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
931  block->endptr = ((char *) block) + blksize;
932 
933  /* Mark unallocated space NOACCESS. */
935  blksize - ALLOC_BLOCKHDRSZ);
936 
937  block->prev = NULL;
938  block->next = set->blocks;
939  if (block->next)
940  block->next->prev = block;
941  set->blocks = block;
942  }
943 
944  /*
945  * OK, do the allocation
946  */
947  chunk = (AllocChunk) (block->freeptr);
948 
949  /* Prepare to initialize the chunk header. */
951 
952  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
953  Assert(block->freeptr <= block->endptr);
954 
955  chunk->aset = (void *) set;
956  chunk->size = chunk_size;
957 #ifdef MEMORY_CONTEXT_CHECKING
958  chunk->requested_size = size;
959  /* set mark to catch clobber of "unused" space */
960  if (size < chunk->size)
961  set_sentinel(AllocChunkGetPointer(chunk), size);
962 #endif
963 #ifdef RANDOMIZE_ALLOCATED_MEMORY
964  /* fill the allocated space with junk */
965  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
966 #endif
967 
968  AllocAllocInfo(set, chunk);
969 
970  /* Ensure any padding bytes are marked NOACCESS. */
971  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
972  chunk_size - size);
973 
974  /* Disallow external access to private part of chunk header. */
976 
977  return AllocChunkGetPointer(chunk);
978 }
979 
980 /*
981  * AllocSetFree
982  * Frees allocated memory; memory is removed from the set.
983  */
984 static void
985 AllocSetFree(MemoryContext context, void *pointer)
986 {
987  AllocSet set = (AllocSet) context;
988  AllocChunk chunk = AllocPointerGetChunk(pointer);
989 
990  /* Allow access to private part of chunk header. */
992 
993  AllocFreeInfo(set, chunk);
994 
995 #ifdef MEMORY_CONTEXT_CHECKING
996  /* Test for someone scribbling on unused space in chunk */
997  if (chunk->requested_size < chunk->size)
998  if (!sentinel_ok(pointer, chunk->requested_size))
999  elog(WARNING, "detected write past chunk end in %s %p",
1000  set->header.name, chunk);
1001 #endif
1002 
1003  if (chunk->size > set->allocChunkLimit)
1004  {
1005  /*
1006  * Big chunks are certain to have been allocated as single-chunk
1007  * blocks. Just unlink that block and return it to malloc().
1008  */
1009  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1010 
1011  /*
1012  * Try to verify that we have a sane block pointer: it should
1013  * reference the correct aset, and freeptr and endptr should point
1014  * just past the chunk.
1015  */
1016  if (block->aset != set ||
1017  block->freeptr != block->endptr ||
1018  block->freeptr != ((char *) block) +
1019  (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1020  elog(ERROR, "could not find block containing chunk %p", chunk);
1021 
1022  /* OK, remove block from aset's list and free it */
1023  if (block->prev)
1024  block->prev->next = block->next;
1025  else
1026  set->blocks = block->next;
1027  if (block->next)
1028  block->next->prev = block->prev;
1029 #ifdef CLOBBER_FREED_MEMORY
1030  wipe_mem(block, block->freeptr - ((char *) block));
1031 #endif
1032  free(block);
1033  }
1034  else
1035  {
1036  /* Normal case, put the chunk into appropriate freelist */
1037  int fidx = AllocSetFreeIndex(chunk->size);
1038 
1039  chunk->aset = (void *) set->freelist[fidx];
1040 
1041 #ifdef CLOBBER_FREED_MEMORY
1042  wipe_mem(pointer, chunk->size);
1043 #endif
1044 
1045 #ifdef MEMORY_CONTEXT_CHECKING
1046  /* Reset requested_size to 0 in chunks that are on freelist */
1047  chunk->requested_size = 0;
1048 #endif
1049  set->freelist[fidx] = chunk;
1050  }
1051 }
1052 
1053 /*
1054  * AllocSetRealloc
1055  * Returns new pointer to allocated memory of given size or NULL if
1056  * request could not be completed; this memory is added to the set.
1057  * Memory associated with given pointer is copied into the new memory,
1058  * and the old memory is freed.
1059  *
1060  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1061  * makes our Valgrind client requests less-precise, hazarding false negatives.
1062  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1063  * request size.)
1064  */
1065 static void *
1066 AllocSetRealloc(MemoryContext context, void *pointer, Size size)
1067 {
1068  AllocSet set = (AllocSet) context;
1069  AllocChunk chunk = AllocPointerGetChunk(pointer);
1070  Size oldsize;
1071 
1072  /* Allow access to private part of chunk header. */
1074 
1075  oldsize = chunk->size;
1076 
1077 #ifdef MEMORY_CONTEXT_CHECKING
1078  /* Test for someone scribbling on unused space in chunk */
1079  if (chunk->requested_size < oldsize)
1080  if (!sentinel_ok(pointer, chunk->requested_size))
1081  elog(WARNING, "detected write past chunk end in %s %p",
1082  set->header.name, chunk);
1083 #endif
1084 
1085  /*
1086  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1087  * allocated area already is >= the new size. (In particular, we always
1088  * fall out here if the requested size is a decrease.)
1089  */
1090  if (oldsize >= size)
1091  {
1092 #ifdef MEMORY_CONTEXT_CHECKING
1093  Size oldrequest = chunk->requested_size;
1094 
1095 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1096  /* We can only fill the extra space if we know the prior request */
1097  if (size > oldrequest)
1098  randomize_mem((char *) pointer + oldrequest,
1099  size - oldrequest);
1100 #endif
1101 
1102  chunk->requested_size = size;
1103 
1104  /*
1105  * If this is an increase, mark any newly-available part UNDEFINED.
1106  * Otherwise, mark the obsolete part NOACCESS.
1107  */
1108  if (size > oldrequest)
1109  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1110  size - oldrequest);
1111  else
1112  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1113  oldsize - size);
1114 
1115  /* set mark to catch clobber of "unused" space */
1116  if (size < oldsize)
1117  set_sentinel(pointer, size);
1118 #else /* !MEMORY_CONTEXT_CHECKING */
1119 
1120  /*
1121  * We don't have the information to determine whether we're growing
1122  * the old request or shrinking it, so we conservatively mark the
1123  * entire new allocation DEFINED.
1124  */
1125  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
1126  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1127 #endif
1128 
1129  /* Disallow external access to private part of chunk header. */
1131 
1132  return pointer;
1133  }
1134 
1135  if (oldsize > set->allocChunkLimit)
1136  {
1137  /*
1138  * The chunk must have been allocated as a single-chunk block. Use
1139  * realloc() to make the containing block bigger with minimum space
1140  * wastage.
1141  */
1142  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1143  Size chksize;
1144  Size blksize;
1145 
1146  /*
1147  * Try to verify that we have a sane block pointer: it should
1148  * reference the correct aset, and freeptr and endptr should point
1149  * just past the chunk.
1150  */
1151  if (block->aset != set ||
1152  block->freeptr != block->endptr ||
1153  block->freeptr != ((char *) block) +
1154  (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1155  elog(ERROR, "could not find block containing chunk %p", chunk);
1156 
1157  /* Do the realloc */
1158  chksize = MAXALIGN(size);
1159  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1160  block = (AllocBlock) realloc(block, blksize);
1161  if (block == NULL)
1162  {
1163  /* Disallow external access to private part of chunk header. */
1165  return NULL;
1166  }
1167  block->freeptr = block->endptr = ((char *) block) + blksize;
1168 
1169  /* Update pointers since block has likely been moved */
1170  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
1171  pointer = AllocChunkGetPointer(chunk);
1172  if (block->prev)
1173  block->prev->next = block;
1174  else
1175  set->blocks = block;
1176  if (block->next)
1177  block->next->prev = block;
1178  chunk->size = chksize;
1179 
1180 #ifdef MEMORY_CONTEXT_CHECKING
1181 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1182  /* We can only fill the extra space if we know the prior request */
1183  randomize_mem((char *) pointer + chunk->requested_size,
1184  size - chunk->requested_size);
1185 #endif
1186 
1187  /*
1188  * realloc() (or randomize_mem()) will have left the newly-allocated
1189  * part UNDEFINED, but we may need to adjust trailing bytes from the
1190  * old allocation.
1191  */
1192  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1193  oldsize - chunk->requested_size);
1194 
1195  chunk->requested_size = size;
1196 
1197  /* set mark to catch clobber of "unused" space */
1198  if (size < chunk->size)
1199  set_sentinel(pointer, size);
1200 #else /* !MEMORY_CONTEXT_CHECKING */
1201 
1202  /*
1203  * We don't know how much of the old chunk size was the actual
1204  * allocation; it could have been as small as one byte. We have to be
1205  * conservative and just mark the entire old portion DEFINED.
1206  */
1207  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1208 #endif
1209 
1210  /* Ensure any padding bytes are marked NOACCESS. */
1211  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1212 
1213  /* Disallow external access to private part of chunk header. */
1215 
1216  return pointer;
1217  }
1218  else
1219  {
1220  /*
1221  * Small-chunk case. We just do this by brute force, ie, allocate a
1222  * new chunk and copy the data. Since we know the existing data isn't
1223  * huge, this won't involve any great memcpy expense, so it's not
1224  * worth being smarter. (At one time we tried to avoid memcpy when it
1225  * was possible to enlarge the chunk in-place, but that turns out to
1226  * misbehave unpleasantly for repeated cycles of
1227  * palloc/repalloc/pfree: the eventually freed chunks go into the
1228  * wrong freelist for the next initial palloc request, and so we leak
1229  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1230  */
1231  AllocPointer newPointer;
1232 
1233  /* allocate new chunk */
1234  newPointer = AllocSetAlloc((MemoryContext) set, size);
1235 
1236  /* leave immediately if request was not completed */
1237  if (newPointer == NULL)
1238  {
1239  /* Disallow external access to private part of chunk header. */
1241  return NULL;
1242  }
1243 
1244  /*
1245  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1246  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1247  * definedness from the old allocation to the new. If we know the old
1248  * allocation, copy just that much. Otherwise, make the entire old
1249  * chunk defined to avoid errors as we copy the currently-NOACCESS
1250  * trailing bytes.
1251  */
1252  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1253 #ifdef MEMORY_CONTEXT_CHECKING
1254  oldsize = chunk->requested_size;
1255 #else
1256  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1257 #endif
1258 
1259  /* transfer existing data (certain to fit) */
1260  memcpy(newPointer, pointer, oldsize);
1261 
1262  /* free old chunk */
1263  AllocSetFree((MemoryContext) set, pointer);
1264 
1265  return newPointer;
1266  }
1267 }
1268 
1269 /*
1270  * AllocSetGetChunkSpace
1271  * Given a currently-allocated chunk, determine the total space
1272  * it occupies (including all memory-allocation overhead).
1273  */
1274 static Size
1275 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1276 {
1277  AllocChunk chunk = AllocPointerGetChunk(pointer);
1278  Size result;
1279 
1281  result = chunk->size + ALLOC_CHUNKHDRSZ;
1283  return result;
1284 }
1285 
1286 /*
1287  * AllocSetIsEmpty
1288  * Is an allocset empty of any allocated space?
1289  */
1290 static bool
1292 {
1293  /*
1294  * For now, we say "empty" only if the context is new or just reset. We
1295  * could examine the freelists to determine if all space has been freed,
1296  * but it's not really worth the trouble for present uses of this
1297  * functionality.
1298  */
1299  if (context->isReset)
1300  return true;
1301  return false;
1302 }
1303 
1304 /*
1305  * AllocSetStats
1306  * Compute stats about memory consumption of an allocset.
1307  *
1308  * printfunc: if not NULL, pass a human-readable stats string to this.
1309  * passthru: pass this pointer through to printfunc.
1310  * totals: if not NULL, add stats about this context into *totals.
1311  */
1312 static void
1314  MemoryStatsPrintFunc printfunc, void *passthru,
1315  MemoryContextCounters *totals)
1316 {
1317  AllocSet set = (AllocSet) context;
1318  Size nblocks = 0;
1319  Size freechunks = 0;
1320  Size totalspace;
1321  Size freespace = 0;
1322  AllocBlock block;
1323  int fidx;
1324 
1325  /* Include context header in totalspace */
1326  totalspace = MAXALIGN(sizeof(AllocSetContext));
1327 
1328  for (block = set->blocks; block != NULL; block = block->next)
1329  {
1330  nblocks++;
1331  totalspace += block->endptr - ((char *) block);
1332  freespace += block->endptr - block->freeptr;
1333  }
1334  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1335  {
1336  AllocChunk chunk;
1337 
1338  for (chunk = set->freelist[fidx]; chunk != NULL;
1339  chunk = (AllocChunk) chunk->aset)
1340  {
1341  freechunks++;
1342  freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1343  }
1344  }
1345 
1346  if (printfunc)
1347  {
1348  char stats_string[200];
1349 
1350  snprintf(stats_string, sizeof(stats_string),
1351  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
1352  totalspace, nblocks, freespace, freechunks,
1353  totalspace - freespace);
1354  printfunc(context, passthru, stats_string);
1355  }
1356 
1357  if (totals)
1358  {
1359  totals->nblocks += nblocks;
1360  totals->freechunks += freechunks;
1361  totals->totalspace += totalspace;
1362  totals->freespace += freespace;
1363  }
1364 }
1365 
1366 
1367 #ifdef MEMORY_CONTEXT_CHECKING
1368 
1369 /*
1370  * AllocSetCheck
1371  * Walk through chunks and check consistency of memory.
1372  *
1373  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1374  * find yourself in an infinite loop when trouble occurs, because this
1375  * routine will be entered again when elog cleanup tries to release memory!
1376  */
1377 static void
1378 AllocSetCheck(MemoryContext context)
1379 {
1380  AllocSet set = (AllocSet) context;
1381  const char *name = set->header.name;
1382  AllocBlock prevblock;
1383  AllocBlock block;
1384 
1385  for (prevblock = NULL, block = set->blocks;
1386  block != NULL;
1387  prevblock = block, block = block->next)
1388  {
1389  char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1390  long blk_used = block->freeptr - bpoz;
1391  long blk_data = 0;
1392  long nchunks = 0;
1393 
1394  /*
1395  * Empty block - empty can be keeper-block only
1396  */
1397  if (!blk_used)
1398  {
1399  if (set->keeper != block)
1400  elog(WARNING, "problem in alloc set %s: empty block %p",
1401  name, block);
1402  }
1403 
1404  /*
1405  * Check block header fields
1406  */
1407  if (block->aset != set ||
1408  block->prev != prevblock ||
1409  block->freeptr < bpoz ||
1410  block->freeptr > block->endptr)
1411  elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1412  name, block);
1413 
1414  /*
1415  * Chunk walker
1416  */
1417  while (bpoz < block->freeptr)
1418  {
1419  AllocChunk chunk = (AllocChunk) bpoz;
1420  Size chsize,
1421  dsize;
1422 
1423  /* Allow access to private part of chunk header. */
1425 
1426  chsize = chunk->size; /* aligned chunk size */
1427  dsize = chunk->requested_size; /* real data */
1428 
1429  /*
1430  * Check chunk size
1431  */
1432  if (dsize > chsize)
1433  elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1434  name, chunk, block);
1435  if (chsize < (1 << ALLOC_MINBITS))
1436  elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1437  name, chsize, chunk, block);
1438 
1439  /* single-chunk block? */
1440  if (chsize > set->allocChunkLimit &&
1441  chsize + ALLOC_CHUNKHDRSZ != blk_used)
1442  elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1443  name, chunk, block);
1444 
1445  /*
1446  * If chunk is allocated, check for correct aset pointer. (If it's
1447  * free, the aset is the freelist pointer, which we can't check as
1448  * easily...) Note this is an incomplete test, since palloc(0)
1449  * produces an allocated chunk with requested_size == 0.
1450  */
1451  if (dsize > 0 && chunk->aset != (void *) set)
1452  elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1453  name, block, chunk);
1454 
1455  /*
1456  * Check for overwrite of padding space in an allocated chunk.
1457  */
1458  if (chunk->aset == (void *) set && dsize < chsize &&
1459  !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1460  elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1461  name, block, chunk);
1462 
1463  /*
1464  * If chunk is allocated, disallow external access to private part
1465  * of chunk header.
1466  */
1467  if (chunk->aset == (void *) set)
1469 
1470  blk_data += chsize;
1471  nchunks++;
1472 
1473  bpoz += ALLOC_CHUNKHDRSZ + chsize;
1474  }
1475 
1476  if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1477  elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1478  name, block);
1479  }
1480 }
1481 
1482 #endif /* MEMORY_CONTEXT_CHECKING */
#define MemSetAligned(start, val, len)
Definition: c.h:941
Size initBlockSize
Definition: aset.c:130
#define AllocFreeInfo(_cxt, _chunk)
Definition: aset.c:324
MemoryContext AllocSetContextCreateExtended(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:388
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
AllocBlock blocks
Definition: aset.c:127
static int32 next
Definition: blutils.c:211
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
struct AllocBlockData AllocBlockData
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer)
Definition: aset.c:1275
MemoryContextData header
Definition: aset.c:125
void * AllocPointer
Definition: aset.c:112
#define AllocSetIsValid(set)
Definition: aset.c:216
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
struct AllocSetFreeList AllocSetFreeList
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static int AllocSetFreeIndex(Size size)
Definition: aset.c:337
static void AllocSetReset(MemoryContext context)
Definition: aset.c:563
int num_free
Definition: aset.c:250
#define AllocChunkGetPointer(chk)
Definition: aset.c:220
#define ALLOCCHUNK_PRIVATE_LEN
Definition: aset.c:204
#define LT16(n)
Definition: aset.c:303
int errcode(int sqlerrcode)
Definition: elog.c:575
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:264
int snprintf(char *str, size_t count, const char *fmt,...) pg_attribute_printf(3
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:102
AllocBlock keeper
Definition: aset.c:134
AllocSet aset
Definition: aset.c:155
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:155
char * freeptr
Definition: aset.c:158
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:189
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:199
AllocSetContext * first_free
Definition: aset.c:251
#define malloc(a)
Definition: header.h:50
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:795
static AllocSetFreeList context_freelists[2]
Definition: aset.c:255
#define ERROR
Definition: elog.h:43
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:83
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:474
char * endptr
Definition: aset.c:159
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:82
static void * AllocSetRealloc(MemoryContext context, void *pointer, Size size)
Definition: aset.c:1066
static void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: aset.c:1313
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:219
int errdetail(const char *fmt,...)
Definition: elog.c:873
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:103
AllocBlock next
Definition: aset.c:157
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:724
#define ereport(elevel, rest)
Definition: elog.h:122
#define AssertArg(condition)
Definition: c.h:701
MemoryContext TopMemoryContext
Definition: mcxt.c:44
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
static bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1291
#define WARNING
Definition: elog.h:40
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:200
int freeListIndex
Definition: aset.c:136
static const unsigned char LogTable256[256]
Definition: aset.c:305
#define ALLOCCHUNK_RAWSIZE
Definition: aset.c:185
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:85
struct AllocBlockData * AllocBlock
Definition: aset.c:105
#define MAX_FREE_CONTEXTS
Definition: aset.c:246
AllocChunk freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:128
Size nextBlockSize
Definition: aset.c:132
AllocBlock prev
Definition: aset.c:156
#define free(a)
Definition: header.h:65
Size allocChunkLimit
Definition: aset.c:133
#define Max(x, y)
Definition: c.h:851
struct AllocChunkData * AllocChunk
Definition: aset.c:106
#define Assert(condition)
Definition: c.h:699
static void AllocSetFree(MemoryContext context, void *pointer)
Definition: aset.c:985
size_t Size
Definition: c.h:433
#define MAXALIGN(LEN)
Definition: c.h:652
struct AllocChunkData AllocChunkData
static void AllocSetDelete(MemoryContext context)
Definition: aset.c:625
#define realloc(a, b)
Definition: header.h:60
const char * name
Definition: encode.c:521
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:46
void * aset
Definition: aset.c:194
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:190
static void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:712
static const MemoryContextMethods AllocSetMethods
Definition: aset.c:286
AllocSetContext * AllocSet
Definition: aset.c:139
struct AllocSetContext AllocSetContext
#define ALLOC_MINBITS
Definition: aset.c:81
#define elog
Definition: elog.h:219
Size maxBlockSize
Definition: aset.c:131
#define AllocAllocInfo(_cxt, _chunk)
Definition: aset.c:325
#define offsetof(type, field)
Definition: c.h:622
MemoryContext nextchild
Definition: memnodes.h:86
Size size
Definition: aset.c:177
#define AllocPointerGetChunk(ptr)
Definition: aset.c:218