PostgreSQL Source Code  git master
aset.c
Go to the documentation of this file.
/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines. AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks. AllocSetFree() normally
 *	doesn't free() memory really. It just adds the free'd area to some
 *	list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *				Jan Wieck
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is specially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	the previous instances of the block were guaranteed to be wasted until
 *	AllocSetReset() under the old way.
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */
46 
#include "postgres.h"

#include "port/pg_bitutils.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
52 
/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
85 
/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
111 
112 /*
113  * AllocSetContext is our standard implementation of MemoryContext.
114  *
115  * Note: header.isReset means there is nothing for AllocSetReset to do.
116  * This is different from the aset being physically empty (empty blocks list)
117  * because we will still have a keeper block. It's also different from the set
118  * being logically empty, because we don't attempt to detect pfree'ing the
119  * last active chunk.
120  */
121 typedef struct AllocSetContext
122 {
123  MemoryContextData header; /* Standard memory-context fields */
124  /* Info about storage allocated in this context: */
125  AllocBlock blocks; /* head of list of blocks in this set */
126  AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
127  /* Allocation parameters for this context: */
128  Size initBlockSize; /* initial block size */
129  Size maxBlockSize; /* maximum block size */
130  Size nextBlockSize; /* next block size to allocate */
131  Size allocChunkLimit; /* effective chunk size limit */
132  AllocBlock keeper; /* keep this block over resets */
133  /* freelist this context could be put in, or -1 if not a candidate: */
134  int freeListIndex; /* index in context_freelists[], or -1 */
136 
138 
139 /*
140  * AllocBlock
141  * An AllocBlock is the unit of memory that is obtained by aset.c
142  * from malloc(). It contains one or more AllocChunks, which are
143  * the units requested by palloc() and freed by pfree(). AllocChunks
144  * cannot be returned to malloc() individually, instead they are put
145  * on freelists by pfree() and re-used by the next palloc() that has
146  * a matching request size.
147  *
148  * AllocBlockData is the header data for a block --- the usable space
149  * within the block begins at the next alignment boundary.
150  */
151 typedef struct AllocBlockData
152 {
153  AllocSet aset; /* aset that owns this block */
154  AllocBlock prev; /* prev block in aset's blocks list, if any */
155  AllocBlock next; /* next block in aset's blocks list, if any */
156  char *freeptr; /* start of free space in this block */
157  char *endptr; /* end of space in this block */
159 
160 /*
161  * AllocChunk
162  * The prefix of each piece of memory in an AllocBlock
163  *
164  * Note: to meet the memory context APIs, the payload area of the chunk must
165  * be maxaligned, and the "aset" link must be immediately adjacent to the
166  * payload area (cf. GetMemoryChunkContext). We simplify matters for this
167  * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
168  * we can ensure things work by adding any required alignment padding before
169  * the "aset" field. There is a static assertion below that the alignment
170  * is done correctly.
171  */
172 typedef struct AllocChunkData
173 {
174  /* size is always the size of the usable space in the chunk */
176 #ifdef MEMORY_CONTEXT_CHECKING
177  /* when debugging memory usage, also store actual requested size */
178  /* this is zero in a free chunk */
179  Size requested_size;
180 
181 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
182 #else
183 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P)
184 #endif /* MEMORY_CONTEXT_CHECKING */
185 
186  /* ensure proper alignment by adding padding if needed */
187 #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
188  char padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
189 #endif
190 
191  /* aset is the owning aset if allocated, or the freelist link if free */
192  void *aset;
193  /* there must not be any padding to reach a MAXALIGN boundary here! */
195 
/*
 * Only the "aset" field should be accessed outside this module.
 * We keep the rest of an allocated chunk's header marked NOACCESS when using
 * valgrind.  But note that chunk headers that are in a freelist are kept
 * accessible, for simplicity.
 */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(AllocChunkData, aset)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
	((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
	((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
220 
221 /*
222  * Rather than repeatedly creating and deleting memory contexts, we keep some
223  * freed contexts in freelists so that we can hand them out again with little
224  * work. Before putting a context in a freelist, we reset it so that it has
225  * only its initial malloc chunk and no others. To be a candidate for a
226  * freelist, a context must have the same minContextSize/initBlockSize as
227  * other contexts in the list; but its maxBlockSize is irrelevant since that
228  * doesn't affect the size of the initial chunk.
229  *
230  * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
231  * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
232  * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
233  *
234  * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
235  * hopes of improving locality of reference. But if there get to be too
236  * many contexts in the list, we'd prefer to drop the most-recently-created
237  * contexts in hopes of keeping the process memory map compact.
238  * We approximate that by simply deleting all existing entries when the list
239  * overflows, on the assumption that queries that allocate a lot of contexts
240  * will probably free them in more or less reverse order of allocation.
241  *
242  * Contexts in a freelist are chained via their nextchild pointers.
243  */
244 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
245 
246 typedef struct AllocSetFreeList
247 {
248  int num_free; /* current list length */
249  AllocSetContext *first_free; /* list header */
251 
252 /* context_freelists[0] is for default params, [1] for small params */
254 {
255  {
256  0, NULL
257  },
258  {
259  0, NULL
260  }
261 };
262 
263 /*
264  * These functions implement the MemoryContext API for AllocSet contexts.
265  */
266 static void *AllocSetAlloc(MemoryContext context, Size size);
267 static void AllocSetFree(MemoryContext context, void *pointer);
268 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
269 static void AllocSetReset(MemoryContext context);
270 static void AllocSetDelete(MemoryContext context);
271 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
272 static bool AllocSetIsEmpty(MemoryContext context);
273 static void AllocSetStats(MemoryContext context,
274  MemoryStatsPrintFunc printfunc, void *passthru,
275  MemoryContextCounters *totals);
276 
277 #ifdef MEMORY_CONTEXT_CHECKING
278 static void AllocSetCheck(MemoryContext context);
279 #endif
280 
281 /*
282  * This is the virtual function table for AllocSet contexts.
283  */
286  AllocSetFree,
293 #ifdef MEMORY_CONTEXT_CHECKING
294  ,AllocSetCheck
295 #endif
296 };
297 
298 
299 /* ----------
300  * AllocSetFreeIndex -
301  *
302  * Depending on the size of an allocation compute which freechunk
303  * list of the alloc set it belongs to. Caller must have verified
304  * that size <= ALLOC_CHUNK_LIMIT.
305  * ----------
306  */
307 static inline int
309 {
310  int idx;
311 
312  if (size > (1 << ALLOC_MINBITS))
313  {
314  /*----------
315  * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
316  * This is the same as
317  * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
318  * or equivalently
319  * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
320  *
321  * However, rather than just calling that function, we duplicate the
322  * logic here, allowing an additional optimization. It's reasonable
323  * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
324  * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
325  * the last two bytes.
326  *
327  * Yes, this function is enough of a hot-spot to make it worth this
328  * much trouble.
329  *----------
330  */
331 #ifdef HAVE__BUILTIN_CLZ
332  idx = 31 - __builtin_clz((uint32) size - 1) - ALLOC_MINBITS + 1;
333 #else
334  uint32 t,
335  tsize;
336 
337  /* Statically assert that we only have a 16-bit input value. */
339  "ALLOC_CHUNK_LIMIT must be less than 64kB");
340 
341  tsize = size - 1;
342  t = tsize >> 8;
343  idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
344  idx -= ALLOC_MINBITS - 1;
345 #endif
346 
348  }
349  else
350  idx = 0;
351 
352  return idx;
353 }
354 
355 
356 /*
357  * Public routines
358  */
359 
360 
361 /*
362  * AllocSetContextCreateInternal
363  * Create a new AllocSet context.
364  *
365  * parent: parent context, or NULL if top-level context
366  * name: name of context (must be statically allocated)
367  * minContextSize: minimum context size
368  * initBlockSize: initial allocation block size
369  * maxBlockSize: maximum allocation block size
370  *
371  * Most callers should abstract the context size parameters using a macro
372  * such as ALLOCSET_DEFAULT_SIZES.
373  *
374  * Note: don't call this directly; go through the wrapper macro
375  * AllocSetContextCreate.
376  */
379  const char *name,
380  Size minContextSize,
383 {
384  int freeListIndex;
385  Size firstBlockSize;
386  AllocSet set;
387  AllocBlock block;
388 
389  /* Assert we padded AllocChunkData properly */
391  "sizeof(AllocChunkData) is not maxaligned");
394  "padding calculation in AllocChunkData is wrong");
395 
396  /*
397  * First, validate allocation parameters. Once these were regular runtime
398  * test and elog's, but in practice Asserts seem sufficient because nobody
399  * varies their parameters at runtime. We somewhat arbitrarily enforce a
400  * minimum 1K block size.
401  */
402  Assert(initBlockSize == MAXALIGN(initBlockSize) &&
403  initBlockSize >= 1024);
404  Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
405  maxBlockSize >= initBlockSize &&
406  AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
407  Assert(minContextSize == 0 ||
408  (minContextSize == MAXALIGN(minContextSize) &&
409  minContextSize >= 1024 &&
410  minContextSize <= maxBlockSize));
411 
412  /*
413  * Check whether the parameters match either available freelist. We do
414  * not need to demand a match of maxBlockSize.
415  */
416  if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
417  initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
418  freeListIndex = 0;
419  else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
420  initBlockSize == ALLOCSET_SMALL_INITSIZE)
421  freeListIndex = 1;
422  else
423  freeListIndex = -1;
424 
425  /*
426  * If a suitable freelist entry exists, just recycle that context.
427  */
428  if (freeListIndex >= 0)
429  {
430  AllocSetFreeList *freelist = &context_freelists[freeListIndex];
431 
432  if (freelist->first_free != NULL)
433  {
434  /* Remove entry from freelist */
435  set = freelist->first_free;
436  freelist->first_free = (AllocSet) set->header.nextchild;
437  freelist->num_free--;
438 
439  /* Update its maxBlockSize; everything else should be OK */
440  set->maxBlockSize = maxBlockSize;
441 
442  /* Reinitialize its header, installing correct name and parent */
446  parent,
447  name);
448 
449  ((MemoryContext) set)->mem_allocated =
450  set->keeper->endptr - ((char *) set);
451 
452  return (MemoryContext) set;
453  }
454  }
455 
456  /* Determine size of initial block */
457  firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
459  if (minContextSize != 0)
460  firstBlockSize = Max(firstBlockSize, minContextSize);
461  else
462  firstBlockSize = Max(firstBlockSize, initBlockSize);
463 
464  /*
465  * Allocate the initial block. Unlike other aset.c blocks, it starts with
466  * the context header and its block header follows that.
467  */
468  set = (AllocSet) malloc(firstBlockSize);
469  if (set == NULL)
470  {
471  if (TopMemoryContext)
473  ereport(ERROR,
474  (errcode(ERRCODE_OUT_OF_MEMORY),
475  errmsg("out of memory"),
476  errdetail("Failed while creating memory context \"%s\".",
477  name)));
478  }
479 
480  /*
481  * Avoid writing code that can fail between here and MemoryContextCreate;
482  * we'd leak the header/initial block if we ereport in this stretch.
483  */
484 
485  /* Fill in the initial block's block header */
486  block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
487  block->aset = set;
488  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
489  block->endptr = ((char *) set) + firstBlockSize;
490  block->prev = NULL;
491  block->next = NULL;
492 
493  /* Mark unallocated space NOACCESS; leave the block header alone. */
494  VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
495 
496  /* Remember block as part of block list */
497  set->blocks = block;
498  /* Mark block as not to be released at reset time */
499  set->keeper = block;
500 
501  /* Finish filling in aset-specific parts of the context header */
502  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
503 
504  set->initBlockSize = initBlockSize;
505  set->maxBlockSize = maxBlockSize;
506  set->nextBlockSize = initBlockSize;
507  set->freeListIndex = freeListIndex;
508 
509  /*
510  * Compute the allocation chunk size limit for this context. It can't be
511  * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
512  * If maxBlockSize is small then requests exceeding the maxBlockSize, or
513  * even a significant fraction of it, should be treated as large chunks
514  * too. For the typical case of maxBlockSize a power of 2, the chunk size
515  * limit will be at most 1/8th maxBlockSize, so that given a stream of
516  * requests that are all the maximum chunk size we will waste at most
517  * 1/8th of the allocated space.
518  *
519  * We have to have allocChunkLimit a power of two, because the requested
520  * and actually-allocated sizes of any chunk must be on the same side of
521  * the limit, else we get confused about whether the chunk is "big".
522  *
523  * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
524  */
526  "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
527 
528  set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
529  while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
530  (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
531  set->allocChunkLimit >>= 1;
532 
533  /* Finally, do the type-independent part of context creation */
536  &AllocSetMethods,
537  parent,
538  name);
539 
540  ((MemoryContext) set)->mem_allocated = firstBlockSize;
541 
542  return (MemoryContext) set;
543 }
544 
545 /*
546  * AllocSetReset
547  * Frees all memory which is allocated in the given set.
548  *
549  * Actually, this routine has some discretion about what to do.
550  * It should mark all allocated chunks freed, but it need not necessarily
551  * give back all the resources the set owns. Our actual implementation is
552  * that we give back all but the "keeper" block (which we must keep, since
553  * it shares a malloc chunk with the context header). In this way, we don't
554  * thrash malloc() when a context is repeatedly reset after small allocations,
555  * which is typical behavior for per-tuple contexts.
556  */
557 static void
559 {
560  AllocSet set = (AllocSet) context;
561  AllocBlock block;
562  Size keepersize PG_USED_FOR_ASSERTS_ONLY
563  = set->keeper->endptr - ((char *) set);
564 
566 
567 #ifdef MEMORY_CONTEXT_CHECKING
568  /* Check for corruption and leaks before freeing */
569  AllocSetCheck(context);
570 #endif
571 
572  /* Clear chunk freelists */
573  MemSetAligned(set->freelist, 0, sizeof(set->freelist));
574 
575  block = set->blocks;
576 
577  /* New blocks list will be just the keeper block */
578  set->blocks = set->keeper;
579 
580  while (block != NULL)
581  {
582  AllocBlock next = block->next;
583 
584  if (block == set->keeper)
585  {
586  /* Reset the block, but don't return it to malloc */
587  char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
588 
589 #ifdef CLOBBER_FREED_MEMORY
590  wipe_mem(datastart, block->freeptr - datastart);
591 #else
592  /* wipe_mem() would have done this */
593  VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
594 #endif
595  block->freeptr = datastart;
596  block->prev = NULL;
597  block->next = NULL;
598  }
599  else
600  {
601  /* Normal case, release the block */
602  context->mem_allocated -= block->endptr - ((char *) block);
603 
604 #ifdef CLOBBER_FREED_MEMORY
605  wipe_mem(block, block->freeptr - ((char *) block));
606 #endif
607  free(block);
608  }
609  block = next;
610  }
611 
612  Assert(context->mem_allocated == keepersize);
613 
614  /* Reset block size allocation sequence, too */
615  set->nextBlockSize = set->initBlockSize;
616 }
617 
618 /*
619  * AllocSetDelete
620  * Frees all memory which is allocated in the given set,
621  * in preparation for deletion of the set.
622  *
623  * Unlike AllocSetReset, this *must* free all resources of the set.
624  */
625 static void
627 {
628  AllocSet set = (AllocSet) context;
629  AllocBlock block = set->blocks;
630  Size keepersize PG_USED_FOR_ASSERTS_ONLY
631  = set->keeper->endptr - ((char *) set);
632 
634 
635 #ifdef MEMORY_CONTEXT_CHECKING
636  /* Check for corruption and leaks before freeing */
637  AllocSetCheck(context);
638 #endif
639 
640  /*
641  * If the context is a candidate for a freelist, put it into that freelist
642  * instead of destroying it.
643  */
644  if (set->freeListIndex >= 0)
645  {
646  AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
647 
648  /*
649  * Reset the context, if it needs it, so that we aren't hanging on to
650  * more than the initial malloc chunk.
651  */
652  if (!context->isReset)
653  MemoryContextResetOnly(context);
654 
655  /*
656  * If the freelist is full, just discard what's already in it. See
657  * comments with context_freelists[].
658  */
659  if (freelist->num_free >= MAX_FREE_CONTEXTS)
660  {
661  while (freelist->first_free != NULL)
662  {
663  AllocSetContext *oldset = freelist->first_free;
664 
665  freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
666  freelist->num_free--;
667 
668  /* All that remains is to free the header/initial block */
669  free(oldset);
670  }
671  Assert(freelist->num_free == 0);
672  }
673 
674  /* Now add the just-deleted context to the freelist. */
675  set->header.nextchild = (MemoryContext) freelist->first_free;
676  freelist->first_free = set;
677  freelist->num_free++;
678 
679  return;
680  }
681 
682  /* Free all blocks, except the keeper which is part of context header */
683  while (block != NULL)
684  {
685  AllocBlock next = block->next;
686 
687  if (block != set->keeper)
688  context->mem_allocated -= block->endptr - ((char *) block);
689 
690 #ifdef CLOBBER_FREED_MEMORY
691  wipe_mem(block, block->freeptr - ((char *) block));
692 #endif
693 
694  if (block != set->keeper)
695  free(block);
696 
697  block = next;
698  }
699 
700  Assert(context->mem_allocated == keepersize);
701 
702  /* Finally, free the context header, including the keeper block */
703  free(set);
704 }
705 
706 /*
707  * AllocSetAlloc
708  * Returns pointer to allocated memory of given size or NULL if
709  * request could not be completed; memory is added to the set.
710  *
711  * No request may exceed:
712  * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
713  * All callers use a much-lower limit.
714  *
715  * Note: when using valgrind, it doesn't matter how the returned allocation
716  * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
717  * return space that is marked NOACCESS - AllocSetRealloc has to beware!
718  */
719 static void *
721 {
722  AllocSet set = (AllocSet) context;
723  AllocBlock block;
724  AllocChunk chunk;
725  int fidx;
726  Size chunk_size;
727  Size blksize;
728 
730 
731  /*
732  * If requested size exceeds maximum for chunks, allocate an entire block
733  * for this request.
734  */
735  if (size > set->allocChunkLimit)
736  {
737  chunk_size = MAXALIGN(size);
738  blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
739  block = (AllocBlock) malloc(blksize);
740  if (block == NULL)
741  return NULL;
742 
743  context->mem_allocated += blksize;
744 
745  block->aset = set;
746  block->freeptr = block->endptr = ((char *) block) + blksize;
747 
748  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
749  chunk->aset = set;
750  chunk->size = chunk_size;
751 #ifdef MEMORY_CONTEXT_CHECKING
752  chunk->requested_size = size;
753  /* set mark to catch clobber of "unused" space */
754  if (size < chunk_size)
755  set_sentinel(AllocChunkGetPointer(chunk), size);
756 #endif
757 #ifdef RANDOMIZE_ALLOCATED_MEMORY
758  /* fill the allocated space with junk */
759  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
760 #endif
761 
762  /*
763  * Stick the new block underneath the active allocation block, if any,
764  * so that we don't lose the use of the space remaining therein.
765  */
766  if (set->blocks != NULL)
767  {
768  block->prev = set->blocks;
769  block->next = set->blocks->next;
770  if (block->next)
771  block->next->prev = block;
772  set->blocks->next = block;
773  }
774  else
775  {
776  block->prev = NULL;
777  block->next = NULL;
778  set->blocks = block;
779  }
780 
781  /* Ensure any padding bytes are marked NOACCESS. */
782  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
783  chunk_size - size);
784 
785  /* Disallow external access to private part of chunk header. */
787 
788  return AllocChunkGetPointer(chunk);
789  }
790 
791  /*
792  * Request is small enough to be treated as a chunk. Look in the
793  * corresponding free list to see if there is a free chunk we could reuse.
794  * If one is found, remove it from the free list, make it again a member
795  * of the alloc set and return its data address.
796  */
797  fidx = AllocSetFreeIndex(size);
798  chunk = set->freelist[fidx];
799  if (chunk != NULL)
800  {
801  Assert(chunk->size >= size);
802 
803  set->freelist[fidx] = (AllocChunk) chunk->aset;
804 
805  chunk->aset = (void *) set;
806 
807 #ifdef MEMORY_CONTEXT_CHECKING
808  chunk->requested_size = size;
809  /* set mark to catch clobber of "unused" space */
810  if (size < chunk->size)
811  set_sentinel(AllocChunkGetPointer(chunk), size);
812 #endif
813 #ifdef RANDOMIZE_ALLOCATED_MEMORY
814  /* fill the allocated space with junk */
815  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
816 #endif
817 
818  /* Ensure any padding bytes are marked NOACCESS. */
819  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
820  chunk->size - size);
821 
822  /* Disallow external access to private part of chunk header. */
824 
825  return AllocChunkGetPointer(chunk);
826  }
827 
828  /*
829  * Choose the actual chunk size to allocate.
830  */
831  chunk_size = (1 << ALLOC_MINBITS) << fidx;
832  Assert(chunk_size >= size);
833 
834  /*
835  * If there is enough room in the active allocation block, we will put the
836  * chunk into that block. Else must start a new one.
837  */
838  if ((block = set->blocks) != NULL)
839  {
840  Size availspace = block->endptr - block->freeptr;
841 
842  if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
843  {
844  /*
845  * The existing active (top) block does not have enough room for
846  * the requested allocation, but it might still have a useful
847  * amount of space in it. Once we push it down in the block list,
848  * we'll never try to allocate more space from it. So, before we
849  * do that, carve up its free space into chunks that we can put on
850  * the set's freelists.
851  *
852  * Because we can only get here when there's less than
853  * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
854  * more than ALLOCSET_NUM_FREELISTS-1 times.
855  */
856  while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
857  {
858  Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
859  int a_fidx = AllocSetFreeIndex(availchunk);
860 
861  /*
862  * In most cases, we'll get back the index of the next larger
863  * freelist than the one we need to put this chunk on. The
864  * exception is when availchunk is exactly a power of 2.
865  */
866  if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
867  {
868  a_fidx--;
869  Assert(a_fidx >= 0);
870  availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
871  }
872 
873  chunk = (AllocChunk) (block->freeptr);
874 
875  /* Prepare to initialize the chunk header. */
876  VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
877 
878  block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
879  availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
880 
881  chunk->size = availchunk;
882 #ifdef MEMORY_CONTEXT_CHECKING
883  chunk->requested_size = 0; /* mark it free */
884 #endif
885  chunk->aset = (void *) set->freelist[a_fidx];
886  set->freelist[a_fidx] = chunk;
887  }
888 
889  /* Mark that we need to create a new block */
890  block = NULL;
891  }
892  }
893 
894  /*
895  * Time to create a new regular (multi-chunk) block?
896  */
897  if (block == NULL)
898  {
899  Size required_size;
900 
901  /*
902  * The first such block has size initBlockSize, and we double the
903  * space in each succeeding block, but not more than maxBlockSize.
904  */
905  blksize = set->nextBlockSize;
906  set->nextBlockSize <<= 1;
907  if (set->nextBlockSize > set->maxBlockSize)
908  set->nextBlockSize = set->maxBlockSize;
909 
910  /*
911  * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
912  * space... but try to keep it a power of 2.
913  */
914  required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
915  while (blksize < required_size)
916  blksize <<= 1;
917 
918  /* Try to allocate it */
919  block = (AllocBlock) malloc(blksize);
920 
921  /*
922  * We could be asking for pretty big blocks here, so cope if malloc
923  * fails. But give up if there's less than 1 MB or so available...
924  */
925  while (block == NULL && blksize > 1024 * 1024)
926  {
927  blksize >>= 1;
928  if (blksize < required_size)
929  break;
930  block = (AllocBlock) malloc(blksize);
931  }
932 
933  if (block == NULL)
934  return NULL;
935 
936  context->mem_allocated += blksize;
937 
938  block->aset = set;
939  block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
940  block->endptr = ((char *) block) + blksize;
941 
942  /* Mark unallocated space NOACCESS. */
944  blksize - ALLOC_BLOCKHDRSZ);
945 
946  block->prev = NULL;
947  block->next = set->blocks;
948  if (block->next)
949  block->next->prev = block;
950  set->blocks = block;
951  }
952 
953  /*
954  * OK, do the allocation
955  */
956  chunk = (AllocChunk) (block->freeptr);
957 
958  /* Prepare to initialize the chunk header. */
960 
961  block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
962  Assert(block->freeptr <= block->endptr);
963 
964  chunk->aset = (void *) set;
965  chunk->size = chunk_size;
966 #ifdef MEMORY_CONTEXT_CHECKING
967  chunk->requested_size = size;
968  /* set mark to catch clobber of "unused" space */
969  if (size < chunk->size)
970  set_sentinel(AllocChunkGetPointer(chunk), size);
971 #endif
972 #ifdef RANDOMIZE_ALLOCATED_MEMORY
973  /* fill the allocated space with junk */
974  randomize_mem((char *) AllocChunkGetPointer(chunk), size);
975 #endif
976 
977  /* Ensure any padding bytes are marked NOACCESS. */
978  VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
979  chunk_size - size);
980 
981  /* Disallow external access to private part of chunk header. */
983 
984  return AllocChunkGetPointer(chunk);
985 }
986 
987 /*
988  * AllocSetFree
989  * Frees allocated memory; memory is removed from the set.
 *
 * Two cases: a chunk with size > set->allocChunkLimit is certain to occupy
 * a dedicated single-chunk block, which is unlinked from the set's block
 * list and handed back to free().  Any smaller chunk is pushed onto the
 * freelist for its size class for later reuse by AllocSetAlloc(); its
 * containing block is not released here.
 *
 * NOTE(review): a VALGRIND_* client-request line (original line 998)
 * appears to have been dropped by extraction below -- confirm against the
 * upstream source.
990  */
991 static void
992 AllocSetFree(MemoryContext context, void *pointer)
993 {
994  AllocSet set = (AllocSet) context;
995  AllocChunk chunk = AllocPointerGetChunk(pointer);
996 
997  /* Allow access to private part of chunk header. */
999 
1000 #ifdef MEMORY_CONTEXT_CHECKING
1001  /* Test for someone scribbling on unused space in chunk */
1002  if (chunk->requested_size < chunk->size)
1003  if (!sentinel_ok(pointer, chunk->requested_size))
1004  elog(WARNING, "detected write past chunk end in %s %p",
1005  set->header.name, chunk);
1006 #endif
1007 
1008  if (chunk->size > set->allocChunkLimit)
1009  {
1010  /*
1011  * Big chunks are certain to have been allocated as single-chunk
1012  * blocks. Just unlink that block and return it to malloc().
1013  */
1014  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1015 
1016  /*
1017  * Try to verify that we have a sane block pointer: it should
1018  * reference the correct aset, and freeptr and endptr should point
1019  * just past the chunk.
1020  */
1021  if (block->aset != set ||
1022  block->freeptr != block->endptr ||
1023  block->freeptr != ((char *) block) +
1024  (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1025  elog(ERROR, "could not find block containing chunk %p", chunk);
1026 
1027  /* OK, remove block from aset's list and free it */
1028  if (block->prev)
1029  block->prev->next = block->next;
1030  else
1031  set->blocks = block->next;
1032  if (block->next)
1033  block->next->prev = block->prev;
1034 
 /* Keep the context's running count of malloc'd bytes in sync. */
1035  context->mem_allocated -= block->endptr - ((char *) block);
1036 
1037 #ifdef CLOBBER_FREED_MEMORY
1038  wipe_mem(block, block->freeptr - ((char *) block));
1039 #endif
1040  free(block);
1041  }
1042  else
1043  {
1044  /* Normal case, put the chunk into appropriate freelist */
1045  int fidx = AllocSetFreeIndex(chunk->size);
1046 
 /* A free chunk reuses its aset field as the freelist link pointer. */
1047  chunk->aset = (void *) set->freelist[fidx];
1048 
1049 #ifdef CLOBBER_FREED_MEMORY
1050  wipe_mem(pointer, chunk->size);
1051 #endif
1052 
1053 #ifdef MEMORY_CONTEXT_CHECKING
1054  /* Reset requested_size to 0 in chunks that are on freelist */
1055  chunk->requested_size = 0;
1056 #endif
1057  set->freelist[fidx] = chunk;
1058  }
1059 }
1060 
1061 /*
1062  * AllocSetRealloc
1063  * Returns new pointer to allocated memory of given size or NULL if
1064  * request could not be completed; this memory is added to the set.
1065  * Memory associated with given pointer is copied into the new memory,
1066  * and the old memory is freed.
1067  *
1068  * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1069  * makes our Valgrind client requests less-precise, hazarding false negatives.
1070  * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1071  * request size.)
 *
 * Three paths below: (1) oldsize > allocChunkLimit -- the chunk occupies
 * its own single-chunk block, which is resized in place via realloc();
 * (2) the existing (power-of-2-sized) chunk is already >= the new size,
 * so only bookkeeping changes; (3) otherwise a new chunk is allocated and
 * the data copied across, then the old chunk is freed.
 *
 * NOTE(review): several VALGRIND_* client-request lines (original lines
 * 1081, 1133, 1192, 1242 and 1268) appear to have been dropped by
 * extraction in this function -- confirm against the upstream source.
1072  */
1073 static void *
1074 AllocSetRealloc(MemoryContext context, void *pointer, Size size)
1075 {
1076  AllocSet set = (AllocSet) context;
1077  AllocChunk chunk = AllocPointerGetChunk(pointer);
1078  Size oldsize;
1079 
1080  /* Allow access to private part of chunk header. */
1082 
1083  oldsize = chunk->size;
1084 
1085 #ifdef MEMORY_CONTEXT_CHECKING
1086  /* Test for someone scribbling on unused space in chunk */
1087  if (chunk->requested_size < oldsize)
1088  if (!sentinel_ok(pointer, chunk->requested_size))
1089  elog(WARNING, "detected write past chunk end in %s %p",
1090  set->header.name, chunk);
1091 #endif
1092 
1093  if (oldsize > set->allocChunkLimit)
1094  {
1095  /*
1096  * The chunk must have been allocated as a single-chunk block. Use
1097  * realloc() to make the containing block bigger, or smaller, with
1098  * minimum space wastage.
1099  */
1100  AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1101  Size chksize;
1102  Size blksize;
1103  Size oldblksize;
1104 
1105  /*
1106  * Try to verify that we have a sane block pointer: it should
1107  * reference the correct aset, and freeptr and endptr should point
1108  * just past the chunk.
1109  */
1110  if (block->aset != set ||
1111  block->freeptr != block->endptr ||
1112  block->freeptr != ((char *) block) +
1113  (oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1114  elog(ERROR, "could not find block containing chunk %p", chunk);
1115 
1116  /*
1117  * Even if the new request is less than set->allocChunkLimit, we stick
1118  * with the single-chunk block approach. Therefore we need
1119  * chunk->size to be bigger than set->allocChunkLimit, so we don't get
1120  * confused about the chunk's status in future calls.
1121  */
1122  chksize = Max(size, set->allocChunkLimit + 1);
1123  chksize = MAXALIGN(chksize);
1124 
1125  /* Do the realloc */
1126  blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1127  oldblksize = block->endptr - ((char *) block);
1128 
1129  block = (AllocBlock) realloc(block, blksize);
1130  if (block == NULL)
1131  {
 /* realloc failed; the old block (and the caller's pointer) stay valid. */
1132  /* Disallow external access to private part of chunk header. */
1134  return NULL;
1135  }
1136 
1137  /* updated separately, not to underflow when (oldblksize > blksize) */
1138  context->mem_allocated -= oldblksize;
1139  context->mem_allocated += blksize;
1140 
1141  block->freeptr = block->endptr = ((char *) block) + blksize;
1142 
1143  /* Update pointers since block has likely been moved */
1144  chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ;
1145  pointer = AllocChunkGetPointer(chunk);
1146  if (block->prev)
1147  block->prev->next = block;
1148  else
1149  set->blocks = block;
1150  if (block->next)
1151  block->next->prev = block;
1152  chunk->size = chksize;
1153 
1154 #ifdef MEMORY_CONTEXT_CHECKING
1155 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1156  /* We can only fill the extra space if we know the prior request */
1157  if (size > chunk->requested_size)
1158  randomize_mem((char *) pointer + chunk->requested_size,
1159  size - chunk->requested_size);
1160 #endif
1161 
1162  /*
1163  * realloc() (or randomize_mem()) will have left any newly-allocated
1164  * part UNDEFINED, but we may need to adjust trailing bytes from the
1165  * old allocation.
1166  */
1167 #ifdef USE_VALGRIND
1168  if (oldsize > chunk->requested_size)
1169  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1170  oldsize - chunk->requested_size);
1171 #endif
1172 
1173  chunk->requested_size = size;
1174 
1175  /* set mark to catch clobber of "unused" space */
1176  if (size < chunk->size)
1177  set_sentinel(pointer, size);
1178 #else /* !MEMORY_CONTEXT_CHECKING */
1179 
1180  /*
1181  * We don't know how much of the old chunk size was the actual
1182  * allocation; it could have been as small as one byte. We have to be
1183  * conservative and just mark the entire old portion DEFINED.
1184  */
1185  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1186 #endif
1187 
1188  /* Ensure any padding bytes are marked NOACCESS. */
1189  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1190 
1191  /* Disallow external access to private part of chunk header. */
1193 
1194  return pointer;
1195  }
1196 
1197  /*
1198  * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1199  * allocated area already is >= the new size. (In particular, we will
1200  * fall out here if the requested size is a decrease.)
1201  */
1202  else if (oldsize >= size)
1203  {
1204 #ifdef MEMORY_CONTEXT_CHECKING
1205  Size oldrequest = chunk->requested_size;
1206 
1207 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1208  /* We can only fill the extra space if we know the prior request */
1209  if (size > oldrequest)
1210  randomize_mem((char *) pointer + oldrequest,
1211  size - oldrequest);
1212 #endif
1213 
1214  chunk->requested_size = size;
1215 
1216  /*
1217  * If this is an increase, mark any newly-available part UNDEFINED.
1218  * Otherwise, mark the obsolete part NOACCESS.
1219  */
1220  if (size > oldrequest)
1221  VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1222  size - oldrequest);
1223  else
1224  VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1225  oldsize - size);
1226 
1227  /* set mark to catch clobber of "unused" space */
1228  if (size < oldsize)
1229  set_sentinel(pointer, size);
1230 #else /* !MEMORY_CONTEXT_CHECKING */
1231 
1232  /*
1233  * We don't have the information to determine whether we're growing
1234  * the old request or shrinking it, so we conservatively mark the
1235  * entire new allocation DEFINED.
1236  */
1237  VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
1238  VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1239 #endif
1240 
1241  /* Disallow external access to private part of chunk header. */
1243 
1244  return pointer;
1245  }
1246  else
1247  {
1248  /*
1249  * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1250  * allocate a new chunk and copy the data. Since we know the existing
1251  * data isn't huge, this won't involve any great memcpy expense, so
1252  * it's not worth being smarter. (At one time we tried to avoid
1253  * memcpy when it was possible to enlarge the chunk in-place, but that
1254  * turns out to misbehave unpleasantly for repeated cycles of
1255  * palloc/repalloc/pfree: the eventually freed chunks go into the
1256  * wrong freelist for the next initial palloc request, and so we leak
1257  * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1258  */
1259  AllocPointer newPointer;
1260 
1261  /* allocate new chunk */
1262  newPointer = AllocSetAlloc((MemoryContext) set, size);
1263 
1264  /* leave immediately if request was not completed */
1265  if (newPointer == NULL)
1266  {
1267  /* Disallow external access to private part of chunk header. */
1269  return NULL;
1270  }
1271 
1272  /*
1273  * AllocSetAlloc() may have returned a region that is still NOACCESS.
1274  * Change it to UNDEFINED for the moment; memcpy() will then transfer
1275  * definedness from the old allocation to the new. If we know the old
1276  * allocation, copy just that much. Otherwise, make the entire old
1277  * chunk defined to avoid errors as we copy the currently-NOACCESS
1278  * trailing bytes.
1279  */
1280  VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1281 #ifdef MEMORY_CONTEXT_CHECKING
1282  oldsize = chunk->requested_size;
1283 #else
1284  VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1285 #endif
1286 
1287  /* transfer existing data (certain to fit) */
1288  memcpy(newPointer, pointer, oldsize);
1289 
1290  /* free old chunk */
1291  AllocSetFree((MemoryContext) set, pointer);
1292 
1293  return newPointer;
1294  }
1295 }
1296 
1297 /*
1298  * AllocSetGetChunkSpace
1299  * Given a currently-allocated chunk, determine the total space
1300  * it occupies (including all memory-allocation overhead).
 *
 * Result is the aligned chunk size plus the chunk-header size; block-level
 * overhead is not attributed to individual chunks.
 *
 * NOTE(review): the VALGRIND_* client-request lines bracketing the header
 * access (original lines 1308 and 1310) appear to have been dropped by
 * extraction -- confirm against the upstream source.
1301  */
1302 static Size
1303 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1304 {
1305  AllocChunk chunk = AllocPointerGetChunk(pointer);
1306  Size result;
1307 
1309  result = chunk->size + ALLOC_CHUNKHDRSZ;
1311  return result;
1312 }
1313 
1314 /*
1315  * AllocSetIsEmpty
1316  * Is an allocset empty of any allocated space?
 *
 * Returns true only when the context's isReset flag is set, i.e. the
 * context is new or has just been reset; allocations freed piecemeal do
 * not make it report empty.
1317  */
1318 static bool
/* NOTE(review): the signature line (original line 1319, presumably
 * "AllocSetIsEmpty(MemoryContext context)") was lost in extraction --
 * confirm against the upstream source. */
1320 {
1321  /*
1322  * For now, we say "empty" only if the context is new or just reset. We
1323  * could examine the freelists to determine if all space has been freed,
1324  * but it's not really worth the trouble for present uses of this
1325  * functionality.
1326  */
1327  if (context->isReset)
1328  return true;
1329  return false;
1330 }
1331 
1332 /*
1333  * AllocSetStats
1334  * Compute stats about memory consumption of an allocset.
1335  *
1336  * printfunc: if not NULL, pass a human-readable stats string to this.
1337  * passthru: pass this pointer through to printfunc.
1338  * totals: if not NULL, add stats about this context into *totals.
1339  */
1340 static void
/* NOTE(review): the line naming this function (original line 1341,
 * "AllocSetStats(MemoryContext context,") was lost in extraction --
 * confirm against the upstream source. */
1342  MemoryStatsPrintFunc printfunc, void *passthru,
1343  MemoryContextCounters *totals)
1344 {
1345  AllocSet set = (AllocSet) context;
1346  Size nblocks = 0;
1347  Size freechunks = 0;
1348  Size totalspace;
1349  Size freespace = 0;
1350  AllocBlock block;
1351  int fidx;
1352 
1353  /* Include context header in totalspace */
1354  totalspace = MAXALIGN(sizeof(AllocSetContext));
1355 
 /* Sum each block's total size and its unallocated tail space. */
1356  for (block = set->blocks; block != NULL; block = block->next)
1357  {
1358  nblocks++;
1359  totalspace += block->endptr - ((char *) block);
1360  freespace += block->endptr - block->freeptr;
1361  }
 /* Walk every freelist; a free chunk (header included) counts as free space. */
1362  for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1363  {
1364  AllocChunk chunk;
1365 
 /* Free chunks are linked through their aset field. */
1366  for (chunk = set->freelist[fidx]; chunk != NULL;
1367  chunk = (AllocChunk) chunk->aset)
1368  {
1369  freechunks++;
1370  freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1371  }
1372  }
1373 
1374  if (printfunc)
1375  {
1376  char stats_string[200];
1377 
1378  snprintf(stats_string, sizeof(stats_string),
1379  "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
1380  totalspace, nblocks, freespace, freechunks,
1381  totalspace - freespace);
1382  printfunc(context, passthru, stats_string);
1383  }
1384 
1385  if (totals)
1386  {
1387  totals->nblocks += nblocks;
1388  totals->freechunks += freechunks;
1389  totals->totalspace += totalspace;
1390  totals->freespace += freespace;
1391  }
1392 }
1393 
1394 
1395 #ifdef MEMORY_CONTEXT_CHECKING
1396 
1397 /*
1398  * AllocSetCheck
1399  * Walk through chunks and check consistency of memory.
1400  *
1401  * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1402  * find yourself in an infinite loop when trouble occurs, because this
1403  * routine will be entered again when elog cleanup tries to release memory!
 *
 * Also cross-checks the summed per-block sizes against the context's
 * running mem_allocated counter (asserted at the end).
 *
 * NOTE(review): two VALGRIND_* client-request lines (original lines 1458
 * and 1502) appear to have been dropped by extraction in the chunk walker
 * below -- confirm against the upstream source.
1404  */
1405 static void
1406 AllocSetCheck(MemoryContext context)
1407 {
1408  AllocSet set = (AllocSet) context;
1409  const char *name = set->header.name;
1410  AllocBlock prevblock;
1411  AllocBlock block;
1412  Size total_allocated = 0;
1413 
1414  for (prevblock = NULL, block = set->blocks;
1415  block != NULL;
1416  prevblock = block, block = block->next)
1417  {
1418  char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1419  long blk_used = block->freeptr - bpoz;
1420  long blk_data = 0;
1421  long nchunks = 0;
1422 
 /* The keeper block's accounting is measured from the context header. */
1423  if (set->keeper == block)
1424  total_allocated += block->endptr - ((char *) set);
1425  else
1426  total_allocated += block->endptr - ((char *) block);
1427 
1428  /*
1429  * Empty block - empty can be keeper-block only
1430  */
1431  if (!blk_used)
1432  {
1433  if (set->keeper != block)
1434  elog(WARNING, "problem in alloc set %s: empty block %p",
1435  name, block);
1436  }
1437 
1438  /*
1439  * Check block header fields
1440  */
1441  if (block->aset != set ||
1442  block->prev != prevblock ||
1443  block->freeptr < bpoz ||
1444  block->freeptr > block->endptr)
1445  elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1446  name, block);
1447 
1448  /*
1449  * Chunk walker
1450  */
1451  while (bpoz < block->freeptr)
1452  {
1453  AllocChunk chunk = (AllocChunk) bpoz;
1454  Size chsize,
1455  dsize;
1456 
1457  /* Allow access to private part of chunk header. */
1459 
1460  chsize = chunk->size; /* aligned chunk size */
1461  dsize = chunk->requested_size; /* real data */
1462 
1463  /*
1464  * Check chunk size
1465  */
1466  if (dsize > chsize)
1467  elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1468  name, chunk, block);
1469  if (chsize < (1 << ALLOC_MINBITS))
1470  elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1471  name, chsize, chunk, block);
1472 
1473  /* single-chunk block? */
1474  if (chsize > set->allocChunkLimit &&
1475  chsize + ALLOC_CHUNKHDRSZ != blk_used)
1476  elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1477  name, chunk, block);
1478 
1479  /*
1480  * If chunk is allocated, check for correct aset pointer. (If it's
1481  * free, the aset is the freelist pointer, which we can't check as
1482  * easily...) Note this is an incomplete test, since palloc(0)
1483  * produces an allocated chunk with requested_size == 0.
1484  */
1485  if (dsize > 0 && chunk->aset != (void *) set)
1486  elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1487  name, block, chunk);
1488 
1489  /*
1490  * Check for overwrite of padding space in an allocated chunk.
1491  */
1492  if (chunk->aset == (void *) set && dsize < chsize &&
1493  !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1494  elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1495  name, block, chunk);
1496 
1497  /*
1498  * If chunk is allocated, disallow external access to private part
1499  * of chunk header.
1500  */
1501  if (chunk->aset == (void *) set)
1503 
1504  blk_data += chsize;
1505  nchunks++;
1506 
 /* Advance past this chunk's header+payload to the next chunk. */
1507  bpoz += ALLOC_CHUNKHDRSZ + chsize;
1508  }
1509 
1510  if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1511  elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1512  name, block);
1513  }
1514 
 /* Totals must agree with the running count kept by alloc/free/realloc. */
1515  Assert(total_allocated == context->mem_allocated);
1516 }
1517 
1518 #endif /* MEMORY_CONTEXT_CHECKING */
#define MemSetAligned(start, val, len)
Definition: c.h:1004
Size initBlockSize
Definition: aset.c:128
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
AllocBlock blocks
Definition: aset.c:125
static int32 next
Definition: blutils.c:218
void(* MemoryStatsPrintFunc)(MemoryContext context, void *passthru, const char *stats_string)
Definition: memnodes.h:54
struct AllocBlockData AllocBlockData
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer)
Definition: aset.c:1303
MemoryContextData header
Definition: aset.c:123
void * AllocPointer
Definition: aset.c:110
#define AllocSetIsValid(set)
Definition: aset.c:214
#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
Definition: memdebug.h:28
struct AllocSetFreeList AllocSetFreeList
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
static int AllocSetFreeIndex(Size size)
Definition: aset.c:308
static void AllocSetReset(MemoryContext context)
Definition: aset.c:558
int num_free
Definition: aset.c:248
#define AllocChunkGetPointer(chk)
Definition: aset.c:218
#define ALLOCCHUNK_PRIVATE_LEN
Definition: aset.c:202
int errcode(int sqlerrcode)
Definition: elog.c:610
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:259
#define ALLOC_BLOCKHDRSZ
Definition: aset.c:100
AllocBlock keeper
Definition: aset.c:132
AllocSet aset
Definition: aset.c:153
void MemoryContextResetOnly(MemoryContext context)
Definition: mcxt.c:155
char * freeptr
Definition: aset.c:156
#define ALLOCSET_DEFAULT_MINSIZE
Definition: memutils.h:189
#define ALLOCSET_SMALL_MINSIZE
Definition: memutils.h:199
AllocSetContext * first_free
Definition: aset.c:249
#define malloc(a)
Definition: header.h:50
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:852
static AllocSetFreeList context_freelists[2]
Definition: aset.c:253
#define ERROR
Definition: elog.h:43
#define ALLOC_CHUNK_LIMIT
Definition: aset.c:81
void MemoryContextStats(MemoryContext context)
Definition: mcxt.c:498
PGDLLIMPORT const uint8 pg_leftmost_one_pos[256]
Definition: pg_bitutils.c:34
char * endptr
Definition: aset.c:157
#define ALLOCSET_NUM_FREELISTS
Definition: aset.c:80
static void * AllocSetRealloc(MemoryContext context, void *pointer, Size size)
Definition: aset.c:1074
static void AllocSetStats(MemoryContext context, MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals)
Definition: aset.c:1341
#define ALLOCSET_SEPARATE_THRESHOLD
Definition: memutils.h:219
int errdetail(const char *fmt,...)
Definition: elog.c:957
#define ALLOC_CHUNKHDRSZ
Definition: aset.c:101
unsigned int uint32
Definition: c.h:367
AllocBlock next
Definition: aset.c:155
void MemoryContextCreate(MemoryContext node, NodeTag tag, const MemoryContextMethods *methods, MemoryContext parent, const char *name)
Definition: mcxt.c:748
#define AssertArg(condition)
Definition: c.h:740
MemoryContext TopMemoryContext
Definition: mcxt.c:44
struct MemoryContextData * MemoryContext
Definition: palloc.h:36
MemoryContext AllocSetContextCreateInternal(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:378
static bool AllocSetIsEmpty(MemoryContext context)
Definition: aset.c:1319
#define WARNING
Definition: elog.h:40
#define ALLOCSET_SMALL_INITSIZE
Definition: memutils.h:200
int freeListIndex
Definition: aset.c:134
#define ALLOCCHUNK_RAWSIZE
Definition: aset.c:183
#define ALLOC_CHUNK_FRACTION
Definition: aset.c:83
struct AllocBlockData * AllocBlock
Definition: aset.c:103
#define MAX_FREE_CONTEXTS
Definition: aset.c:244
AllocChunk freelist[ALLOCSET_NUM_FREELISTS]
Definition: aset.c:126
Size nextBlockSize
Definition: aset.c:130
#define ereport(elevel,...)
Definition: elog.h:144
AllocBlock prev
Definition: aset.c:154
#define free(a)
Definition: header.h:65
Size allocChunkLimit
Definition: aset.c:131
#define Max(x, y)
Definition: c.h:914
struct AllocChunkData * AllocChunk
Definition: aset.c:104
#define Assert(condition)
Definition: c.h:738
static void AllocSetFree(MemoryContext context, void *pointer)
Definition: aset.c:992
size_t Size
Definition: c.h:466
#define MAXALIGN(LEN)
Definition: c.h:691
struct AllocChunkData AllocChunkData
static void AllocSetDelete(MemoryContext context)
Definition: aset.c:626
#define realloc(a, b)
Definition: header.h:60
const char * name
Definition: encode.c:555
Size mem_allocated
Definition: memnodes.h:82
#define AllocHugeSizeIsValid(size)
Definition: memutils.h:46
void * aset
Definition: aset.c:192
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define elog(elevel,...)
Definition: elog.h:214
#define ALLOCSET_DEFAULT_INITSIZE
Definition: memutils.h:190
static void * AllocSetAlloc(MemoryContext context, Size size)
Definition: aset.c:720
static const MemoryContextMethods AllocSetMethods
Definition: aset.c:284
AllocSetContext * AllocSet
Definition: aset.c:137
struct AllocSetContext AllocSetContext
#define ALLOC_MINBITS
Definition: aset.c:79
Size maxBlockSize
Definition: aset.c:129
#define snprintf
Definition: port.h:193
#define offsetof(type, field)
Definition: c.h:661
MemoryContext nextchild
Definition: memnodes.h:87
Size size
Definition: aset.c:175
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:121
#define AllocPointerGetChunk(ptr)
Definition: aset.c:216