PostgreSQL Source Code (git master)
aset.c
/*-------------------------------------------------------------------------
 *
 * aset.c
 *    Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *  This is a new (Feb. 05, 1999) implementation of the allocation set
 *  routines. AllocSet...() does not use OrderedSet...() any more.
 *  Instead it manages allocations in a block pool by itself, combining
 *  many small allocations in a few bigger blocks. AllocSetFree() normally
 *  doesn't really free() memory. It just adds the freed area to some
 *  list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *  at once on AllocSetReset(), which happens when the memory context gets
 *  destroyed.
 *              Jan Wieck
 *
 *  Performance improvement from Tom Lane, 8/99: for extremely large request
 *  sizes, we do want to be able to give the memory back to free() as soon
 *  as it is pfree()'d. Otherwise we risk tying up a lot of memory in
 *  freelist entries that might never be usable. This is especially needed
 *  when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *  the previous instances of the block were guaranteed to be wasted until
 *  AllocSetReset() under the old way.
 *
 *  Further improvement 12/00: as the code stood, request sizes in the
 *  midrange between "small" and "large" were handled very inefficiently,
 *  because any sufficiently large free chunk would be used to satisfy a
 *  request, even if it was much larger than necessary. This led to more
 *  and more wasted space in allocated chunks over time. To fix, get rid
 *  of the midrange behavior: we now handle only "small" power-of-2-size
 *  chunks as chunks. Anything "large" is passed off to malloc(). Change
 *  the number of freelists to change the small/large boundary.
 *
 *
 * About CLOBBER_FREED_MEMORY:
 *
 * If this symbol is defined, all freed memory is overwritten with 0x7F's.
 * This is useful for catching places that reference already-freed memory.
 *
 * About MEMORY_CONTEXT_CHECKING:
 *
 * Since we usually round request sizes up to the next power of 2, there
 * is often some unused space immediately after a requested data area.
 * Thus, if someone makes the common error of writing past what they've
 * requested, the problem is likely to go unnoticed ... until the day when
 * there *isn't* any wasted space, perhaps because of different memory
 * alignment on a new platform, or some other effect. To catch this sort
 * of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
 * the requested space whenever the request is less than the actual chunk
 * size, and verifies that the byte is undamaged when the chunk is freed.
 *
 *
 * About USE_VALGRIND and Valgrind client requests:
 *
 * Valgrind provides "client request" macros that exchange information with
 * the host Valgrind (if any). Under !USE_VALGRIND, memdebug.h stubs out
 * currently-used macros.
 *
 * When running under Valgrind, we want a NOACCESS memory region both before
 * and after the allocation. The chunk header is tempting as the preceding
 * region, but mcxt.c expects to be able to examine the standard chunk header
 * fields. Therefore, we use, when available, the requested_size field and
 * any subsequent padding. requested_size is made NOACCESS before returning
 * a chunk pointer to a caller. However, to reduce client request traffic,
 * it is kept DEFINED in chunks on the free list.
 *
 * The rounded-up capacity of the chunk usually acts as a post-allocation
 * NOACCESS region. If the request consumes precisely the entire chunk,
 * there is no such region; another chunk header may immediately follow. In
 * that case, Valgrind will not detect access beyond the end of the chunk.
 *
 * See also the cooperating Valgrind client requests in mcxt.c.
 *
 *-------------------------------------------------------------------------
 */
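
/*
 * Illustrative sketch (editor's note, not part of the original file): the
 * sentinel mechanism described above is what makes small overruns visible.
 * With MEMORY_CONTEXT_CHECKING enabled and the allocation served by an
 * allocation set, the hypothetical sequence
 *
 *      char *p = palloc(10);       -- rounded up to a 16-byte chunk
 *      memset(p, 'x', 11);         -- clobbers the 0x7E sentinel at p[10]
 *      pfree(p);
 *
 * draws "WARNING: detected write past chunk end" from AllocSetFree() below.
 */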

#include "postgres.h"

#include "utils/memdebug.h"
#include "utils/memutils.h"

/* Define this to detail debug alloc information */
/* #define HAVE_ALLOCINFO */

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes. This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc(). The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS       3   /* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS  11
#define ALLOC_CHUNK_LIMIT   (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION    4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
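
/*
 * Worked example (editor's note, a sketch using the defaults above): with
 * ALLOC_MINBITS = 3 and ALLOCSET_NUM_FREELISTS = 11, the freelists hold
 * chunks of 8, 16, 32, ..., 8192 bytes, and ALLOC_CHUNK_LIMIT is
 * 1 << (11 - 1 + 3) = 8192. A 100-byte request is rounded up to the
 * 128-byte class (freelist 4); a 10000-byte request exceeds the limit and
 * is given its own malloc'd block.
 */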

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *--------------------
 */
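
/*
 * For example (editor's note, a sketch assuming ALLOCSET_DEFAULT_SIZES-style
 * parameters of initBlockSize = 8kB and maxBlockSize = 8MB): successive
 * blocks are requested at 8kB, 16kB, 32kB, ... until the doubling reaches
 * the 8MB cap, after which every further block is 8MB.
 */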

#define ALLOC_BLOCKHDRSZ    MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ    MAXALIGN(sizeof(AllocChunkData))

/* Portion of ALLOC_CHUNKHDRSZ examined outside aset.c. */
#define ALLOC_CHUNK_PUBLIC  \
    (offsetof(AllocChunkData, size) + sizeof(Size))

/* Portion of ALLOC_CHUNKHDRSZ excluding trailing padding. */
#ifdef MEMORY_CONTEXT_CHECKING
#define ALLOC_CHUNK_USED    \
    (offsetof(AllocChunkData, requested_size) + sizeof(Size))
#else
#define ALLOC_CHUNK_USED    \
    (offsetof(AllocChunkData, size) + sizeof(Size))
#endif

typedef struct AllocBlockData *AllocBlock;  /* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *      Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we may still have a keeper block. It's also different from the set
 * being logically empty, because we don't attempt to detect pfree'ing the
 * last active chunk.
 */
typedef struct AllocSetContext
{
    MemoryContextData header;   /* Standard memory-context fields */
    /* Info about storage allocated in this context: */
    AllocBlock  blocks;         /* head of list of blocks in this set */
    AllocChunk  freelist[ALLOCSET_NUM_FREELISTS];   /* free chunk lists */
    /* Allocation parameters for this context: */
    Size        initBlockSize;  /* initial block size */
    Size        maxBlockSize;   /* maximum block size */
    Size        nextBlockSize;  /* next block size to allocate */
    Size        allocChunkLimit;    /* effective chunk size limit */
    AllocBlock  keeper;         /* if not NULL, keep this block over resets */
} AllocSetContext;

typedef AllocSetContext *AllocSet;

/*
 * AllocBlock
 *      An AllocBlock is the unit of memory that is obtained by aset.c
 *      from malloc(). It contains one or more AllocChunks, which are
 *      the units requested by palloc() and freed by pfree(). AllocChunks
 *      cannot be returned to malloc() individually, instead they are put
 *      on freelists by pfree() and re-used by the next palloc() that has
 *      a matching request size.
 *
 *      AllocBlockData is the header data for a block --- the usable space
 *      within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
    AllocSet    aset;           /* aset that owns this block */
    AllocBlock  next;           /* next block in aset's blocks list */
    char       *freeptr;        /* start of free space in this block */
    char       *endptr;         /* end of space in this block */
} AllocBlockData;

/*
 * AllocChunk
 *      The prefix of each piece of memory in an AllocBlock
 *
 * NB: this MUST match StandardChunkHeader as defined by utils/memutils.h.
 */
typedef struct AllocChunkData
{
    /* aset is the owning aset if allocated, or the freelist link if free */
    void       *aset;
    /* size is always the size of the usable space in the chunk */
    Size        size;
#ifdef MEMORY_CONTEXT_CHECKING
    /* when debugging memory usage, also store actual requested size */
    /* this is zero in a free chunk */
    Size        requested_size;
#endif
} AllocChunkData;

/*
 * AllocPointerIsValid
 *      True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *      True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)   \
    ((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)   \
    ((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
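
/*
 * Layout sketch (editor's note, illustrative): the chunk header immediately
 * precedes the pointer that palloc() callers see, so the two macros above
 * are exact inverses:
 *
 *      AllocChunk chunk = ...;
 *      void *ptr = AllocChunkGetPointer(chunk);    -- chunk + ALLOC_CHUNKHDRSZ
 *      Assert(AllocPointerGetChunk(ptr) == chunk); -- ptr - ALLOC_CHUNKHDRSZ
 */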

/*
 * These functions implement the MemoryContext API for AllocSet contexts.
 */
static void *AllocSetAlloc(MemoryContext context, Size size);
static void AllocSetFree(MemoryContext context, void *pointer);
static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
static void AllocSetInit(MemoryContext context);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context, int level, bool print,
              MemoryContextCounters *totals);

#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for AllocSet contexts.
 */
static MemoryContextMethods AllocSetMethods = {
    AllocSetAlloc,
    AllocSetFree,
    AllocSetRealloc,
    AllocSetInit,
    AllocSetReset,
    AllocSetDelete,
    AllocSetGetChunkSpace,
    AllocSetIsEmpty,
    AllocSetStats
#ifdef MEMORY_CONTEXT_CHECKING
    ,AllocSetCheck
#endif
};

/*
 * Table for AllocSetFreeIndex
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
    0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
    LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
    fprintf(stderr, "AllocFree: %s: %p, %d\n", \
        (_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
    fprintf(stderr, "AllocAlloc: %s: %p, %d\n", \
        (_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif

/* ----------
 * AllocSetFreeIndex -
 *
 *      Depending on the size of an allocation, compute which freechunk
 *      list of the alloc set it belongs to. Caller must have verified
 *      that size <= ALLOC_CHUNK_LIMIT.
 * ----------
 */
static inline int
AllocSetFreeIndex(Size size)
{
    int         idx;
    unsigned int t,
                tsize;

    if (size > (1 << ALLOC_MINBITS))
    {
        tsize = (size - 1) >> ALLOC_MINBITS;

        /*
         * At this point we need to obtain log2(tsize)+1, ie, the number of
         * not-all-zero bits at the right. We used to do this with a
         * shift-and-count loop, but this function is enough of a hotspot to
         * justify micro-optimization effort. The best approach seems to be
         * to use a lookup table. Note that this code assumes that
         * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
         * the tsize value.
         */
        t = tsize >> 8;
        idx = t ? LogTable256[t] + 8 : LogTable256[tsize];

        Assert(idx < ALLOCSET_NUM_FREELISTS);
    }
    else
        idx = 0;

    return idx;
}
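
/*
 * Example mappings (editor's note, following the code above): sizes 1..8
 * map to freelist 0, 9..16 to freelist 1, 17..32 to freelist 2, and so on
 * up to 4097..8192 on freelist 10. E.g. for size = 100: tsize = 99 >> 3 = 12
 * and LogTable256[12] = 4, so the chunk comes from freelist 4 and will be
 * 128 bytes.
 */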

#ifdef CLOBBER_FREED_MEMORY

/* Wipe freed memory for debugging purposes */
static void
wipe_mem(void *ptr, size_t size)
{
    VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
    memset(ptr, 0x7F, size);
    VALGRIND_MAKE_MEM_NOACCESS(ptr, size);
}
#endif

#ifdef MEMORY_CONTEXT_CHECKING
static void
set_sentinel(void *base, Size offset)
{
    char       *ptr = (char *) base + offset;

    VALGRIND_MAKE_MEM_UNDEFINED(ptr, 1);
    *ptr = 0x7E;
    VALGRIND_MAKE_MEM_NOACCESS(ptr, 1);
}

static bool
sentinel_ok(const void *base, Size offset)
{
    const char *ptr = (const char *) base + offset;
    bool        ret;

    VALGRIND_MAKE_MEM_DEFINED(ptr, 1);
    ret = *ptr == 0x7E;
    VALGRIND_MAKE_MEM_NOACCESS(ptr, 1);

    return ret;
}
#endif

#ifdef RANDOMIZE_ALLOCATED_MEMORY

/*
 * Fill a just-allocated piece of memory with "random" data. It's not really
 * very random, just a repeating sequence with a length that's prime. What
 * we mainly want out of it is to have a good probability that two palloc's
 * of the same number of bytes start out containing different data.
 *
 * The region may be NOACCESS, so make it UNDEFINED first to avoid errors as
 * we fill it. Filling the region makes it DEFINED, so make it UNDEFINED
 * again afterward. Whether to finally make it UNDEFINED or NOACCESS is
 * fairly arbitrary. UNDEFINED is more convenient for AllocSetRealloc(), and
 * other callers have no preference.
 */
static void
randomize_mem(char *ptr, size_t size)
{
    static int  save_ctr = 1;
    size_t      remaining = size;
    int         ctr;

    ctr = save_ctr;
    VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
    while (remaining-- > 0)
    {
        *ptr++ = ctr;
        if (++ctr > 251)
            ctr = 1;
    }
    VALGRIND_MAKE_MEM_UNDEFINED(ptr - size, size);
    save_ctr = ctr;
}
#endif   /* RANDOMIZE_ALLOCATED_MEMORY */


/*
 * Public routines
 */


/*
 * AllocSetContextCreate
 *      Create a new AllocSet context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (for debugging only, need not be unique)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 *
 * Notes: the name string will be copied into context-lifespan storage.
 * Most callers should abstract the context size parameters using a macro
 * such as ALLOCSET_DEFAULT_SIZES.
 */
MemoryContext
AllocSetContextCreate(MemoryContext parent,
                      const char *name,
                      Size minContextSize,
                      Size initBlockSize,
                      Size maxBlockSize)
{
    AllocSet    set;

    /*
     * First, validate allocation parameters. (If we're going to throw an
     * error, we should do so before the context is created, not after.) We
     * somewhat arbitrarily enforce a minimum 1K block size.
     */
    if (initBlockSize != MAXALIGN(initBlockSize) ||
        initBlockSize < 1024)
        elog(ERROR, "invalid initBlockSize for memory context: %zu",
             initBlockSize);
    if (maxBlockSize != MAXALIGN(maxBlockSize) ||
        maxBlockSize < initBlockSize ||
        !AllocHugeSizeIsValid(maxBlockSize))    /* must be safe to double */
        elog(ERROR, "invalid maxBlockSize for memory context: %zu",
             maxBlockSize);
    if (minContextSize != 0 &&
        (minContextSize != MAXALIGN(minContextSize) ||
         minContextSize <= ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
        elog(ERROR, "invalid minContextSize for memory context: %zu",
             minContextSize);

    /* Do the type-independent part of context creation */
    set = (AllocSet) MemoryContextCreate(T_AllocSetContext,
                                         sizeof(AllocSetContext),
                                         &AllocSetMethods,
                                         parent,
                                         name);

    /* Save allocation parameters */
    set->initBlockSize = initBlockSize;
    set->maxBlockSize = maxBlockSize;
    set->nextBlockSize = initBlockSize;

    /*
     * Compute the allocation chunk size limit for this context. It can't be
     * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
     * If maxBlockSize is small then requests exceeding the maxBlockSize, or
     * even a significant fraction of it, should be treated as large chunks
     * too. For the typical case of maxBlockSize a power of 2, the chunk size
     * limit will be at most 1/8th maxBlockSize, so that given a stream of
     * requests that are all the maximum chunk size we will waste at most
     * 1/8th of the allocated space.
     *
     * We have to have allocChunkLimit a power of two, because the requested
     * and actually-allocated sizes of any chunk must be on the same side of
     * the limit, else we get confused about whether the chunk is "big".
     *
     * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
     */
    StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
                     "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");

    set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
    while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
           (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
        set->allocChunkLimit >>= 1;

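    /*
     * Worked example (editor's note; the header sizes are illustrative
     * 64-bit values without MEMORY_CONTEXT_CHECKING, giving
     * ALLOC_BLOCKHDRSZ = 32 and ALLOC_CHUNKHDRSZ = 16): for maxBlockSize =
     * 8192 the loop compares against (8192 - 32) / 4 = 2040 and halves the
     * limit 8192 -> 4096 -> 2048 -> 1024, stopping since 1024 + 16 <= 2040.
     * The context then treats anything over 1024 bytes as a dedicated
     * malloc'd block.
     */
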
    /*
     * Grab always-allocated space, if requested
     */
    if (minContextSize > 0)
    {
        Size        blksize = minContextSize;
        AllocBlock  block;

        block = (AllocBlock) malloc(blksize);
        if (block == NULL)
        {
            MemoryContextStats(TopMemoryContext);
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of memory"),
                     errdetail("Failed while creating memory context \"%s\".",
                               name)));
        }
        block->aset = set;
        block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
        block->endptr = ((char *) block) + blksize;
        block->next = set->blocks;
        set->blocks = block;
        /* Mark block as not to be released at reset time */
        set->keeper = block;

        /* Mark unallocated space NOACCESS; leave the block header alone. */
        VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
                                   blksize - ALLOC_BLOCKHDRSZ);
    }

    return (MemoryContext) set;
}
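
/*
 * Typical usage from calling code (editor's note, a hedged sketch; the
 * context name "MyContext" is hypothetical, and ALLOCSET_DEFAULT_SIZES is
 * the memutils.h macro that expands to the standard size parameters):
 *
 *      MemoryContext cxt = AllocSetContextCreate(TopMemoryContext,
 *                                                "MyContext",
 *                                                ALLOCSET_DEFAULT_SIZES);
 *      MemoryContext old = MemoryContextSwitchTo(cxt);
 *      char *buf = palloc(256);    -- served by AllocSetAlloc() below
 *      MemoryContextSwitchTo(old);
 *      MemoryContextDelete(cxt);   -- AllocSetDelete() frees every block
 */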

/*
 * AllocSetInit
 *      Context-type-specific initialization routine.
 *
 * This is called by MemoryContextCreate() after setting up the
 * generic MemoryContext fields and before linking the new context
 * into the context tree. We must do whatever is needed to make the
 * new context minimally valid for deletion. We must *not* risk
 * failure --- thus, for example, allocating more memory is not cool.
 * (AllocSetContextCreate can allocate memory when it gets control
 * back, however.)
 */
static void
AllocSetInit(MemoryContext context)
{
    /*
     * Since MemoryContextCreate already zeroed the context node, we don't
     * have to do anything here: it's already OK.
     */
}

/*
 * AllocSetReset
 *      Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns. Our actual implementation is
 * that we hang onto any "keeper" block specified for the set. In this way,
 * we don't thrash malloc() when a context is repeatedly reset after small
 * allocations, which is typical behavior for per-tuple contexts.
 */
static void
AllocSetReset(MemoryContext context)
{
    AllocSet    set = (AllocSet) context;
    AllocBlock  block;

    AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
    /* Check for corruption and leaks before freeing */
    AllocSetCheck(context);
#endif

    /* Clear chunk freelists */
    MemSetAligned(set->freelist, 0, sizeof(set->freelist));

    block = set->blocks;

    /* New blocks list is either empty or just the keeper block */
    set->blocks = set->keeper;

    while (block != NULL)
    {
        AllocBlock  next = block->next;

        if (block == set->keeper)
        {
            /* Reset the block, but don't return it to malloc */
            char       *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
            wipe_mem(datastart, block->freeptr - datastart);
#else
            /* wipe_mem() would have done this */
            VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
            block->freeptr = datastart;
            block->next = NULL;
        }
        else
        {
            /* Normal case, release the block */
#ifdef CLOBBER_FREED_MEMORY
            wipe_mem(block, block->freeptr - ((char *) block));
#endif
            free(block);
        }
        block = next;
    }

    /* Reset block size allocation sequence, too */
    set->nextBlockSize = set->initBlockSize;
}
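
/*
 * Reset-cycling sketch (editor's note, illustrative; per-tuple contexts in
 * the executor behave this way, though this exact loop is hypothetical):
 * because the keeper block survives AllocSetReset, a loop such as
 *
 *      for each tuple:
 *          MemoryContextReset(per_tuple_context);
 *          ... palloc() the tuple's working data in per_tuple_context ...
 *
 * stops calling malloc() and free() entirely once the keeper block exists,
 * so long as each iteration's allocations fit within that block.
 */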

/*
 * AllocSetDelete
 *      Frees all memory which is allocated in the given set,
 *      in preparation for deletion of the set.
 *
 * Unlike AllocSetReset, this *must* free all resources of the set.
 * But note we are not responsible for deleting the context node itself.
 */
static void
AllocSetDelete(MemoryContext context)
{
    AllocSet    set = (AllocSet) context;
    AllocBlock  block = set->blocks;

    AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
    /* Check for corruption and leaks before freeing */
    AllocSetCheck(context);
#endif

    /* Make it look empty, just in case... */
    MemSetAligned(set->freelist, 0, sizeof(set->freelist));
    set->blocks = NULL;
    set->keeper = NULL;

    while (block != NULL)
    {
        AllocBlock  next = block->next;

#ifdef CLOBBER_FREED_MEMORY
        wipe_mem(block, block->freeptr - ((char *) block));
#endif
        free(block);
        block = next;
    }
}

/*
 * AllocSetAlloc
 *      Returns pointer to allocated memory of given size or NULL if
 *      request could not be completed; memory is added to the set.
 *
 * No request may exceed:
 *      MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 */
static void *
AllocSetAlloc(MemoryContext context, Size size)
{
    AllocSet    set = (AllocSet) context;
    AllocBlock  block;
    AllocChunk  chunk;
    int         fidx;
    Size        chunk_size;
    Size        blksize;

    AssertArg(AllocSetIsValid(set));

    /*
     * If requested size exceeds maximum for chunks, allocate an entire block
     * for this request.
     */
    if (size > set->allocChunkLimit)
    {
        chunk_size = MAXALIGN(size);
        blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
        block = (AllocBlock) malloc(blksize);
        if (block == NULL)
            return NULL;
        block->aset = set;
        block->freeptr = block->endptr = ((char *) block) + blksize;

        chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
        chunk->aset = set;
        chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
        /* Valgrind: Will be made NOACCESS below. */
        chunk->requested_size = size;
        /* set mark to catch clobber of "unused" space */
        if (size < chunk_size)
            set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
        /* fill the allocated space with junk */
        randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

        /*
         * Stick the new block underneath the active allocation block, so that
         * we don't lose the use of the space remaining therein.
         */
        if (set->blocks != NULL)
        {
            block->next = set->blocks->next;
            set->blocks->next = block;
        }
        else
        {
            block->next = NULL;
            set->blocks = block;
        }

        AllocAllocInfo(set, chunk);

        /*
         * Chunk header public fields remain DEFINED. The requested
         * allocation itself can be NOACCESS or UNDEFINED; our caller will
         * soon make it UNDEFINED. Make extra space at the end of the chunk,
         * if any, NOACCESS.
         */
        VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNK_PUBLIC,
                                   chunk_size + ALLOC_CHUNKHDRSZ - ALLOC_CHUNK_PUBLIC);

        return AllocChunkGetPointer(chunk);
    }

    /*
     * Request is small enough to be treated as a chunk. Look in the
     * corresponding free list to see if there is a free chunk we could reuse.
     * If one is found, remove it from the free list, make it again a member
     * of the alloc set and return its data address.
     */
    fidx = AllocSetFreeIndex(size);
    chunk = set->freelist[fidx];
    if (chunk != NULL)
    {
        Assert(chunk->size >= size);

        set->freelist[fidx] = (AllocChunk) chunk->aset;

        chunk->aset = (void *) set;

#ifdef MEMORY_CONTEXT_CHECKING
        /* Valgrind: Free list requested_size should be DEFINED. */
        chunk->requested_size = size;
        VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
                                   sizeof(chunk->requested_size));
        /* set mark to catch clobber of "unused" space */
        if (size < chunk->size)
            set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
        /* fill the allocated space with junk */
        randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

        AllocAllocInfo(set, chunk);
        return AllocChunkGetPointer(chunk);
    }

    /*
     * Choose the actual chunk size to allocate.
     */
    chunk_size = (1 << ALLOC_MINBITS) << fidx;
    Assert(chunk_size >= size);

    /*
     * If there is enough room in the active allocation block, we will put the
     * chunk into that block. Else must start a new one.
     */
    if ((block = set->blocks) != NULL)
    {
        Size        availspace = block->endptr - block->freeptr;

        if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
        {
            /*
             * The existing active (top) block does not have enough room for
             * the requested allocation, but it might still have a useful
             * amount of space in it. Once we push it down in the block list,
             * we'll never try to allocate more space from it. So, before we
             * do that, carve up its free space into chunks that we can put on
             * the set's freelists.
             *
             * Because we can only get here when there's less than
             * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
             * more than ALLOCSET_NUM_FREELISTS-1 times.
             */
            while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
            {
                Size        availchunk = availspace - ALLOC_CHUNKHDRSZ;
                int         a_fidx = AllocSetFreeIndex(availchunk);

                /*
                 * In most cases, we'll get back the index of the next larger
                 * freelist than the one we need to put this chunk on. The
                 * exception is when availchunk is exactly a power of 2.
                 */
                if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
                {
                    a_fidx--;
                    Assert(a_fidx >= 0);
                    availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
                }

                chunk = (AllocChunk) (block->freeptr);

                /* Prepare to initialize the chunk header. */
                VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNK_USED);

                block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
                availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

                chunk->size = availchunk;
#ifdef MEMORY_CONTEXT_CHECKING
                chunk->requested_size = 0;  /* mark it free */
#endif
                chunk->aset = (void *) set->freelist[a_fidx];
                set->freelist[a_fidx] = chunk;
            }
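
            /*
             * Carve-up example (editor's note; assumes an illustrative
             * 16-byte ALLOC_CHUNKHDRSZ): if 1000 bytes remain, the loop
             * files a 512-byte chunk (528 bytes with its header), then 256,
             * then 128, then 32, leaving 8 bytes, which is below the
             * 8-byte-chunk-plus-header minimum, so the loop stops there.
             */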

            /* Mark that we need to create a new block */
            block = NULL;
        }
    }

    /*
     * Time to create a new regular (multi-chunk) block?
     */
    if (block == NULL)
    {
        Size        required_size;

        /*
         * The first such block has size initBlockSize, and we double the
         * space in each succeeding block, but not more than maxBlockSize.
         */
        blksize = set->nextBlockSize;
        set->nextBlockSize <<= 1;
        if (set->nextBlockSize > set->maxBlockSize)
            set->nextBlockSize = set->maxBlockSize;

        /*
         * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
         * space... but try to keep it a power of 2.
         */
        required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
        while (blksize < required_size)
            blksize <<= 1;

        /* Try to allocate it */
        block = (AllocBlock) malloc(blksize);

        /*
         * We could be asking for pretty big blocks here, so cope if malloc
         * fails. But give up if there's less than a meg or so available...
         */
        while (block == NULL && blksize > 1024 * 1024)
        {
            blksize >>= 1;
            if (blksize < required_size)
                break;
            block = (AllocBlock) malloc(blksize);
        }

        if (block == NULL)
            return NULL;

        block->aset = set;
        block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
        block->endptr = ((char *) block) + blksize;

        /*
         * If this is the first block of the set, make it the "keeper" block.
         * Formerly, a keeper block could only be created during context
         * creation, but allowing it to happen here lets us have fast reset
         * cycling even for contexts created with minContextSize = 0; that way
         * we don't have to force space to be allocated in contexts that might
         * never need any space. Don't mark an oversize block as a keeper,
         * however.
         */
        if (set->keeper == NULL && blksize == set->initBlockSize)
            set->keeper = block;

        /* Mark unallocated space NOACCESS. */
        VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
                                   blksize - ALLOC_BLOCKHDRSZ);

        block->next = set->blocks;
        set->blocks = block;
    }

    /*
     * OK, do the allocation
     */
    chunk = (AllocChunk) (block->freeptr);

    /* Prepare to initialize the chunk header. */
    VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNK_USED);

    block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
    Assert(block->freeptr <= block->endptr);

    chunk->aset = (void *) set;
    chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
    chunk->requested_size = size;
    VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
                               sizeof(chunk->requested_size));
    /* set mark to catch clobber of "unused" space */
    if (size < chunk->size)
        set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
    /* fill the allocated space with junk */
    randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

    AllocAllocInfo(set, chunk);
    return AllocChunkGetPointer(chunk);
}

/*
 * AllocSetFree
 *      Frees allocated memory; memory is removed from the set.
 */
static void
AllocSetFree(MemoryContext context, void *pointer)
{
    AllocSet    set = (AllocSet) context;
    AllocChunk  chunk = AllocPointerGetChunk(pointer);

    AllocFreeInfo(set, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
    VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
                              sizeof(chunk->requested_size));
    /* Test for someone scribbling on unused space in chunk */
    if (chunk->requested_size < chunk->size)
        if (!sentinel_ok(pointer, chunk->requested_size))
            elog(WARNING, "detected write past chunk end in %s %p",
                 set->header.name, chunk);
#endif

    if (chunk->size > set->allocChunkLimit)
    {
        /*
         * Big chunks are certain to have been allocated as single-chunk
         * blocks. Find the containing block and return it to malloc().
         */
        AllocBlock  block = set->blocks;
        AllocBlock  prevblock = NULL;

        while (block != NULL)
        {
            if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
                break;
            prevblock = block;
            block = block->next;
        }
        if (block == NULL)
            elog(ERROR, "could not find block containing chunk %p", chunk);
        /* let's just make sure chunk is the only one in the block */
        Assert(block->freeptr == ((char *) block) +
               (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ));

        /* OK, remove block from aset's list and free it */
        if (prevblock == NULL)
            set->blocks = block->next;
        else
            prevblock->next = block->next;
#ifdef CLOBBER_FREED_MEMORY
        wipe_mem(block, block->freeptr - ((char *) block));
#endif
        free(block);
    }
    else
    {
        /* Normal case, put the chunk into appropriate freelist */
        int         fidx = AllocSetFreeIndex(chunk->size);

        chunk->aset = (void *) set->freelist[fidx];

#ifdef CLOBBER_FREED_MEMORY
        wipe_mem(pointer, chunk->size);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
        /* Reset requested_size to 0 in chunks that are on freelist */
        chunk->requested_size = 0;
#endif
        set->freelist[fidx] = chunk;
    }
}

/*
 * AllocSetRealloc
 *      Returns new pointer to allocated memory of given size or NULL if
 *      request could not be completed; this memory is added to the set.
 *      Memory associated with given pointer is copied into the new memory,
 *      and the old memory is freed.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
 * makes our Valgrind client requests less-precise, hazarding false negatives.
 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
 * request size.)
 */
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
{
    AllocSet    set = (AllocSet) context;
    AllocChunk  chunk = AllocPointerGetChunk(pointer);
    Size        oldsize = chunk->size;

#ifdef MEMORY_CONTEXT_CHECKING
    VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
                              sizeof(chunk->requested_size));
    /* Test for someone scribbling on unused space in chunk */
    if (chunk->requested_size < oldsize)
        if (!sentinel_ok(pointer, chunk->requested_size))
            elog(WARNING, "detected write past chunk end in %s %p",
                 set->header.name, chunk);
#endif

    /*
     * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
     * allocated area already is >= the new size. (In particular, we always
     * fall out here if the requested size is a decrease.)
     */
    if (oldsize >= size)
    {
#ifdef MEMORY_CONTEXT_CHECKING
        Size        oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
        /* We can only fill the extra space if we know the prior request */
        if (size > oldrequest)
            randomize_mem((char *) pointer + oldrequest,
                          size - oldrequest);
#endif

        chunk->requested_size = size;
        VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
                                   sizeof(chunk->requested_size));

        /*
         * If this is an increase, mark any newly-available part UNDEFINED.
         * Otherwise, mark the obsolete part NOACCESS.
         */
        if (size > oldrequest)
            VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
                                        size - oldrequest);
        else
            VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
                                       oldsize - size);

        /* set mark to catch clobber of "unused" space */
        if (size < oldsize)
            set_sentinel(pointer, size);
#else                           /* !MEMORY_CONTEXT_CHECKING */

        /*
         * We don't have the information to determine whether we're growing
         * the old request or shrinking it, so we conservatively mark the
         * entire new allocation DEFINED.
         */
        VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
        VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

        return pointer;
    }

    if (oldsize > set->allocChunkLimit)
    {
        /*
         * The chunk must have been allocated as a single-chunk block. Find
         * the containing block and use realloc() to make it bigger with
         * minimum space wastage.
         */
        AllocBlock  block = set->blocks;
        AllocBlock  prevblock = NULL;
        Size        chksize;
        Size        blksize;

        while (block != NULL)
        {
            if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
                break;
            prevblock = block;
            block = block->next;
        }
        if (block == NULL)
            elog(ERROR, "could not find block containing chunk %p", chunk);
        /* let's just make sure chunk is the only one in the block */
        Assert(block->freeptr == ((char *) block) +
               (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ));

        /* Do the realloc */
        chksize = MAXALIGN(size);
        blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
        block = (AllocBlock) realloc(block, blksize);
        if (block == NULL)
            return NULL;
        block->freeptr = block->endptr = ((char *) block) + blksize;

        /* Update pointers since block has likely been moved */
        chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
        pointer = AllocChunkGetPointer(chunk);
        if (prevblock == NULL)
            set->blocks = block;
        else
            prevblock->next = block;
        chunk->size = chksize;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
        /* We can only fill the extra space if we know the prior request */
        randomize_mem((char *) pointer + chunk->requested_size,
                      size - chunk->requested_size);
#endif

        /*
         * realloc() (or randomize_mem()) will have left the newly-allocated
         * part UNDEFINED, but we may need to adjust trailing bytes from the
         * old allocation.
         */
        VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
                                    oldsize - chunk->requested_size);

        chunk->requested_size = size;
        VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
                                   sizeof(chunk->requested_size));

        /* set mark to catch clobber of "unused" space */
        if (size < chunk->size)
            set_sentinel(AllocChunkGetPointer(chunk), size);
#else                           /* !MEMORY_CONTEXT_CHECKING */

        /*
         * We don't know how much of the old chunk size was the actual
         * allocation; it could have been as small as one byte. We have to be
         * conservative and just mark the entire old portion DEFINED.
         */
        VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

        /* Make any trailing alignment padding NOACCESS. */
        VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
        return AllocChunkGetPointer(chunk);
    }
    else
    {
        /*
         * Small-chunk case. We just do this by brute force, ie, allocate a
         * new chunk and copy the data. Since we know the existing data isn't
         * huge, this won't involve any great memcpy expense, so it's not
         * worth being smarter. (At one time we tried to avoid memcpy when it
         * was possible to enlarge the chunk in-place, but that turns out to
         * misbehave unpleasantly for repeated cycles of
         * palloc/repalloc/pfree: the eventually freed chunks go into the
         * wrong freelist for the next initial palloc request, and so we leak
         * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
         */
        AllocPointer newPointer;

        /* allocate new chunk */
        newPointer = AllocSetAlloc((MemoryContext) set, size);

        /* leave immediately if request was not completed */
        if (newPointer == NULL)
            return NULL;

        /*
         * AllocSetAlloc() just made the region NOACCESS. Change it to
         * UNDEFINED for the moment; memcpy() will then transfer definedness
         * from the old allocation to the new. If we know the old allocation,
         * copy just that much. Otherwise, make the entire old chunk defined
         * to avoid errors as we copy the currently-NOACCESS trailing bytes.
         */
        VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
        oldsize = chunk->requested_size;
#else
        VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

        /* transfer existing data (certain to fit) */
        memcpy(newPointer, pointer, oldsize);

        /* free old chunk */
        AllocSetFree((MemoryContext) set, pointer);

        return newPointer;
    }
}
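
/*
 * Growth-pattern sketch (editor's note, illustrative; repalloc() is the
 * public wrapper in mcxt.c that dispatches here, and the 8kB chunk limit
 * assumed below is the default):
 *
 *      char *p = palloc(64);
 *      p = repalloc(p, 128);        -- small-chunk path: new chunk + memcpy
 *      p = repalloc(p, 64 * 1024);  -- old chunk still small: AllocSetAlloc()
 *                                      then memcpy, old chunk to a freelist
 *      p = repalloc(p, 128 * 1024); -- old chunk oversize: grow the dedicated
 *                                      block via realloc()
 *
 * Per the header notes, each oversize instance is returned to free() (or
 * resized via realloc()) immediately, rather than tying up freelist space.
 */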

/*
 * AllocSetGetChunkSpace
 *      Given a currently-allocated chunk, determine the total space
 *      it occupies (including all memory-allocation overhead).
 */
static Size
AllocSetGetChunkSpace(MemoryContext context, void *pointer)
{
    AllocChunk  chunk = AllocPointerGetChunk(pointer);

    return chunk->size + ALLOC_CHUNKHDRSZ;
}

/*
 * AllocSetIsEmpty
 *      Is an allocset empty of any allocated space?
 */
static bool
AllocSetIsEmpty(MemoryContext context)
{
    /*
     * For now, we say "empty" only if the context is new or just reset. We
     * could examine the freelists to determine if all space has been freed,
     * but it's not really worth the trouble for present uses of this
     * functionality.
     */
    if (context->isReset)
        return true;
    return false;
}

/*
 * AllocSetStats
 *      Compute stats about memory consumption of an allocset.
 *
 * level: recursion level (0 at top level); used for print indentation.
 * print: true to print stats to stderr.
 * totals: if not NULL, add stats about this allocset into *totals.
 */
static void
AllocSetStats(MemoryContext context, int level, bool print,
              MemoryContextCounters *totals)
{
    AllocSet    set = (AllocSet) context;
    Size        nblocks = 0;
    Size        freechunks = 0;
    Size        totalspace = 0;
    Size        freespace = 0;
    AllocBlock  block;
    int         fidx;

    for (block = set->blocks; block != NULL; block = block->next)
    {
        nblocks++;
        totalspace += block->endptr - ((char *) block);
        freespace += block->endptr - block->freeptr;
    }
    for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
    {
        AllocChunk  chunk;

        for (chunk = set->freelist[fidx]; chunk != NULL;
             chunk = (AllocChunk) chunk->aset)
        {
            freechunks++;
            freespace += chunk->size + ALLOC_CHUNKHDRSZ;
        }
    }

    if (print)
    {
        int         i;

        for (i = 0; i < level; i++)
            fprintf(stderr, "  ");
        fprintf(stderr,
                "%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
                set->header.name, totalspace, nblocks, freespace, freechunks,
                totalspace - freespace);
    }

    if (totals)
    {
        totals->nblocks += nblocks;
        totals->freechunks += freechunks;
        totals->totalspace += totalspace;
        totals->freespace += freespace;
    }
}


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *      Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
AllocSetCheck(MemoryContext context)
{
    AllocSet    set = (AllocSet) context;
    char       *name = set->header.name;
    AllocBlock  block;

    for (block = set->blocks; block != NULL; block = block->next)
    {
        char       *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
        long        blk_used = block->freeptr - bpoz;
        long        blk_data = 0;
        long        nchunks = 0;

        /*
         * An empty block can only be the keeper block
         */
        if (!blk_used)
        {
            if (set->keeper != block)
                elog(WARNING, "problem in alloc set %s: empty block %p",
                     name, block);
        }

        /*
         * Chunk walker
         */
        while (bpoz < block->freeptr)
        {
            AllocChunk  chunk = (AllocChunk) bpoz;
            Size        chsize,
                        dsize;

            chsize = chunk->size;   /* aligned chunk size */
            VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
                                      sizeof(chunk->requested_size));
            dsize = chunk->requested_size;  /* real data */
            if (dsize > 0)      /* not on a free list */
                VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
                                           sizeof(chunk->requested_size));

            /*
             * Check chunk size
             */
            if (dsize > chsize)
                elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
                     name, chunk, block);
            if (chsize < (1 << ALLOC_MINBITS))
                elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
                     name, chsize, chunk, block);

            /* single-chunk block? */
            if (chsize > set->allocChunkLimit &&
                chsize + ALLOC_CHUNKHDRSZ != blk_used)
                elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
                     name, chunk, block);

            /*
             * If chunk is allocated, check for correct aset pointer. (If it's
             * free, the aset is the freelist pointer, which we can't check as
             * easily...)
             */
            if (dsize > 0 && chunk->aset != (void *) set)
                elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
                     name, block, chunk);

            /*
             * Check for overwrite of "unallocated" space in chunk
             */
            if (dsize > 0 && dsize < chsize &&
                !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
                elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
                     name, block, chunk);

            blk_data += chsize;
            nchunks++;

            bpoz += ALLOC_CHUNKHDRSZ + chsize;
        }

        if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
            elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
                 name, block);
    }
}

#endif   /* MEMORY_CONTEXT_CHECKING */