dsm.c
1 /*-------------------------------------------------------------------------
2  *
3  * dsm.c
4  * manage dynamic shared memory segments
5  *
6  * This file provides a set of services to make programming with dynamic
7  * shared memory segments more convenient. Unlike the low-level
8  * facilities provided by dsm_impl.h and dsm_impl.c, mappings and segments
9  * created using this module will be cleaned up automatically. Mappings
10  * will be removed when the resource owner under which they were created
11  * is cleaned up, unless dsm_pin_mapping() is used, in which case they
12  * have session lifespan. Segments will be removed when there are no
13  * remaining mappings, or at postmaster shutdown in any case. After a
14  * hard postmaster crash, remaining segments will be removed, if they
15  * still exist, at the next postmaster startup.
16  *
17  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
18  * Portions Copyright (c) 1994, Regents of the University of California
19  *
20  *
21  * IDENTIFICATION
22  * src/backend/storage/ipc/dsm.c
23  *
24  *-------------------------------------------------------------------------
25  */
26 
27 #include "postgres.h"
28 
29 #include <fcntl.h>
30 #include <unistd.h>
31 #ifndef WIN32
32 #include <sys/mman.h>
33 #endif
34 #include <sys/stat.h>
35 
36 #include "common/pg_prng.h"
37 #include "lib/ilist.h"
38 #include "miscadmin.h"
39 #include "port/pg_bitutils.h"
40 #include "storage/dsm.h"
41 #include "storage/fd.h"
42 #include "storage/ipc.h"
43 #include "storage/lwlock.h"
44 #include "storage/pg_shmem.h"
45 #include "storage/shmem.h"
46 #include "utils/freepage.h"
47 #include "utils/guc.h"
48 #include "utils/memutils.h"
49 #include "utils/resowner.h"
50 
51 #define PG_DYNSHMEM_CONTROL_MAGIC 0x9a503d32
52 
53 #define PG_DYNSHMEM_FIXED_SLOTS 64
54 #define PG_DYNSHMEM_SLOTS_PER_BACKEND 5
55 
56 #define INVALID_CONTROL_SLOT ((uint32) -1)
57 
58 /* Backend-local tracking for on-detach callbacks. */
59 typedef struct dsm_segment_detach_callback
60 {
61  on_dsm_detach_callback function;
62  Datum arg;
63  slist_node node;
64 } dsm_segment_detach_callback;
65 
66 /* Backend-local state for a dynamic shared memory segment. */
67 struct dsm_segment
68 {
69  dlist_node node; /* List link in dsm_segment_list. */
70  ResourceOwner resowner; /* Resource owner. */
71  dsm_handle handle; /* Segment name. */
72  uint32 control_slot; /* Slot in control segment. */
73  void *impl_private; /* Implementation-specific private data. */
74  void *mapped_address; /* Mapping address, or NULL if unmapped. */
75  Size mapped_size; /* Size of our mapping. */
76  slist_head on_detach; /* On-detach callbacks. */
77 };
78 
79 /* Shared-memory state for a dynamic shared memory segment. */
80 typedef struct dsm_control_item
81 {
82  dsm_handle handle;
83  uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */
84  size_t first_page;
85  size_t npages;
86  void *impl_private_pm_handle; /* only needed on Windows */
87  bool pinned;
88 } dsm_control_item;
89 
90 /* Layout of the dynamic shared memory control segment. */
91 typedef struct dsm_control_header
92 {
93  uint32 magic;
94  uint32 nitems;
95  uint32 maxitems;
96  dsm_control_item item[FLEXIBLE_ARRAY_MEMBER];
97 } dsm_control_header;
98 
99 static void dsm_cleanup_for_mmap(void);
100 static void dsm_postmaster_shutdown(int code, Datum arg);
101 static dsm_segment *dsm_create_descriptor(void);
102 static bool dsm_control_segment_sane(dsm_control_header *control,
103  Size mapped_size);
104 static uint64 dsm_control_bytes_needed(uint32 nitems);
105 static inline dsm_handle make_main_region_dsm_handle(int slot);
106 static inline bool is_main_region_dsm_handle(dsm_handle handle);
107 
108 /* Has this backend initialized the dynamic shared memory system yet? */
109 static bool dsm_init_done = false;
110 
111 /* Preallocated DSM space in the main shared memory region. */
112 static void *dsm_main_space_begin = NULL;
113 
114 /*
115  * List of dynamic shared memory segments used by this backend.
116  *
117  * At process exit time, we must decrement the reference count of each
118  * segment we have attached; this list makes it possible to find all such
119  * segments.
120  *
121  * This list should always be empty in the postmaster. We could probably
122  * allow the postmaster to map dynamic shared memory segments before it
123  * begins to start child processes, provided that each process adjusted
124  * the reference counts for those segments in the control segment at
125  * startup time, but there's no obvious need for such a facility, which
126  * would also be complex to handle in the EXEC_BACKEND case. Once the
127  * postmaster has begun spawning children, there's an additional problem:
128  * each new mapping would require an update to the control segment,
129  * which requires locking, in which the postmaster must not be involved.
130  */
131 static dlist_head dsm_segment_list = DLIST_STATIC_INIT(dsm_segment_list);
132 
133 /*
134  * Control segment information.
135  *
136  * Unlike ordinary shared memory segments, the control segment is not
137  * reference counted; instead, it lasts for the postmaster's entire
138  * life cycle. For simplicity, it doesn't have a dsm_segment object either.
139  */
140 static dsm_handle dsm_control_handle;
141 static dsm_control_header *dsm_control;
142 static Size dsm_control_mapped_size = 0;
143 static void *dsm_control_impl_private = NULL;
144 
145 
146 /* ResourceOwner callbacks to hold DSM segments */
147 static void ResOwnerReleaseDSM(Datum res);
148 static char *ResOwnerPrintDSM(Datum res);
149 
150 static const ResourceOwnerDesc dsm_resowner_desc =
151 {
152  .name = "dynamic shared memory segment",
153  .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
154  .release_priority = RELEASE_PRIO_DSMS,
155  .ReleaseResource = ResOwnerReleaseDSM,
156  .DebugPrint = ResOwnerPrintDSM
157 };
158 
159 /* Convenience wrappers over ResourceOwnerRemember/Forget */
160 static inline void
161 ResourceOwnerRememberDSM(ResourceOwner owner, dsm_segment *seg)
162 {
163  ResourceOwnerRemember(owner, PointerGetDatum(seg), &dsm_resowner_desc);
164 }
165 static inline void
166 ResourceOwnerForgetDSM(ResourceOwner owner, dsm_segment *seg)
167 {
168  ResourceOwnerForget(owner, PointerGetDatum(seg), &dsm_resowner_desc);
169 }
170 
171 /*
172  * Start up the dynamic shared memory system.
173  *
174  * This is called just once during each cluster lifetime, at postmaster
175  * startup time.
176  */
177 void
178 dsm_postmaster_startup(PGShmemHeader *shim)
179 {
180  void *dsm_control_address = NULL;
181  uint32 maxitems;
182  Size segsize;
183 
184  Assert(!IsUnderPostmaster);
185 
186  /*
187  * If we're using the mmap implementations, clean up any leftovers.
188  * Cleanup isn't needed on Windows, and happens earlier in startup for
189  * POSIX and System V shared memory, via a direct call to
190  * dsm_cleanup_using_control_segment.
191  */
192  if (dynamic_shared_memory_type == DSM_IMPL_MMAP)
193  dsm_cleanup_for_mmap();
194 
195  /* Determine size for new control segment. */
196  maxitems = PG_DYNSHMEM_FIXED_SLOTS
197  + PG_DYNSHMEM_SLOTS_PER_BACKEND * MaxBackends;
198  elog(DEBUG2, "dynamic shared memory system will support %u segments",
199  maxitems);
200  segsize = dsm_control_bytes_needed(maxitems);
201 
202  /*
203  * Loop until we find an unused identifier for the new control segment. We
204  * sometimes use DSM_HANDLE_INVALID as a sentinel value indicating "no
205  * control segment", so avoid generating that value for a real handle.
206  */
207  for (;;)
208  {
209  Assert(dsm_control_address == NULL);
210  Assert(dsm_control_mapped_size == 0);
211  /* Use even numbers only */
212  dsm_control_handle = pg_prng_uint32(&pg_global_prng_state) << 1;
213  if (dsm_control_handle == DSM_HANDLE_INVALID)
214  continue;
215  if (dsm_impl_op(DSM_OP_CREATE, dsm_control_handle, segsize,
216  &dsm_control_impl_private, &dsm_control_address,
217  &dsm_control_mapped_size, ERROR))
218  break;
219  }
220  dsm_control = dsm_control_address;
221  on_shmem_exit(dsm_postmaster_shutdown, PointerGetDatum(shim));
222  elog(DEBUG2,
223  "created dynamic shared memory control segment %u (%zu bytes)",
224  dsm_control_handle, segsize);
225  shim->dsm_control = dsm_control_handle;
226 
227  /* Initialize control segment. */
228  dsm_control->magic = PG_DYNSHMEM_CONTROL_MAGIC;
229  dsm_control->nitems = 0;
230  dsm_control->maxitems = maxitems;
231 }
232 
233 /*
234  * Determine whether the control segment from the previous postmaster
235  * invocation still exists. If so, remove the dynamic shared memory
236  * segments to which it refers, and then the control segment itself.
237  */
238 void
239 dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
240 {
241  void *mapped_address = NULL;
242  void *junk_mapped_address = NULL;
243  void *impl_private = NULL;
244  void *junk_impl_private = NULL;
245  Size mapped_size = 0;
246  Size junk_mapped_size = 0;
247  uint32 nitems;
248  uint32 i;
249  dsm_control_header *old_control;
250 
251  /*
252  * Try to attach the segment. If this fails, it probably just means that
253  * the operating system has been rebooted and the segment no longer
254  * exists, or an unrelated process has used the same shm ID. So just fall
255  * out quietly.
256  */
257  if (!dsm_impl_op(DSM_OP_ATTACH, old_control_handle, 0, &impl_private,
258  &mapped_address, &mapped_size, DEBUG1))
259  return;
260 
261  /*
262  * We've managed to reattach it, but the contents might not be sane. If
263  * they aren't, we disregard the segment after all.
264  */
265  old_control = (dsm_control_header *) mapped_address;
266  if (!dsm_control_segment_sane(old_control, mapped_size))
267  {
268  dsm_impl_op(DSM_OP_DETACH, old_control_handle, 0, &impl_private,
269  &mapped_address, &mapped_size, LOG);
270  return;
271  }
272 
273  /*
274  * OK, the control segment looks basically valid, so we can use it to get
275  * a list of segments that need to be removed.
276  */
277  nitems = old_control->nitems;
278  for (i = 0; i < nitems; ++i)
279  {
280  dsm_handle handle;
281  uint32 refcnt;
282 
283  /* If the reference count is 0, the slot is actually unused. */
284  refcnt = old_control->item[i].refcnt;
285  if (refcnt == 0)
286  continue;
287 
288  /* If it was using the main shmem area, there is nothing to do. */
289  handle = old_control->item[i].handle;
290  if (is_main_region_dsm_handle(handle))
291  continue;
292 
293  /* Log debugging information. */
294  elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u (reference count %u)",
295  handle, refcnt);
296 
297  /* Destroy the referenced segment. */
298  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
299  &junk_mapped_address, &junk_mapped_size, LOG);
300  }
301 
302  /* Destroy the old control segment, too. */
303  elog(DEBUG2,
304  "cleaning up dynamic shared memory control segment with ID %u",
305  old_control_handle);
306  dsm_impl_op(DSM_OP_DESTROY, old_control_handle, 0, &impl_private,
307  &mapped_address, &mapped_size, LOG);
308 }
309 
310 /*
311  * When we're using the mmap shared memory implementation, "shared memory"
312  * segments might even manage to survive an operating system reboot.
313  * But there's no guarantee as to exactly what will survive: some segments
314  * may survive, and others may not, and the contents of some may be out
315  * of date. In particular, the control segment may be out of date, so we
316  * can't rely on it to figure out what to remove. However, since we know
317  * what directory contains the files we used as shared memory, we can simply
318  * scan the directory and blow everything away that shouldn't be there.
319  */
320 static void
321 dsm_cleanup_for_mmap(void)
322 {
323  DIR *dir;
324  struct dirent *dent;
325 
326  /* Scan the directory for something with a name of the correct format. */
327  dir = AllocateDir(PG_DYNSHMEM_DIR);
328 
329  while ((dent = ReadDir(dir, PG_DYNSHMEM_DIR)) != NULL)
330  {
331  if (strncmp(dent->d_name, PG_DYNSHMEM_MMAP_FILE_PREFIX,
332  strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0)
333  {
334  char buf[MAXPGPATH + sizeof(PG_DYNSHMEM_DIR)];
335 
336  snprintf(buf, sizeof(buf), PG_DYNSHMEM_DIR "/%s", dent->d_name);
337 
338  elog(DEBUG2, "removing file \"%s\"", buf);
339 
340  /* We found a matching file; so remove it. */
341  if (unlink(buf) != 0)
342  ereport(ERROR,
343  (errcode_for_file_access(),
344  errmsg("could not remove file \"%s\": %m", buf)));
345  }
346  }
347 
348  /* Cleanup complete. */
349  FreeDir(dir);
350 }
351 
352 /*
353  * At shutdown time, we iterate over the control segment and remove all
354  * remaining dynamic shared memory segments. We avoid throwing errors here;
355  * the postmaster is shutting down either way, and this is just non-critical
356  * resource cleanup.
357  */
358 static void
359 dsm_postmaster_shutdown(int code, Datum arg)
360 {
361  uint32 nitems;
362  uint32 i;
363  void *dsm_control_address;
364  void *junk_mapped_address = NULL;
365  void *junk_impl_private = NULL;
366  Size junk_mapped_size = 0;
367  PGShmemHeader *shim = (PGShmemHeader *) DatumGetPointer(arg);
368 
369  /*
370  * If some other backend exited uncleanly, it might have corrupted the
371  * control segment while it was dying. In that case, we warn and ignore
372  * the contents of the control segment. This may end up leaving behind
373  * stray shared memory segments, but there's not much we can do about that
374  * if the metadata is gone.
375  */
376  nitems = dsm_control->nitems;
377  if (!dsm_control_segment_sane(dsm_control, dsm_control_mapped_size))
378  {
379  ereport(LOG,
380  (errmsg("dynamic shared memory control segment is corrupt")));
381  return;
382  }
383 
384  /* Remove any remaining segments. */
385  for (i = 0; i < nitems; ++i)
386  {
387  dsm_handle handle;
388 
389  /* If the reference count is 0, the slot is actually unused. */
390  if (dsm_control->item[i].refcnt == 0)
391  continue;
392 
393  handle = dsm_control->item[i].handle;
394  if (is_main_region_dsm_handle(handle))
395  continue;
396 
397  /* Log debugging information. */
398  elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u",
399  handle);
400 
401  /* Destroy the segment. */
402  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
403  &junk_mapped_address, &junk_mapped_size, LOG);
404  }
405 
406  /* Remove the control segment itself. */
407  elog(DEBUG2,
408  "cleaning up dynamic shared memory control segment with ID %u",
409  dsm_control_handle);
410  dsm_control_address = dsm_control;
411  dsm_impl_op(DSM_OP_DESTROY, dsm_control_handle, 0,
412  &dsm_control_impl_private, &dsm_control_address,
413  &dsm_control_mapped_size, LOG);
414  dsm_control = dsm_control_address;
415  shim->dsm_control = 0;
416 }
417 
418 /*
419  * Prepare this backend for dynamic shared memory usage. Under EXEC_BACKEND,
420  * we must reread the state file and map the control segment; in other cases,
421  * we'll have inherited the postmaster's mapping and global variables.
422  */
423 static void
424 dsm_backend_startup(void)
425 {
426 #ifdef EXEC_BACKEND
427  if (IsUnderPostmaster)
428  {
429  void *control_address = NULL;
430 
431  /* Attach control segment. */
432  Assert(dsm_control_handle != 0);
433  dsm_impl_op(DSM_OP_ATTACH, dsm_control_handle, 0,
434  &dsm_control_impl_private, &control_address,
435  &dsm_control_mapped_size, ERROR);
436  dsm_control = control_address;
437  /* If control segment doesn't look sane, something is badly wrong. */
438  if (!dsm_control_segment_sane(dsm_control, dsm_control_mapped_size))
439  {
440  dsm_impl_op(DSM_OP_DETACH, dsm_control_handle, 0,
441  &dsm_control_impl_private, &control_address,
442  &dsm_control_mapped_size, WARNING);
443  ereport(FATAL,
444  (errcode(ERRCODE_INTERNAL_ERROR),
445  errmsg("dynamic shared memory control segment is not valid")));
446  }
447  }
448 #endif
449 
450  dsm_init_done = true;
451 }
452 
453 #ifdef EXEC_BACKEND
454 /*
455  * When running under EXEC_BACKEND, we get a callback here when the main
456  * shared memory segment is re-attached, so that we can record the control
457  * handle retrieved from it.
458  */
459 void
460 dsm_set_control_handle(dsm_handle h)
461 {
462  Assert(dsm_control_handle == 0 && h != 0);
463  dsm_control_handle = h;
464 }
465 #endif
466 
467 /*
468  * Reserve some space in the main shared memory segment for DSM segments.
469  */
470 size_t
471 dsm_estimate_size(void)
472 {
473  return 1024 * 1024 * (size_t) min_dynamic_shared_memory;
474 }
475 
476 /*
477  * Initialize space in the main shared memory segment for DSM segments.
478  */
479 void
480 dsm_shmem_init(void)
481 {
482  size_t size = dsm_estimate_size();
483  bool found;
484 
485  if (size == 0)
486  return;
487 
488  dsm_main_space_begin = ShmemInitStruct("Preallocated DSM", size, &found);
489  if (!found)
490  {
491  FreePageManager *fpm = (FreePageManager *) dsm_main_space_begin;
492  size_t first_page = 0;
493  size_t pages;
494 
495  /* Reserve space for the FreePageManager. */
496  while (first_page * FPM_PAGE_SIZE < sizeof(FreePageManager))
497  ++first_page;
498 
499  /* Initialize it and give it all the rest of the space. */
500  FreePageManagerInitialize(fpm, dsm_main_space_begin);
501  pages = (size / FPM_PAGE_SIZE) - first_page;
502  FreePageManagerPut(fpm, first_page, pages);
503  }
504 }
505 
506 /*
507  * Create a new dynamic shared memory segment.
508  *
509  * If there is a non-NULL CurrentResourceOwner, the new segment is associated
510  * with it and must be detached before the resource owner releases, or a
511  * warning will be logged. If CurrentResourceOwner is NULL, the segment
512  * remains attached until explicitly detached or the session ends.
513  * Creating with a NULL CurrentResourceOwner is equivalent to creating
514  * with a non-NULL CurrentResourceOwner and then calling dsm_pin_mapping.
515  */
516 dsm_segment *
517 dsm_create(Size size, int flags)
518 {
519  dsm_segment *seg;
520  uint32 i;
521  uint32 nitems;
522  size_t npages = 0;
523  size_t first_page = 0;
524  FreePageManager *dsm_main_space_fpm = dsm_main_space_begin;
525  bool using_main_dsm_region = false;
526 
527  /*
528  * Unsafe in postmaster. It might seem pointless to allow use of dsm in
529  * single user mode, but otherwise some subsystems will need dedicated
530  * single user mode code paths.
531  */
532  Assert(IsUnderPostmaster || !IsPostmasterEnvironment);
533 
534  if (!dsm_init_done)
535  dsm_backend_startup();
536 
537  /* Create a new segment descriptor. */
538  seg = dsm_create_descriptor();
539 
540  /*
541  * Lock the control segment while we try to allocate from the main shared
542  * memory area, if configured.
543  */
544  if (dsm_main_space_fpm)
545  {
546  npages = size / FPM_PAGE_SIZE;
547  if (size % FPM_PAGE_SIZE > 0)
548  ++npages;
549 
550  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
551  if (FreePageManagerGet(dsm_main_space_fpm, npages, &first_page))
552  {
553  /* We can carve out a piece of the main shared memory segment. */
554  seg->mapped_address = (char *) dsm_main_space_begin +
555  first_page * FPM_PAGE_SIZE;
556  seg->mapped_size = npages * FPM_PAGE_SIZE;
557  using_main_dsm_region = true;
558  /* We'll choose a handle below. */
559  }
560  }
561 
562  if (!using_main_dsm_region)
563  {
564  /*
565  * We need to create a new memory segment. Loop until we find an
566  * unused segment identifier.
567  */
568  if (dsm_main_space_fpm)
569  LWLockRelease(DynamicSharedMemoryControlLock);
570  for (;;)
571  {
572  Assert(seg->mapped_address == NULL && seg->mapped_size == 0);
573  /* Use even numbers only */
574  seg->handle = pg_prng_uint32(&pg_global_prng_state) << 1;
575  if (seg->handle == DSM_HANDLE_INVALID) /* Reserve sentinel */
576  continue;
577  if (dsm_impl_op(DSM_OP_CREATE, seg->handle, size, &seg->impl_private,
578  &seg->mapped_address, &seg->mapped_size, ERROR))
579  break;
580  }
581  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
582  }
583 
584  /* Search the control segment for an unused slot. */
585  nitems = dsm_control->nitems;
586  for (i = 0; i < nitems; ++i)
587  {
588  if (dsm_control->item[i].refcnt == 0)
589  {
590  if (using_main_dsm_region)
591  {
592  seg->handle = make_main_region_dsm_handle(i);
593  dsm_control->item[i].first_page = first_page;
594  dsm_control->item[i].npages = npages;
595  }
596  else
597  Assert(!is_main_region_dsm_handle(seg->handle));
598  dsm_control->item[i].handle = seg->handle;
599  /* refcnt of 1 triggers destruction, so start at 2 */
600  dsm_control->item[i].refcnt = 2;
601  dsm_control->item[i].impl_private_pm_handle = NULL;
602  dsm_control->item[i].pinned = false;
603  seg->control_slot = i;
604  LWLockRelease(DynamicSharedMemoryControlLock);
605  return seg;
606  }
607  }
608 
609  /* Verify that we can support an additional mapping. */
610  if (nitems >= dsm_control->maxitems)
611  {
612  if (using_main_dsm_region)
613  FreePageManagerPut(dsm_main_space_fpm, first_page, npages);
614  LWLockRelease(DynamicSharedMemoryControlLock);
615  if (!using_main_dsm_region)
616  dsm_impl_op(DSM_OP_DESTROY, seg->handle, 0, &seg->impl_private,
617  &seg->mapped_address, &seg->mapped_size, WARNING);
618  if (seg->resowner != NULL)
619  ResourceOwnerForgetDSM(seg->resowner, seg);
620  dlist_delete(&seg->node);
621  pfree(seg);
622 
623  if ((flags & DSM_CREATE_NULL_IF_MAXSEGMENTS) != 0)
624  return NULL;
625  ereport(ERROR,
626  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
627  errmsg("too many dynamic shared memory segments")));
628  }
629 
630  /* Enter the handle into a new array slot. */
631  if (using_main_dsm_region)
632  {
633  seg->handle = make_main_region_dsm_handle(nitems);
634  dsm_control->item[i].first_page = first_page;
635  dsm_control->item[i].npages = npages;
636  }
637  dsm_control->item[nitems].handle = seg->handle;
638  /* refcnt of 1 triggers destruction, so start at 2 */
639  dsm_control->item[nitems].refcnt = 2;
640  dsm_control->item[nitems].impl_private_pm_handle = NULL;
641  dsm_control->item[nitems].pinned = false;
642  seg->control_slot = nitems;
643  dsm_control->nitems++;
644  LWLockRelease(DynamicSharedMemoryControlLock);
645 
646  return seg;
647 }
648 
649 /*
650  * Attach a dynamic shared memory segment.
651  *
652  * See comments for dsm_segment_handle() for an explanation of how this
653  * is intended to be used.
654  *
655  * This function will return NULL if the segment isn't known to the system.
656  * This can happen if we're asked to attach the segment, but then everyone
657  * else detaches it (causing it to be destroyed) before we get around to
658  * attaching it.
659  *
660  * If there is a non-NULL CurrentResourceOwner, the attached segment is
661  * associated with it and must be detached before the resource owner releases,
662  * or a warning will be logged. Otherwise the segment remains attached until
663  * explicitly detached or the session ends. See the note atop dsm_create().
664  */
665 dsm_segment *
666 dsm_attach(dsm_handle h)
667 {
668  dsm_segment *seg;
669  dlist_iter iter;
670  uint32 i;
671  uint32 nitems;
672 
673  /* Unsafe in postmaster (and pointless in a stand-alone backend). */
674  Assert(IsUnderPostmaster);
675 
676  if (!dsm_init_done)
677  dsm_backend_startup();
678 
679  /*
680  * Since this is just a debugging cross-check, we could leave it out
681  * altogether, or include it only in assert-enabled builds. But since the
682  * list of attached segments should normally be very short, let's include
683  * it always for right now.
684  *
685  * If you're hitting this error, you probably want to attempt to find an
686  * existing mapping via dsm_find_mapping() before calling dsm_attach() to
687  * create a new one.
688  */
689  dlist_foreach(iter, &dsm_segment_list)
690  {
691  seg = dlist_container(dsm_segment, node, iter.cur);
692  if (seg->handle == h)
693  elog(ERROR, "can't attach the same segment more than once");
694  }
695 
696  /* Create a new segment descriptor. */
697  seg = dsm_create_descriptor();
698  seg->handle = h;
699 
700  /* Bump reference count for this segment in shared memory. */
701  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
702  nitems = dsm_control->nitems;
703  for (i = 0; i < nitems; ++i)
704  {
705  /*
706  * If the reference count is 0, the slot is actually unused. If the
707  * reference count is 1, the slot is still in use, but the segment is
708  * in the process of going away; even if the handle matches, another
709  * slot may already have started using the same handle value by
710  * coincidence so we have to keep searching.
711  */
712  if (dsm_control->item[i].refcnt <= 1)
713  continue;
714 
715  /* If the handle doesn't match, it's not the slot we want. */
716  if (dsm_control->item[i].handle != seg->handle)
717  continue;
718 
719  /* Otherwise we've found a match. */
720  dsm_control->item[i].refcnt++;
721  seg->control_slot = i;
722  if (is_main_region_dsm_handle(seg->handle))
723  {
724  seg->mapped_address = (char *) dsm_main_space_begin +
725  dsm_control->item[i].first_page * FPM_PAGE_SIZE;
726  seg->mapped_size = dsm_control->item[i].npages * FPM_PAGE_SIZE;
727  }
728  break;
729  }
730  LWLockRelease(DynamicSharedMemoryControlLock);
731 
732  /*
733  * If we didn't find the handle we're looking for in the control segment,
734  * it probably means that everyone else who had it mapped, including the
735  * original creator, died before we got to this point. It's up to the
736  * caller to decide what to do about that.
737  */
738  if (seg->control_slot == INVALID_CONTROL_SLOT)
739  {
740  dsm_detach(seg);
741  return NULL;
742  }
743 
744  /* Here's where we actually try to map the segment. */
745  if (!is_main_region_dsm_handle(seg->handle))
746  dsm_impl_op(DSM_OP_ATTACH, seg->handle, 0, &seg->impl_private,
747  &seg->mapped_address, &seg->mapped_size, ERROR);
748 
749  return seg;
750 }
751 
752 /*
753  * At backend shutdown time, detach any segments that are still attached.
754  * (This is similar to dsm_detach_all, except that there's no reason to
755  * unmap the control segment before exiting, so we don't bother.)
756  */
757 void
758 dsm_backend_shutdown(void)
759 {
760  while (!dlist_is_empty(&dsm_segment_list))
761  {
762  dsm_segment *seg;
763 
764  seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
765  dsm_detach(seg);
766  }
767 }
768 
769 /*
770  * Detach all shared memory segments, including the control segments. This
771  * should be called, along with PGSharedMemoryDetach, in processes that
772  * might inherit mappings but are not intended to be connected to dynamic
773  * shared memory.
774  */
775 void
776 dsm_detach_all(void)
777 {
778  void *control_address = dsm_control;
779 
780  while (!dlist_is_empty(&dsm_segment_list))
781  {
782  dsm_segment *seg;
783 
784  seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
785  dsm_detach(seg);
786  }
787 
788  if (control_address != NULL)
789  dsm_impl_op(DSM_OP_DETACH, dsm_control_handle, 0,
790  &dsm_control_impl_private, &control_address,
791  &dsm_control_mapped_size, ERROR);
792 }
793 
794 /*
795  * Detach from a shared memory segment, destroying the segment if we
796  * remove the last reference.
797  *
798  * This function should never fail. It will often be invoked when aborting
799  * a transaction, and a further error won't serve any purpose. It's not a
800  * complete disaster if we fail to unmap or destroy the segment; it means a
801  * resource leak, but that doesn't necessarily preclude further operations.
802  */
803 void
804 dsm_detach(dsm_segment *seg)
805 {
806  /*
807  * Invoke registered callbacks. Just in case one of those callbacks
808  * throws a further error that brings us back here, pop the callback
809  * before invoking it, to avoid infinite error recursion. Don't allow
810  * interrupts while running the individual callbacks in non-error code
811  * paths, to avoid leaving cleanup work unfinished if we're interrupted by
812  * a statement timeout or similar.
813  */
814  HOLD_INTERRUPTS();
815  while (!slist_is_empty(&seg->on_detach))
816  {
817  slist_node *node;
818  dsm_segment_detach_callback *cb;
819  on_dsm_detach_callback function;
820  Datum arg;
821 
822  node = slist_pop_head_node(&seg->on_detach);
823  cb = slist_container(dsm_segment_detach_callback, node, node);
824  function = cb->function;
825  arg = cb->arg;
826  pfree(cb);
827 
828  function(seg, arg);
829  }
830  RESUME_INTERRUPTS();
831 
832  /*
833  * Try to remove the mapping, if one exists. Normally, there will be, but
834  * maybe not, if we failed partway through a create or attach operation.
835  * We remove the mapping before decrementing the reference count so that
836  * the process that sees a zero reference count can be certain that no
837  * remaining mappings exist. Even if this fails, we pretend that it
838  * works, because retrying is likely to fail in the same way.
839  */
840  if (seg->mapped_address != NULL)
841  {
842  if (!is_main_region_dsm_handle(seg->handle))
843  dsm_impl_op(DSM_OP_DETACH, seg->handle, 0, &seg->impl_private,
844  &seg->mapped_address, &seg->mapped_size, WARNING);
845  seg->impl_private = NULL;
846  seg->mapped_address = NULL;
847  seg->mapped_size = 0;
848  }
849 
850  /* Reduce reference count, if we previously increased it. */
851  if (seg->control_slot != INVALID_CONTROL_SLOT)
852  {
853  uint32 refcnt;
854  uint32 control_slot = seg->control_slot;
855 
856  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
857  Assert(dsm_control->item[control_slot].handle == seg->handle);
858  Assert(dsm_control->item[control_slot].refcnt > 1);
859  refcnt = --dsm_control->item[control_slot].refcnt;
860  seg->control_slot = INVALID_CONTROL_SLOT;
861  LWLockRelease(DynamicSharedMemoryControlLock);
862 
863  /* If new reference count is 1, try to destroy the segment. */
864  if (refcnt == 1)
865  {
866  /* A pinned segment should never reach 1. */
867  Assert(!dsm_control->item[control_slot].pinned);
868 
869  /*
870  * If we fail to destroy the segment here, or are killed before we
871  * finish doing so, the reference count will remain at 1, which
872  * will mean that nobody else can attach to the segment. At
873  * postmaster shutdown time, or when a new postmaster is started
874  * after a hard kill, another attempt will be made to remove the
875  * segment.
876  *
877  * The main case we're worried about here is being killed by a
878  * signal before we can finish removing the segment. In that
879  * case, it's important to be sure that the segment still gets
880  * removed. If we actually fail to remove the segment for some
881  * other reason, the postmaster may not have any better luck than
882  * we did. There's not much we can do about that, though.
883  */
884  if (is_main_region_dsm_handle(seg->handle) ||
885  dsm_impl_op(DSM_OP_DESTROY, seg->handle, 0, &seg->impl_private,
886  &seg->mapped_address, &seg->mapped_size, WARNING))
887  {
888  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
889  if (is_main_region_dsm_handle(seg->handle))
890  FreePageManagerPut((FreePageManager *) dsm_main_space_begin,
891  dsm_control->item[control_slot].first_page,
892  dsm_control->item[control_slot].npages);
893  Assert(dsm_control->item[control_slot].handle == seg->handle);
894  Assert(dsm_control->item[control_slot].refcnt == 1);
895  dsm_control->item[control_slot].refcnt = 0;
896  LWLockRelease(DynamicSharedMemoryControlLock);
897  }
898  }
899  }
900 
901  /* Clean up our remaining backend-private data structures. */
902  if (seg->resowner != NULL)
903  ResourceOwnerForgetDSM(seg->resowner, seg);
904  dlist_delete(&seg->node);
905  pfree(seg);
906 }
907 
908 /*
909  * Keep a dynamic shared memory mapping until end of session.
910  *
911  * By default, mappings are owned by the current resource owner, which
912  * typically means they stick around for the duration of the current query
913  * only.
914  */
915 void
916 dsm_pin_mapping(dsm_segment *seg)
917 {
918  if (seg->resowner != NULL)
919  {
920  ResourceOwnerForgetDSM(seg->resowner, seg);
921  seg->resowner = NULL;
922  }
923 }
924 
925 /*
926  * Arrange to remove a dynamic shared memory mapping at cleanup time.
927  *
928  * dsm_pin_mapping() can be used to preserve a mapping for the entire
929  * lifetime of a process; this function reverses that decision, making
930  * the segment owned by the current resource owner. This may be useful
931  * just before performing some operation that will invalidate the segment
932  * for future use by this backend.
933  */
934 void
935 dsm_unpin_mapping(dsm_segment *seg)
936 {
937  Assert(seg->resowner == NULL);
938  ResourceOwnerEnlarge(CurrentResourceOwner);
939  seg->resowner = CurrentResourceOwner;
940  ResourceOwnerRememberDSM(seg->resowner, seg);
941 }
942 
943 /*
944  * Keep a dynamic shared memory segment until postmaster shutdown, or until
945  * dsm_unpin_segment is called.
946  *
947  * This function should not be called more than once per segment, unless the
948  * segment is explicitly unpinned with dsm_unpin_segment in between calls.
949  *
950  * Note that this function does not arrange for the current process to
951  * keep the segment mapped indefinitely; if that behavior is desired,
952  * dsm_pin_mapping() should be used from each process that needs to
953  * retain the mapping.
954  */
955 void
956 dsm_pin_segment(dsm_segment *seg)
957 {
958  void *handle = NULL;
959 
960  /*
961  * Bump reference count for this segment in shared memory. This will
962  * ensure that even if there is no session which is attached to this
963  * segment, it will remain until postmaster shutdown or an explicit call
964  * to unpin.
965  */
966  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
967  if (dsm_control->item[seg->control_slot].pinned)
968  elog(ERROR, "cannot pin a segment that is already pinned");
969  if (!is_main_region_dsm_handle(seg->handle))
970  dsm_impl_pin_segment(seg->handle, seg->impl_private, &handle);
971  dsm_control->item[seg->control_slot].pinned = true;
972  dsm_control->item[seg->control_slot].refcnt++;
973  dsm_control->item[seg->control_slot].impl_private_pm_handle = handle;
974  LWLockRelease(DynamicSharedMemoryControlLock);
975 }
976 
977 /*
978  * Unpin a dynamic shared memory segment that was previously pinned with
979  * dsm_pin_segment. This function should not be called unless dsm_pin_segment
980  * was previously called for this segment.
981  *
982  * The argument is a dsm_handle rather than a dsm_segment in case you want
983  * to unpin a segment to which you haven't attached. This turns out to be
984  * useful if, for example, a reference to one shared memory segment is stored
985  * within another shared memory segment. You might want to unpin the
986  * referenced segment before destroying the referencing segment.
987  */
988 void
989 dsm_unpin_segment(dsm_handle handle)
990 {
991  uint32 control_slot = INVALID_CONTROL_SLOT;
992  bool destroy = false;
993  uint32 i;
994 
995  /* Find the control slot for the given handle. */
996  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
997  for (i = 0; i < dsm_control->nitems; ++i)
998  {
999  /* Skip unused slots and segments that are concurrently going away. */
1000  if (dsm_control->item[i].refcnt <= 1)
1001  continue;
1002 
1003  /* If we've found our handle, we can stop searching. */
1004  if (dsm_control->item[i].handle == handle)
1005  {
1006  control_slot = i;
1007  break;
1008  }
1009  }
1010 
1011  /*
1012  * We should definitely have found the slot, and it should not already be
1013  * in the process of going away, because this function should only be
1014  * called on a segment which is pinned.
1015  */
1016  if (control_slot == INVALID_CONTROL_SLOT)
1017  elog(ERROR, "cannot unpin unknown segment handle");
1018  if (!dsm_control->item[control_slot].pinned)
1019  elog(ERROR, "cannot unpin a segment that is not pinned");
1020  Assert(dsm_control->item[control_slot].refcnt > 1);
1021 
1022  /*
1023  * Allow implementation-specific code to run. We have to do this before
1024  * releasing the lock, because impl_private_pm_handle may get modified by
1025  * dsm_impl_unpin_segment.
1026  */
1027  if (!is_main_region_dsm_handle(handle))
1028  dsm_impl_unpin_segment(handle,
1029  &dsm_control->item[control_slot].impl_private_pm_handle);
1030 
1031  /* Note that 1 means no references (0 means unused slot). */
1032  if (--dsm_control->item[control_slot].refcnt == 1)
1033  destroy = true;
1034  dsm_control->item[control_slot].pinned = false;
1035 
1036  /* Now we can release the lock. */
1037  LWLockRelease(DynamicSharedMemoryControlLock);
1038 
1039  /* Clean up resources if that was the last reference. */
1040  if (destroy)
1041  {
1042  void *junk_impl_private = NULL;
1043  void *junk_mapped_address = NULL;
1044  Size junk_mapped_size = 0;
1045 
1046  /*
1047  * For an explanation of how error handling works in this case, see
1048  * comments in dsm_detach. Note that if we reach this point, the
1049  * current process certainly does not have the segment mapped, because
1050  * if it did, the reference count would have still been greater than 1
1051  * even after releasing the reference count held by the pin. The fact
1052  * that there can't be a dsm_segment for this handle makes it OK to
1053  * pass the mapped size, mapped address, and private data as NULL
1054  * here.
1055  */
1056  if (is_main_region_dsm_handle(handle) ||
1057  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
1058  &junk_mapped_address, &junk_mapped_size, WARNING))
1059  {
1060  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
1061  if (is_main_region_dsm_handle(handle))
1062  FreePageManagerPut((FreePageManager *) dsm_main_space_begin,
1063  dsm_control->item[control_slot].first_page,
1064  dsm_control->item[control_slot].npages);
1065  Assert(dsm_control->item[control_slot].handle == handle);
1066  Assert(dsm_control->item[control_slot].refcnt == 1);
1067  dsm_control->item[control_slot].refcnt = 0;
1068  LWLockRelease(DynamicSharedMemoryControlLock);
1069  }
1070  }
1071 }
1072 
1073 /*
1074  * Find an existing mapping for a shared memory segment, if there is one.
1075  */
1076 dsm_segment *
1077 dsm_find_mapping(dsm_handle handle)
1078 {
1079  dlist_iter iter;
1080  dsm_segment *seg;
1081 
1082  dlist_foreach(iter, &dsm_segment_list)
1083  {
1084  seg = dlist_container(dsm_segment, node, iter.cur);
1085  if (seg->handle == handle)
1086  return seg;
1087  }
1088 
1089  return NULL;
1090 }
1091 
1092 /*
1093  * Get the address at which a dynamic shared memory segment is mapped.
1094  */
1095 void *
1096 dsm_segment_address(dsm_segment *seg)
1097 {
1098  Assert(seg->mapped_address != NULL);
1099  return seg->mapped_address;
1100 }
1101 
1102 /*
1103  * Get the size of a mapping.
1104  */
1105 Size
1106 dsm_segment_map_length(dsm_segment *seg)
1107 {
1108  Assert(seg->mapped_address != NULL);
1109  return seg->mapped_size;
1110 }
1111 
1112 /*
1113  * Get a handle for a mapping.
1114  *
1115  * To establish communication via dynamic shared memory between two backends,
1116  * one of them should first call dsm_create() to establish a new shared
1117  * memory mapping. That process should then call dsm_segment_handle() to
1118  * obtain a handle for the mapping, and pass that handle to the
1119  * coordinating backend via some means (e.g. bgw_main_arg, or via the
1120  * main shared memory segment). The recipient, once in possession of the
1121  * handle, should call dsm_attach().
1122  */
1123 dsm_handle
1124 dsm_segment_handle(dsm_segment *seg)
1125 {
1126  return seg->handle;
1127 }
1128 
1129 /*
1130  * Register an on-detach callback for a dynamic shared memory segment.
1131  */
1132 void
1133 on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
1134 {
1135  dsm_segment_detach_callback *cb;
1136 
1137  cb = MemoryContextAlloc(TopMemoryContext,
1138  sizeof(dsm_segment_detach_callback));
1139  cb->function = function;
1140  cb->arg = arg;
1141  slist_push_head(&seg->on_detach, &cb->node);
1142 }
1143 
1144 /*
1145  * Unregister an on-detach callback for a dynamic shared memory segment.
1146  */
1147 void
1148 cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function,
1149  Datum arg)
1150 {
1151  slist_mutable_iter iter;
1152 
1153  slist_foreach_modify(iter, &seg->on_detach)
1154  {
1155  dsm_segment_detach_callback *cb;
1156 
1157  cb = slist_container(dsm_segment_detach_callback, node, iter.cur);
1158  if (cb->function == function && cb->arg == arg)
1159  {
1160  slist_delete_current(&iter);
1161  pfree(cb);
1162  break;
1163  }
1164  }
1165 }
1166 
1167 /*
1168  * Discard all registered on-detach callbacks without executing them.
1169  */
1170 void
1171 reset_on_dsm_detach(void)
1172 {
1173  dlist_iter iter;
1174 
1175  dlist_foreach(iter, &dsm_segment_list)
1176  {
1177  dsm_segment *seg = dlist_container(dsm_segment, node, iter.cur);
1178 
1179  /* Throw away explicit on-detach actions one by one. */
1180  while (!slist_is_empty(&seg->on_detach))
1181  {
1182  slist_node *node;
1183  dsm_segment_detach_callback *cb;
1184 
1185  node = slist_pop_head_node(&seg->on_detach);
1186  cb = slist_container(dsm_segment_detach_callback, node, node);
1187  pfree(cb);
1188  }
1189 
1190  /*
1191  * Decrementing the reference count is a sort of implicit on-detach
1192  * action; make sure we don't do that, either.
1193  */
1194  seg->control_slot = INVALID_CONTROL_SLOT;
1195  }
1196 }
1197 
1198 /*
1199  * Create a segment descriptor.
1200  */
1201 static dsm_segment *
1202 dsm_create_descriptor(void)
1203 {
1204  dsm_segment *seg;
1205 
1206  if (CurrentResourceOwner)
1207  ResourceOwnerEnlarge(CurrentResourceOwner);
1208 
1209  seg = MemoryContextAlloc(TopMemoryContext, sizeof(dsm_segment));
1210  dlist_push_head(&dsm_segment_list, &seg->node);
1211 
1212  /* seg->handle must be initialized by the caller */
1213  seg->control_slot = INVALID_CONTROL_SLOT;
1214  seg->impl_private = NULL;
1215  seg->mapped_address = NULL;
1216  seg->mapped_size = 0;
1217 
1218  seg->resowner = CurrentResourceOwner;
1219  if (CurrentResourceOwner)
1220  ResourceOwnerRememberDSM(CurrentResourceOwner, seg);
1221 
1222  slist_init(&seg->on_detach);
1223 
1224  return seg;
1225 }
1226 
1227 /*
1228  * Sanity check a control segment.
1229  *
1230  * The goal here isn't to detect everything that could possibly be wrong with
1231  * the control segment; there's not enough information for that. Rather, the
1232  * goal is to make sure that someone can iterate over the items in the segment
1233  * without overrunning the end of the mapping and crashing. We also check
1234  * the magic number since, if that's messed up, this may not even be one of
1235  * our segments at all.
1236  */
1237 static bool
1238 dsm_control_segment_sane(dsm_control_header *control, Size mapped_size)
1239 {
1240  if (mapped_size < offsetof(dsm_control_header, item))
1241  return false; /* Mapped size too short to read header. */
1242  if (control->magic != PG_DYNSHMEM_CONTROL_MAGIC)
1243  return false; /* Magic number doesn't match. */
1244  if (dsm_control_bytes_needed(control->maxitems) > mapped_size)
1245  return false; /* Max item count won't fit in map. */
1246  if (control->nitems > control->maxitems)
1247  return false; /* Overfull. */
1248  return true;
1249 }
1250 
1251 /*
1252  * Compute the number of control-segment bytes needed to store a given
1253  * number of items.
1254  */
1255 static uint64
1256 dsm_control_bytes_needed(uint32 nitems)
1257 {
1258  return offsetof(dsm_control_header, item)
1259  + sizeof(dsm_control_item) * (uint64) nitems;
1260 }
1261 
1262 static inline dsm_handle
1263 make_main_region_dsm_handle(int slot)
1264 {
1265  dsm_handle handle;
1266 
1267  /*
1268  * We need to create a handle that doesn't collide with any existing extra
1269  * segment created by dsm_impl_op(), so we'll make it odd. It also
1270  * mustn't collide with any other main area pseudo-segment, so we'll
1271  * include the slot number in some of the bits. We also want to make an
1272  * effort to avoid newly created and recently destroyed handles from being
1273  * confused, so we'll make the rest of the bits random.
1274  */
1275  handle = 1;
1276  handle |= slot << 1;
1277  handle |= pg_prng_uint32(&pg_global_prng_state) << (pg_leftmost_one_pos32(dsm_control->maxitems) + 1);
1278  return handle;
1279 }
1280 
1281 static inline bool
1282 is_main_region_dsm_handle(dsm_handle handle)
1283 {
1284  return handle & 1;
1285 }
1286 
1287 /* ResourceOwner callbacks */
1288 
1289 static void
1290 ResOwnerReleaseDSM(Datum res)
1291 {
1292  dsm_segment *seg = (dsm_segment *) DatumGetPointer(res);
1293 
1294  seg->resowner = NULL;
1295  dsm_detach(seg);
1296 }
1297 static char *
1298 ResOwnerPrintDSM(Datum res)
1299 {
1300  dsm_segment *seg = (dsm_segment *) DatumGetPointer(res);
1301 
1302  return psprintf("dynamic shared memory segment %u",
1303  dsm_segment_handle(seg));
1304 }