PostgreSQL Source Code  git master
dsm.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * dsm.c
4  * manage dynamic shared memory segments
5  *
6  * This file provides a set of services to make programming with dynamic
7  * shared memory segments more convenient. Unlike the low-level
8  * facilities provided by dsm_impl.h and dsm_impl.c, mappings and segments
9  * created using this module will be cleaned up automatically. Mappings
10  * will be removed when the resource owner under which they were created
11  * is cleaned up, unless dsm_pin_mapping() is used, in which case they
12  * have session lifespan. Segments will be removed when there are no
13  * remaining mappings, or at postmaster shutdown in any case. After a
14  * hard postmaster crash, remaining segments will be removed, if they
15  * still exist, at the next postmaster startup.
16  *
17  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
18  * Portions Copyright (c) 1994, Regents of the University of California
19  *
20  *
21  * IDENTIFICATION
22  * src/backend/storage/ipc/dsm.c
23  *
24  *-------------------------------------------------------------------------
25  */
26 
27 #include "postgres.h"
28 
29 #include <fcntl.h>
30 #include <unistd.h>
31 #ifndef WIN32
32 #include <sys/mman.h>
33 #endif
34 #include <sys/stat.h>
35 
36 #include "common/pg_prng.h"
37 #include "lib/ilist.h"
38 #include "miscadmin.h"
39 #include "port/pg_bitutils.h"
40 #include "storage/dsm.h"
41 #include "storage/ipc.h"
42 #include "storage/lwlock.h"
43 #include "storage/pg_shmem.h"
44 #include "utils/freepage.h"
45 #include "utils/guc.h"
46 #include "utils/memutils.h"
47 #include "utils/resowner_private.h"
48 
/* Magic number stored at the start of the control segment (sanity check). */
#define PG_DYNSHMEM_CONTROL_MAGIC		0x9a503d32

/* Base number of control-segment slots, plus extra slots per backend. */
#define PG_DYNSHMEM_FIXED_SLOTS			64
#define PG_DYNSHMEM_SLOTS_PER_BACKEND	5

/* Sentinel meaning "this mapping has no control-segment slot assigned". */
#define INVALID_CONTROL_SLOT		((uint32) -1)
55 
56 /* Backend-local tracking for on-detach callbacks. */
58 {
63 
64 /* Backend-local state for a dynamic shared memory segment. */
66 {
67  dlist_node node; /* List link in dsm_segment_list. */
68  ResourceOwner resowner; /* Resource owner. */
69  dsm_handle handle; /* Segment name. */
70  uint32 control_slot; /* Slot in control segment. */
71  void *impl_private; /* Implementation-specific private data. */
72  void *mapped_address; /* Mapping address, or NULL if unmapped. */
73  Size mapped_size; /* Size of our mapping. */
74  slist_head on_detach; /* On-detach callbacks. */
75 };
76 
77 /* Shared-memory state for a dynamic shared memory segment. */
78 typedef struct dsm_control_item
79 {
81  uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */
82  size_t first_page;
83  size_t npages;
84  void *impl_private_pm_handle; /* only needed on Windows */
85  bool pinned;
87 
88 /* Layout of the dynamic shared memory control segment. */
89 typedef struct dsm_control_header
90 {
96 
97 static void dsm_cleanup_for_mmap(void);
98 static void dsm_postmaster_shutdown(int code, Datum arg);
100 static bool dsm_control_segment_sane(dsm_control_header *control,
101  Size mapped_size);
102 static uint64 dsm_control_bytes_needed(uint32 nitems);
103 static inline dsm_handle make_main_region_dsm_handle(int slot);
104 static inline bool is_main_region_dsm_handle(dsm_handle handle);
105 
106 /* Has this backend initialized the dynamic shared memory system yet? */
107 static bool dsm_init_done = false;
108 
109 /* Preallocated DSM space in the main shared memory region. */
110 static void *dsm_main_space_begin = NULL;
111 
112 /*
113  * List of dynamic shared memory segments used by this backend.
114  *
115  * At process exit time, we must decrement the reference count of each
116  * segment we have attached; this list makes it possible to find all such
117  * segments.
118  *
119  * This list should always be empty in the postmaster. We could probably
120  * allow the postmaster to map dynamic shared memory segments before it
121  * begins to start child processes, provided that each process adjusted
122  * the reference counts for those segments in the control segment at
123  * startup time, but there's no obvious need for such a facility, which
124  * would also be complex to handle in the EXEC_BACKEND case. Once the
125  * postmaster has begun spawning children, there's an additional problem:
126  * each new mapping would require an update to the control segment,
127  * which requires locking, in which the postmaster must not be involved.
128  */
130 
131 /*
132  * Control segment information.
133  *
134  * Unlike ordinary shared memory segments, the control segment is not
135  * reference counted; instead, it lasts for the postmaster's entire
136  * life cycle. For simplicity, it doesn't have a dsm_segment object either.
137  */
141 static void *dsm_control_impl_private = NULL;
142 
143 /*
144  * Start up the dynamic shared memory system.
145  *
146  * This is called just once during each cluster lifetime, at postmaster
147  * startup time.
148  */
149 void
151 {
152  void *dsm_control_address = NULL;
153  uint32 maxitems;
154  Size segsize;
155 
157 
158  /*
159  * If we're using the mmap implementations, clean up any leftovers.
160  * Cleanup isn't needed on Windows, and happens earlier in startup for
161  * POSIX and System V shared memory, via a direct call to
162  * dsm_cleanup_using_control_segment.
163  */
166 
167  /* Determine size for new control segment. */
168  maxitems = PG_DYNSHMEM_FIXED_SLOTS
170  elog(DEBUG2, "dynamic shared memory system will support %u segments",
171  maxitems);
172  segsize = dsm_control_bytes_needed(maxitems);
173 
174  /*
175  * Loop until we find an unused identifier for the new control segment. We
176  * sometimes use 0 as a sentinel value indicating that no control segment
177  * is known to exist, so avoid using that value for a real control
178  * segment.
179  */
180  for (;;)
181  {
182  Assert(dsm_control_address == NULL);
184  /* Use even numbers only */
187  continue;
189  &dsm_control_impl_private, &dsm_control_address,
191  break;
192  }
193  dsm_control = dsm_control_address;
195  elog(DEBUG2,
196  "created dynamic shared memory control segment %u (%zu bytes)",
197  dsm_control_handle, segsize);
199 
200  /* Initialize control segment. */
202  dsm_control->nitems = 0;
203  dsm_control->maxitems = maxitems;
204 }
205 
206 /*
207  * Determine whether the control segment from the previous postmaster
208  * invocation still exists. If so, remove the dynamic shared memory
209  * segments to which it refers, and then the control segment itself.
210  */
211 void
213 {
214  void *mapped_address = NULL;
215  void *junk_mapped_address = NULL;
216  void *impl_private = NULL;
217  void *junk_impl_private = NULL;
218  Size mapped_size = 0;
219  Size junk_mapped_size = 0;
220  uint32 nitems;
221  uint32 i;
222  dsm_control_header *old_control;
223 
224  /*
225  * Try to attach the segment. If this fails, it probably just means that
226  * the operating system has been rebooted and the segment no longer
227  * exists, or an unrelated process has used the same shm ID. So just fall
228  * out quietly.
229  */
230  if (!dsm_impl_op(DSM_OP_ATTACH, old_control_handle, 0, &impl_private,
231  &mapped_address, &mapped_size, DEBUG1))
232  return;
233 
234  /*
235  * We've managed to reattach it, but the contents might not be sane. If
236  * they aren't, we disregard the segment after all.
237  */
238  old_control = (dsm_control_header *) mapped_address;
239  if (!dsm_control_segment_sane(old_control, mapped_size))
240  {
241  dsm_impl_op(DSM_OP_DETACH, old_control_handle, 0, &impl_private,
242  &mapped_address, &mapped_size, LOG);
243  return;
244  }
245 
246  /*
247  * OK, the control segment looks basically valid, so we can use it to get
248  * a list of segments that need to be removed.
249  */
250  nitems = old_control->nitems;
251  for (i = 0; i < nitems; ++i)
252  {
253  dsm_handle handle;
254  uint32 refcnt;
255 
256  /* If the reference count is 0, the slot is actually unused. */
257  refcnt = old_control->item[i].refcnt;
258  if (refcnt == 0)
259  continue;
260 
261  /* If it was using the main shmem area, there is nothing to do. */
262  handle = old_control->item[i].handle;
263  if (is_main_region_dsm_handle(handle))
264  continue;
265 
266  /* Log debugging information. */
267  elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u (reference count %u)",
268  handle, refcnt);
269 
270  /* Destroy the referenced segment. */
271  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
272  &junk_mapped_address, &junk_mapped_size, LOG);
273  }
274 
275  /* Destroy the old control segment, too. */
276  elog(DEBUG2,
277  "cleaning up dynamic shared memory control segment with ID %u",
278  old_control_handle);
279  dsm_impl_op(DSM_OP_DESTROY, old_control_handle, 0, &impl_private,
280  &mapped_address, &mapped_size, LOG);
281 }
282 
283 /*
284  * When we're using the mmap shared memory implementation, "shared memory"
285  * segments might even manage to survive an operating system reboot.
286  * But there's no guarantee as to exactly what will survive: some segments
287  * may survive, and others may not, and the contents of some may be out
288  * of date. In particular, the control segment may be out of date, so we
289  * can't rely on it to figure out what to remove. However, since we know
290  * what directory contains the files we used as shared memory, we can simply
291  * scan the directory and blow everything away that shouldn't be there.
292  */
293 static void
295 {
296  DIR *dir;
297  struct dirent *dent;
298 
299  /* Scan the directory for something with a name of the correct format. */
301 
302  while ((dent = ReadDir(dir, PG_DYNSHMEM_DIR)) != NULL)
303  {
304  if (strncmp(dent->d_name, PG_DYNSHMEM_MMAP_FILE_PREFIX,
305  strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0)
306  {
307  char buf[MAXPGPATH + sizeof(PG_DYNSHMEM_DIR)];
308 
309  snprintf(buf, sizeof(buf), PG_DYNSHMEM_DIR "/%s", dent->d_name);
310 
311  elog(DEBUG2, "removing file \"%s\"", buf);
312 
313  /* We found a matching file; so remove it. */
314  if (unlink(buf) != 0)
315  ereport(ERROR,
317  errmsg("could not remove file \"%s\": %m", buf)));
318  }
319  }
320 
321  /* Cleanup complete. */
322  FreeDir(dir);
323 }
324 
325 /*
326  * At shutdown time, we iterate over the control segment and remove all
327  * remaining dynamic shared memory segments. We avoid throwing errors here;
328  * the postmaster is shutting down either way, and this is just non-critical
329  * resource cleanup.
330  */
331 static void
333 {
334  uint32 nitems;
335  uint32 i;
336  void *dsm_control_address;
337  void *junk_mapped_address = NULL;
338  void *junk_impl_private = NULL;
339  Size junk_mapped_size = 0;
341 
342  /*
343  * If some other backend exited uncleanly, it might have corrupted the
344  * control segment while it was dying. In that case, we warn and ignore
345  * the contents of the control segment. This may end up leaving behind
346  * stray shared memory segments, but there's not much we can do about that
347  * if the metadata is gone.
348  */
349  nitems = dsm_control->nitems;
351  {
352  ereport(LOG,
353  (errmsg("dynamic shared memory control segment is corrupt")));
354  return;
355  }
356 
357  /* Remove any remaining segments. */
358  for (i = 0; i < nitems; ++i)
359  {
360  dsm_handle handle;
361 
362  /* If the reference count is 0, the slot is actually unused. */
363  if (dsm_control->item[i].refcnt == 0)
364  continue;
365 
366  handle = dsm_control->item[i].handle;
367  if (is_main_region_dsm_handle(handle))
368  continue;
369 
370  /* Log debugging information. */
371  elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u",
372  handle);
373 
374  /* Destroy the segment. */
375  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
376  &junk_mapped_address, &junk_mapped_size, LOG);
377  }
378 
379  /* Remove the control segment itself. */
380  elog(DEBUG2,
381  "cleaning up dynamic shared memory control segment with ID %u",
383  dsm_control_address = dsm_control;
385  &dsm_control_impl_private, &dsm_control_address,
387  dsm_control = dsm_control_address;
388  shim->dsm_control = 0;
389 }
390 
391 /*
392  * Prepare this backend for dynamic shared memory usage. Under EXEC_BACKEND,
393  * we must reread the state file and map the control segment; in other cases,
394  * we'll have inherited the postmaster's mapping and global variables.
395  */
396 static void
398 {
399 #ifdef EXEC_BACKEND
400  if (IsUnderPostmaster)
401  {
402  void *control_address = NULL;
403 
404  /* Attach control segment. */
407  &dsm_control_impl_private, &control_address,
409  dsm_control = control_address;
410  /* If control segment doesn't look sane, something is badly wrong. */
412  {
414  &dsm_control_impl_private, &control_address,
416  ereport(FATAL,
417  (errcode(ERRCODE_INTERNAL_ERROR),
418  errmsg("dynamic shared memory control segment is not valid")));
419  }
420  }
421 #endif
422 
423  dsm_init_done = true;
424 }
425 
#ifdef EXEC_BACKEND
/*
 * When running under EXEC_BACKEND, we get a callback here when the main
 * shared memory segment is re-attached, so that we can record the control
 * handle retrieved from it.
 */
void
dsm_set_control_handle(dsm_handle h)
{
	Assert(dsm_control_handle == 0 && h != 0);
	dsm_control_handle = h;
}
#endif
439 
440 /*
441  * Reserve some space in the main shared memory segment for DSM segments.
442  */
443 size_t
445 {
446  return 1024 * 1024 * (size_t) min_dynamic_shared_memory;
447 }
448 
449 /*
450  * Initialize space in the main shared memory segment for DSM segments.
451  */
452 void
454 {
455  size_t size = dsm_estimate_size();
456  bool found;
457 
458  if (size == 0)
459  return;
460 
461  dsm_main_space_begin = ShmemInitStruct("Preallocated DSM", size, &found);
462  if (!found)
463  {
465  size_t first_page = 0;
466  size_t pages;
467 
468  /* Reserve space for the FreePageManager. */
469  while (first_page * FPM_PAGE_SIZE < sizeof(FreePageManager))
470  ++first_page;
471 
472  /* Initialize it and give it all the rest of the space. */
474  pages = (size / FPM_PAGE_SIZE) - first_page;
475  FreePageManagerPut(fpm, first_page, pages);
476  }
477 }
478 
479 /*
480  * Create a new dynamic shared memory segment.
481  *
482  * If there is a non-NULL CurrentResourceOwner, the new segment is associated
483  * with it and must be detached before the resource owner releases, or a
484  * warning will be logged. If CurrentResourceOwner is NULL, the segment
485  * remains attached until explicitly detached or the session ends.
486  * Creating with a NULL CurrentResourceOwner is equivalent to creating
487  * with a non-NULL CurrentResourceOwner and then calling dsm_pin_mapping.
488  */
489 dsm_segment *
490 dsm_create(Size size, int flags)
491 {
492  dsm_segment *seg;
493  uint32 i;
494  uint32 nitems;
495  size_t npages = 0;
496  size_t first_page = 0;
497  FreePageManager *dsm_main_space_fpm = dsm_main_space_begin;
498  bool using_main_dsm_region = false;
499 
500  /*
501  * Unsafe in postmaster. It might seem pointless to allow use of dsm in
502  * single user mode, but otherwise some subsystems will need dedicated
503  * single user mode code paths.
504  */
506 
507  if (!dsm_init_done)
509 
510  /* Create a new segment descriptor. */
511  seg = dsm_create_descriptor();
512 
513  /*
514  * Lock the control segment while we try to allocate from the main shared
515  * memory area, if configured.
516  */
517  if (dsm_main_space_fpm)
518  {
519  npages = size / FPM_PAGE_SIZE;
520  if (size % FPM_PAGE_SIZE > 0)
521  ++npages;
522 
523  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
524  if (FreePageManagerGet(dsm_main_space_fpm, npages, &first_page))
525  {
526  /* We can carve out a piece of the main shared memory segment. */
527  seg->mapped_address = (char *) dsm_main_space_begin +
528  first_page * FPM_PAGE_SIZE;
529  seg->mapped_size = npages * FPM_PAGE_SIZE;
530  using_main_dsm_region = true;
531  /* We'll choose a handle below. */
532  }
533  }
534 
535  if (!using_main_dsm_region)
536  {
537  /*
538  * We need to create a new memory segment. Loop until we find an
539  * unused segment identifier.
540  */
541  if (dsm_main_space_fpm)
542  LWLockRelease(DynamicSharedMemoryControlLock);
543  for (;;)
544  {
545  Assert(seg->mapped_address == NULL && seg->mapped_size == 0);
546  /* Use even numbers only */
548  if (seg->handle == DSM_HANDLE_INVALID) /* Reserve sentinel */
549  continue;
550  if (dsm_impl_op(DSM_OP_CREATE, seg->handle, size, &seg->impl_private,
551  &seg->mapped_address, &seg->mapped_size, ERROR))
552  break;
553  }
554  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
555  }
556 
557  /* Search the control segment for an unused slot. */
558  nitems = dsm_control->nitems;
559  for (i = 0; i < nitems; ++i)
560  {
561  if (dsm_control->item[i].refcnt == 0)
562  {
563  if (using_main_dsm_region)
564  {
566  dsm_control->item[i].first_page = first_page;
567  dsm_control->item[i].npages = npages;
568  }
569  else
571  dsm_control->item[i].handle = seg->handle;
572  /* refcnt of 1 triggers destruction, so start at 2 */
573  dsm_control->item[i].refcnt = 2;
575  dsm_control->item[i].pinned = false;
576  seg->control_slot = i;
577  LWLockRelease(DynamicSharedMemoryControlLock);
578  return seg;
579  }
580  }
581 
582  /* Verify that we can support an additional mapping. */
583  if (nitems >= dsm_control->maxitems)
584  {
585  if (using_main_dsm_region)
586  FreePageManagerPut(dsm_main_space_fpm, first_page, npages);
587  LWLockRelease(DynamicSharedMemoryControlLock);
588  if (!using_main_dsm_region)
590  &seg->mapped_address, &seg->mapped_size, WARNING);
591  if (seg->resowner != NULL)
592  ResourceOwnerForgetDSM(seg->resowner, seg);
593  dlist_delete(&seg->node);
594  pfree(seg);
595 
596  if ((flags & DSM_CREATE_NULL_IF_MAXSEGMENTS) != 0)
597  return NULL;
598  ereport(ERROR,
599  (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
600  errmsg("too many dynamic shared memory segments")));
601  }
602 
603  /* Enter the handle into a new array slot. */
604  if (using_main_dsm_region)
605  {
606  seg->handle = make_main_region_dsm_handle(nitems);
607  dsm_control->item[i].first_page = first_page;
608  dsm_control->item[i].npages = npages;
609  }
610  dsm_control->item[nitems].handle = seg->handle;
611  /* refcnt of 1 triggers destruction, so start at 2 */
612  dsm_control->item[nitems].refcnt = 2;
613  dsm_control->item[nitems].impl_private_pm_handle = NULL;
614  dsm_control->item[nitems].pinned = false;
615  seg->control_slot = nitems;
616  dsm_control->nitems++;
617  LWLockRelease(DynamicSharedMemoryControlLock);
618 
619  return seg;
620 }
621 
622 /*
623  * Attach a dynamic shared memory segment.
624  *
625  * See comments for dsm_segment_handle() for an explanation of how this
626  * is intended to be used.
627  *
628  * This function will return NULL if the segment isn't known to the system.
629  * This can happen if we're asked to attach the segment, but then everyone
630  * else detaches it (causing it to be destroyed) before we get around to
631  * attaching it.
632  *
633  * If there is a non-NULL CurrentResourceOwner, the attached segment is
634  * associated with it and must be detached before the resource owner releases,
635  * or a warning will be logged. Otherwise the segment remains attached until
636  * explicitly detached or the session ends. See the note atop dsm_create().
637  */
638 dsm_segment *
640 {
641  dsm_segment *seg;
642  dlist_iter iter;
643  uint32 i;
644  uint32 nitems;
645 
646  /* Unsafe in postmaster (and pointless in a stand-alone backend). */
648 
649  if (!dsm_init_done)
651 
652  /*
653  * Since this is just a debugging cross-check, we could leave it out
654  * altogether, or include it only in assert-enabled builds. But since the
655  * list of attached segments should normally be very short, let's include
656  * it always for right now.
657  *
658  * If you're hitting this error, you probably want to attempt to find an
659  * existing mapping via dsm_find_mapping() before calling dsm_attach() to
660  * create a new one.
661  */
663  {
664  seg = dlist_container(dsm_segment, node, iter.cur);
665  if (seg->handle == h)
666  elog(ERROR, "can't attach the same segment more than once");
667  }
668 
669  /* Create a new segment descriptor. */
670  seg = dsm_create_descriptor();
671  seg->handle = h;
672 
673  /* Bump reference count for this segment in shared memory. */
674  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
675  nitems = dsm_control->nitems;
676  for (i = 0; i < nitems; ++i)
677  {
678  /*
679  * If the reference count is 0, the slot is actually unused. If the
680  * reference count is 1, the slot is still in use, but the segment is
681  * in the process of going away; even if the handle matches, another
682  * slot may already have started using the same handle value by
683  * coincidence so we have to keep searching.
684  */
685  if (dsm_control->item[i].refcnt <= 1)
686  continue;
687 
688  /* If the handle doesn't match, it's not the slot we want. */
689  if (dsm_control->item[i].handle != seg->handle)
690  continue;
691 
692  /* Otherwise we've found a match. */
693  dsm_control->item[i].refcnt++;
694  seg->control_slot = i;
696  {
697  seg->mapped_address = (char *) dsm_main_space_begin +
700  }
701  break;
702  }
703  LWLockRelease(DynamicSharedMemoryControlLock);
704 
705  /*
706  * If we didn't find the handle we're looking for in the control segment,
707  * it probably means that everyone else who had it mapped, including the
708  * original creator, died before we got to this point. It's up to the
709  * caller to decide what to do about that.
710  */
712  {
713  dsm_detach(seg);
714  return NULL;
715  }
716 
717  /* Here's where we actually try to map the segment. */
720  &seg->mapped_address, &seg->mapped_size, ERROR);
721 
722  return seg;
723 }
724 
725 /*
726  * At backend shutdown time, detach any segments that are still attached.
727  * (This is similar to dsm_detach_all, except that there's no reason to
728  * unmap the control segment before exiting, so we don't bother.)
729  */
730 void
732 {
734  {
735  dsm_segment *seg;
736 
738  dsm_detach(seg);
739  }
740 }
741 
742 /*
743  * Detach all shared memory segments, including the control segments. This
744  * should be called, along with PGSharedMemoryDetach, in processes that
745  * might inherit mappings but are not intended to be connected to dynamic
746  * shared memory.
747  */
748 void
750 {
751  void *control_address = dsm_control;
752 
754  {
755  dsm_segment *seg;
756 
758  dsm_detach(seg);
759  }
760 
761  if (control_address != NULL)
763  &dsm_control_impl_private, &control_address,
765 }
766 
767 /*
768  * Detach from a shared memory segment, destroying the segment if we
769  * remove the last reference.
770  *
771  * This function should never fail. It will often be invoked when aborting
772  * a transaction, and a further error won't serve any purpose. It's not a
773  * complete disaster if we fail to unmap or destroy the segment; it means a
774  * resource leak, but that doesn't necessarily preclude further operations.
775  */
776 void
778 {
779  /*
780  * Invoke registered callbacks. Just in case one of those callbacks
781  * throws a further error that brings us back here, pop the callback
782  * before invoking it, to avoid infinite error recursion. Don't allow
783  * interrupts while running the individual callbacks in non-error code
784  * paths, to avoid leaving cleanup work unfinished if we're interrupted by
785  * a statement timeout or similar.
786  */
787  HOLD_INTERRUPTS();
788  while (!slist_is_empty(&seg->on_detach))
789  {
790  slist_node *node;
792  on_dsm_detach_callback function;
793  Datum arg;
794 
795  node = slist_pop_head_node(&seg->on_detach);
797  function = cb->function;
798  arg = cb->arg;
799  pfree(cb);
800 
801  function(seg, arg);
802  }
804 
805  /*
806  * Try to remove the mapping, if one exists. Normally, there will be, but
807  * maybe not, if we failed partway through a create or attach operation.
808  * We remove the mapping before decrementing the reference count so that
809  * the process that sees a zero reference count can be certain that no
810  * remaining mappings exist. Even if this fails, we pretend that it
811  * works, because retrying is likely to fail in the same way.
812  */
813  if (seg->mapped_address != NULL)
814  {
817  &seg->mapped_address, &seg->mapped_size, WARNING);
818  seg->impl_private = NULL;
819  seg->mapped_address = NULL;
820  seg->mapped_size = 0;
821  }
822 
823  /* Reduce reference count, if we previously increased it. */
825  {
826  uint32 refcnt;
827  uint32 control_slot = seg->control_slot;
828 
829  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
830  Assert(dsm_control->item[control_slot].handle == seg->handle);
831  Assert(dsm_control->item[control_slot].refcnt > 1);
832  refcnt = --dsm_control->item[control_slot].refcnt;
834  LWLockRelease(DynamicSharedMemoryControlLock);
835 
836  /* If new reference count is 1, try to destroy the segment. */
837  if (refcnt == 1)
838  {
839  /* A pinned segment should never reach 1. */
840  Assert(!dsm_control->item[control_slot].pinned);
841 
842  /*
843  * If we fail to destroy the segment here, or are killed before we
844  * finish doing so, the reference count will remain at 1, which
845  * will mean that nobody else can attach to the segment. At
846  * postmaster shutdown time, or when a new postmaster is started
847  * after a hard kill, another attempt will be made to remove the
848  * segment.
849  *
850  * The main case we're worried about here is being killed by a
851  * signal before we can finish removing the segment. In that
852  * case, it's important to be sure that the segment still gets
853  * removed. If we actually fail to remove the segment for some
854  * other reason, the postmaster may not have any better luck than
855  * we did. There's not much we can do about that, though.
856  */
857  if (is_main_region_dsm_handle(seg->handle) ||
859  &seg->mapped_address, &seg->mapped_size, WARNING))
860  {
861  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
864  dsm_control->item[control_slot].first_page,
865  dsm_control->item[control_slot].npages);
866  Assert(dsm_control->item[control_slot].handle == seg->handle);
867  Assert(dsm_control->item[control_slot].refcnt == 1);
868  dsm_control->item[control_slot].refcnt = 0;
869  LWLockRelease(DynamicSharedMemoryControlLock);
870  }
871  }
872  }
873 
874  /* Clean up our remaining backend-private data structures. */
875  if (seg->resowner != NULL)
876  ResourceOwnerForgetDSM(seg->resowner, seg);
877  dlist_delete(&seg->node);
878  pfree(seg);
879 }
880 
881 /*
882  * Keep a dynamic shared memory mapping until end of session.
883  *
884  * By default, mappings are owned by the current resource owner, which
885  * typically means they stick around for the duration of the current query
886  * only.
887  */
888 void
890 {
891  if (seg->resowner != NULL)
892  {
893  ResourceOwnerForgetDSM(seg->resowner, seg);
894  seg->resowner = NULL;
895  }
896 }
897 
898 /*
899  * Arrange to remove a dynamic shared memory mapping at cleanup time.
900  *
901  * dsm_pin_mapping() can be used to preserve a mapping for the entire
902  * lifetime of a process; this function reverses that decision, making
903  * the segment owned by the current resource owner. This may be useful
904  * just before performing some operation that will invalidate the segment
905  * for future use by this backend.
906  */
907 void
909 {
910  Assert(seg->resowner == NULL);
914 }
915 
916 /*
917  * Keep a dynamic shared memory segment until postmaster shutdown, or until
918  * dsm_unpin_segment is called.
919  *
920  * This function should not be called more than once per segment, unless the
921  * segment is explicitly unpinned with dsm_unpin_segment in between calls.
922  *
923  * Note that this function does not arrange for the current process to
924  * keep the segment mapped indefinitely; if that behavior is desired,
925  * dsm_pin_mapping() should be used from each process that needs to
926  * retain the mapping.
927  */
928 void
930 {
931  void *handle;
932 
933  /*
934  * Bump reference count for this segment in shared memory. This will
935  * ensure that even if there is no session which is attached to this
936  * segment, it will remain until postmaster shutdown or an explicit call
937  * to unpin.
938  */
939  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
940  if (dsm_control->item[seg->control_slot].pinned)
941  elog(ERROR, "cannot pin a segment that is already pinned");
942  dsm_impl_pin_segment(seg->handle, seg->impl_private, &handle);
943  dsm_control->item[seg->control_slot].pinned = true;
946  LWLockRelease(DynamicSharedMemoryControlLock);
947 }
948 
949 /*
950  * Unpin a dynamic shared memory segment that was previously pinned with
951  * dsm_pin_segment. This function should not be called unless dsm_pin_segment
952  * was previously called for this segment.
953  *
954  * The argument is a dsm_handle rather than a dsm_segment in case you want
955  * to unpin a segment to which you haven't attached. This turns out to be
956  * useful if, for example, a reference to one shared memory segment is stored
957  * within another shared memory segment. You might want to unpin the
958  * referenced segment before destroying the referencing segment.
959  */
960 void
962 {
963  uint32 control_slot = INVALID_CONTROL_SLOT;
964  bool destroy = false;
965  uint32 i;
966 
967  /* Find the control slot for the given handle. */
968  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
969  for (i = 0; i < dsm_control->nitems; ++i)
970  {
971  /* Skip unused slots and segments that are concurrently going away. */
972  if (dsm_control->item[i].refcnt <= 1)
973  continue;
974 
975  /* If we've found our handle, we can stop searching. */
976  if (dsm_control->item[i].handle == handle)
977  {
978  control_slot = i;
979  break;
980  }
981  }
982 
983  /*
984  * We should definitely have found the slot, and it should not already be
985  * in the process of going away, because this function should only be
986  * called on a segment which is pinned.
987  */
988  if (control_slot == INVALID_CONTROL_SLOT)
989  elog(ERROR, "cannot unpin unknown segment handle");
990  if (!dsm_control->item[control_slot].pinned)
991  elog(ERROR, "cannot unpin a segment that is not pinned");
992  Assert(dsm_control->item[control_slot].refcnt > 1);
993 
994  /*
995  * Allow implementation-specific code to run. We have to do this before
996  * releasing the lock, because impl_private_pm_handle may get modified by
997  * dsm_impl_unpin_segment.
998  */
999  dsm_impl_unpin_segment(handle,
1000  &dsm_control->item[control_slot].impl_private_pm_handle);
1001 
1002  /* Note that 1 means no references (0 means unused slot). */
1003  if (--dsm_control->item[control_slot].refcnt == 1)
1004  destroy = true;
1005  dsm_control->item[control_slot].pinned = false;
1006 
1007  /* Now we can release the lock. */
1008  LWLockRelease(DynamicSharedMemoryControlLock);
1009 
1010  /* Clean up resources if that was the last reference. */
1011  if (destroy)
1012  {
1013  void *junk_impl_private = NULL;
1014  void *junk_mapped_address = NULL;
1015  Size junk_mapped_size = 0;
1016 
1017  /*
1018  * For an explanation of how error handling works in this case, see
1019  * comments in dsm_detach. Note that if we reach this point, the
1020  * current process certainly does not have the segment mapped, because
1021  * if it did, the reference count would have still been greater than 1
1022  * even after releasing the reference count held by the pin. The fact
1023  * that there can't be a dsm_segment for this handle makes it OK to
1024  * pass the mapped size, mapped address, and private data as NULL
1025  * here.
1026  */
1027  if (is_main_region_dsm_handle(handle) ||
1028  dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
1029  &junk_mapped_address, &junk_mapped_size, WARNING))
1030  {
1031  LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
1032  if (is_main_region_dsm_handle(handle))
1034  dsm_control->item[control_slot].first_page,
1035  dsm_control->item[control_slot].npages);
1036  Assert(dsm_control->item[control_slot].handle == handle);
1037  Assert(dsm_control->item[control_slot].refcnt == 1);
1038  dsm_control->item[control_slot].refcnt = 0;
1039  LWLockRelease(DynamicSharedMemoryControlLock);
1040  }
1041  }
1042 }
1043 
1044 /*
1045  * Find an existing mapping for a shared memory segment, if there is one.
1046  */
1047 dsm_segment *
1049 {
1050  dlist_iter iter;
1051  dsm_segment *seg;
1052 
1054  {
1055  seg = dlist_container(dsm_segment, node, iter.cur);
1056  if (seg->handle == h)
1057  return seg;
1058  }
1059 
1060  return NULL;
1061 }
1062 
1063 /*
1064  * Get the address at which a dynamic shared memory segment is mapped.
1065  */
1066 void *
1068 {
1069  Assert(seg->mapped_address != NULL);
1070  return seg->mapped_address;
1071 }
1072 
1073 /*
1074  * Get the size of a mapping.
1075  */
1076 Size
1078 {
1079  Assert(seg->mapped_address != NULL);
1080  return seg->mapped_size;
1081 }
1082 
1083 /*
1084  * Get a handle for a mapping.
1085  *
1086  * To establish communication via dynamic shared memory between two backends,
1087  * one of them should first call dsm_create() to establish a new shared
1088  * memory mapping. That process should then call dsm_segment_handle() to
1089  * obtain a handle for the mapping, and pass that handle to the
1090  * coordinating backend via some means (e.g. bgw_main_arg, or via the
1091  * main shared memory segment). The recipient, once in possession of the
1092  * handle, should call dsm_attach().
1093  */
1094 dsm_handle
1096 {
1097  return seg->handle;
1098 }
1099 
1100 /*
1101  * Register an on-detach callback for a dynamic shared memory segment.
1102  */
1103 void
1105 {
1107 
1109  sizeof(dsm_segment_detach_callback));
1110  cb->function = function;
1111  cb->arg = arg;
1112  slist_push_head(&seg->on_detach, &cb->node);
1113 }
1114 
1115 /*
1116  * Unregister an on-detach callback for a dynamic shared memory segment.
1117  */
1118 void
1120  Datum arg)
1121 {
1122  slist_mutable_iter iter;
1123 
1124  slist_foreach_modify(iter, &seg->on_detach)
1125  {
1127 
1129  if (cb->function == function && cb->arg == arg)
1130  {
1131  slist_delete_current(&iter);
1132  pfree(cb);
1133  break;
1134  }
1135  }
1136 }
1137 
1138 /*
1139  * Discard all registered on-detach callbacks without executing them.
1140  */
1141 void
1143 {
1144  dlist_iter iter;
1145 
1147  {
1148  dsm_segment *seg = dlist_container(dsm_segment, node, iter.cur);
1149 
1150  /* Throw away explicit on-detach actions one by one. */
1151  while (!slist_is_empty(&seg->on_detach))
1152  {
1153  slist_node *node;
1155 
1156  node = slist_pop_head_node(&seg->on_detach);
1157  cb = slist_container(dsm_segment_detach_callback, node, node);
1158  pfree(cb);
1159  }
1160 
1161  /*
1162  * Decrementing the reference count is a sort of implicit on-detach
1163  * action; make sure we don't do that, either.
1164  */
1166  }
1167 }
1168 
1169 /*
1170  * Create a segment descriptor.
1171  */
1172 static dsm_segment *
1174 {
1175  dsm_segment *seg;
1176 
1179 
1182 
1183  /* seg->handle must be initialized by the caller */
1185  seg->impl_private = NULL;
1186  seg->mapped_address = NULL;
1187  seg->mapped_size = 0;
1188 
1192 
1193  slist_init(&seg->on_detach);
1194 
1195  return seg;
1196 }
1197 
1198 /*
1199  * Sanity check a control segment.
1200  *
1201  * The goal here isn't to detect everything that could possibly be wrong with
1202  * the control segment; there's not enough information for that. Rather, the
1203  * goal is to make sure that someone can iterate over the items in the segment
1204  * without overrunning the end of the mapping and crashing. We also check
1205  * the magic number since, if that's messed up, this may not even be one of
1206  * our segments at all.
1207  */
1208 static bool
1210 {
1211  if (mapped_size < offsetof(dsm_control_header, item))
1212  return false; /* Mapped size too short to read header. */
1213  if (control->magic != PG_DYNSHMEM_CONTROL_MAGIC)
1214  return false; /* Magic number doesn't match. */
1215  if (dsm_control_bytes_needed(control->maxitems) > mapped_size)
1216  return false; /* Max item count won't fit in map. */
1217  if (control->nitems > control->maxitems)
1218  return false; /* Overfull. */
1219  return true;
1220 }
1221 
1222 /*
1223  * Compute the number of control-segment bytes needed to store a given
1224  * number of items.
1225  */
1226 static uint64
1228 {
1229  return offsetof(dsm_control_header, item)
1230  + sizeof(dsm_control_item) * (uint64) nitems;
1231 }
1232 
1233 static inline dsm_handle
1235 {
1236  dsm_handle handle;
1237 
1238  /*
1239  * We need to create a handle that doesn't collide with any existing extra
1240  * segment created by dsm_impl_op(), so we'll make it odd. It also
1241  * mustn't collide with any other main area pseudo-segment, so we'll
1242  * include the slot number in some of the bits. We also want to make an
1243  * effort to avoid newly created and recently destroyed handles from being
1244  * confused, so we'll make the rest of the bits random.
1245  */
1246  handle = 1;
1247  handle |= slot << 1;
1249  return handle;
1250 }
1251 
1252 static inline bool
1254 {
1255  return handle & 1;
1256 }
unsigned int uint32
Definition: c.h:441
#define offsetof(type, field)
Definition: c.h:727
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:350
size_t Size
Definition: c.h:540
dsm_handle dsm_segment_handle(dsm_segment *seg)
Definition: dsm.c:1095
size_t dsm_estimate_size(void)
Definition: dsm.c:444
static void dsm_backend_startup(void)
Definition: dsm.c:397
void * dsm_segment_address(dsm_segment *seg)
Definition: dsm.c:1067
static void * dsm_main_space_begin
Definition: dsm.c:110
void dsm_detach(dsm_segment *seg)
Definition: dsm.c:777
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1104
dsm_segment * dsm_attach(dsm_handle h)
Definition: dsm.c:639
dsm_segment * dsm_create(Size size, int flags)
Definition: dsm.c:490
static dsm_handle dsm_control_handle
Definition: dsm.c:138
void dsm_pin_mapping(dsm_segment *seg)
Definition: dsm.c:889
static dlist_head dsm_segment_list
Definition: dsm.c:129
static void dsm_postmaster_shutdown(int code, Datum arg)
Definition: dsm.c:332
void dsm_unpin_segment(dsm_handle handle)
Definition: dsm.c:961
void dsm_pin_segment(dsm_segment *seg)
Definition: dsm.c:929
void dsm_detach_all(void)
Definition: dsm.c:749
static dsm_handle make_main_region_dsm_handle(int slot)
Definition: dsm.c:1234
static bool dsm_init_done
Definition: dsm.c:107
void dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
Definition: dsm.c:212
void dsm_postmaster_startup(PGShmemHeader *shim)
Definition: dsm.c:150
#define PG_DYNSHMEM_CONTROL_MAGIC
Definition: dsm.c:49
static dsm_control_header * dsm_control
Definition: dsm.c:139
static Size dsm_control_mapped_size
Definition: dsm.c:140
static dsm_segment * dsm_create_descriptor(void)
Definition: dsm.c:1173
static uint64 dsm_control_bytes_needed(uint32 nitems)
Definition: dsm.c:1227
void dsm_shmem_init(void)
Definition: dsm.c:453
#define PG_DYNSHMEM_SLOTS_PER_BACKEND
Definition: dsm.c:52
struct dsm_control_item dsm_control_item
#define PG_DYNSHMEM_FIXED_SLOTS
Definition: dsm.c:51
void dsm_backend_shutdown(void)
Definition: dsm.c:731
struct dsm_segment_detach_callback dsm_segment_detach_callback
void cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1119
void reset_on_dsm_detach(void)
Definition: dsm.c:1142
dsm_segment * dsm_find_mapping(dsm_handle h)
Definition: dsm.c:1048
static void dsm_cleanup_for_mmap(void)
Definition: dsm.c:294
Size dsm_segment_map_length(dsm_segment *seg)
Definition: dsm.c:1077
void dsm_unpin_mapping(dsm_segment *seg)
Definition: dsm.c:908
struct dsm_control_header dsm_control_header
static bool dsm_control_segment_sane(dsm_control_header *control, Size mapped_size)
Definition: dsm.c:1209
#define INVALID_CONTROL_SLOT
Definition: dsm.c:54
static void * dsm_control_impl_private
Definition: dsm.c:141
static bool is_main_region_dsm_handle(dsm_handle handle)
Definition: dsm.c:1253
#define DSM_CREATE_NULL_IF_MAXSEGMENTS
Definition: dsm.h:20
void(* on_dsm_detach_callback)(dsm_segment *, Datum arg)
Definition: dsm.h:57
#define DSM_HANDLE_INVALID
Definition: dsm.h:23
void dsm_impl_pin_segment(dsm_handle handle, void *impl_private, void **impl_private_pm_handle)
Definition: dsm_impl.c:955
int min_dynamic_shared_memory
Definition: dsm_impl.c:117
void dsm_impl_unpin_segment(dsm_handle handle, void **impl_private)
Definition: dsm_impl.c:1006
bool dsm_impl_op(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel)
Definition: dsm_impl.c:161
int dynamic_shared_memory_type
Definition: dsm_impl.c:114
uint32 dsm_handle
Definition: dsm_impl.h:55
@ DSM_OP_DETACH
Definition: dsm_impl.h:62
@ DSM_OP_CREATE
Definition: dsm_impl.h:60
@ DSM_OP_DESTROY
Definition: dsm_impl.h:63
@ DSM_OP_ATTACH
Definition: dsm_impl.h:61
#define PG_DYNSHMEM_MMAP_FILE_PREFIX
Definition: dsm_impl.h:52
#define PG_DYNSHMEM_DIR
Definition: dsm_impl.h:51
#define DSM_IMPL_MMAP
Definition: dsm_impl.h:20
int errcode_for_file_access(void)
Definition: elog.c:716
int errcode(int sqlerrcode)
Definition: elog.c:693
int errmsg(const char *fmt,...)
Definition: elog.c:904
#define LOG
Definition: elog.h:25
#define FATAL
Definition: elog.h:35
#define WARNING
Definition: elog.h:30
#define DEBUG2
Definition: elog.h:23
#define DEBUG1
Definition: elog.h:24
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define ereport(elevel,...)
Definition: elog.h:143
struct dirent * ReadDir(DIR *dir, const char *dirname)
Definition: fd.c:2788
int FreeDir(DIR *dir)
Definition: fd.c:2840
DIR * AllocateDir(const char *dirname)
Definition: fd.c:2722
bool FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
Definition: freepage.c:210
void FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
Definition: freepage.c:379
void FreePageManagerInitialize(FreePageManager *fpm, char *base)
Definition: freepage.c:183
#define FPM_PAGE_SIZE
Definition: freepage.h:30
bool IsUnderPostmaster
Definition: globals.c:113
int MaxBackends
Definition: globals.c:140
bool IsPostmasterEnvironment
Definition: globals.c:112
static void slist_delete_current(slist_mutable_iter *iter)
Definition: ilist.h:671
static bool slist_is_empty(slist_head *head)
Definition: ilist.h:582
#define dlist_foreach(iter, lhead)
Definition: ilist.h:526
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:506
static bool dlist_is_empty(dlist_head *head)
Definition: ilist.h:289
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
#define slist_foreach_modify(iter, lhead)
Definition: ilist.h:735
static void slist_init(slist_head *head)
Definition: ilist.h:573
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:593
#define slist_container(type, membername, ptr)
Definition: ilist.h:693
#define DLIST_STATIC_INIT(name)
Definition: ilist.h:248
#define dlist_container(type, membername, ptr)
Definition: ilist.h:496
static slist_node * slist_pop_head_node(slist_head *head)
Definition: ilist.h:615
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
int i
Definition: isn.c:73
Assert(fmt[strlen(fmt) - 1] !='\n')
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1196
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1800
@ LW_EXCLUSIVE
Definition: lwlock.h:104
void pfree(void *pointer)
Definition: mcxt.c:1175
MemoryContext TopMemoryContext
Definition: mcxt.c:48
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:863
#define RESUME_INTERRUPTS()
Definition: miscadmin.h:134
#define HOLD_INTERRUPTS()
Definition: miscadmin.h:132
void * arg
static int pg_leftmost_one_pos32(uint32 word)
Definition: pg_bitutils.h:26
#define MAXPGPATH
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:185
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:28
static char * buf
Definition: pg_test_fsync.c:67
#define snprintf
Definition: port.h:225
uintptr_t Datum
Definition: postgres.h:411
#define DatumGetPointer(X)
Definition: postgres.h:593
#define PointerGetDatum(X)
Definition: postgres.h:600
void ResourceOwnerForgetDSM(ResourceOwner owner, dsm_segment *seg)
Definition: resowner.c:1350
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void ResourceOwnerRememberDSM(ResourceOwner owner, dsm_segment *seg)
Definition: resowner.c:1341
void ResourceOwnerEnlargeDSMs(ResourceOwner owner)
Definition: resowner.c:1330
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
Definition: dirent.c:26
dsm_handle dsm_control
Definition: pg_shmem.h:36
Definition: dirent.h:10
char d_name[MAX_PATH]
Definition: dirent.h:15
dlist_node * cur
Definition: ilist.h:161
uint32 maxitems
Definition: dsm.c:93
uint32 nitems
Definition: dsm.c:92
uint32 magic
Definition: dsm.c:91
dsm_control_item item[FLEXIBLE_ARRAY_MEMBER]
Definition: dsm.c:94
size_t npages
Definition: dsm.c:83
dsm_handle handle
Definition: dsm.c:80
size_t first_page
Definition: dsm.c:82
bool pinned
Definition: dsm.c:85
void * impl_private_pm_handle
Definition: dsm.c:84
uint32 refcnt
Definition: dsm.c:81
on_dsm_detach_callback function
Definition: dsm.c:59
uint32 control_slot
Definition: dsm.c:70
dsm_handle handle
Definition: dsm.c:69
Size mapped_size
Definition: dsm.c:73
void * impl_private
Definition: dsm.c:71
slist_head on_detach
Definition: dsm.c:74
dlist_node node
Definition: dsm.c:67
ResourceOwner resowner
Definition: dsm.c:68
void * mapped_address
Definition: dsm.c:72
slist_node * cur
Definition: ilist.h:241