portalmem.c
1 /*-------------------------------------------------------------------------
2  *
3  * portalmem.c
4  * backend portal memory management
5  *
6  * Portals are objects representing the execution state of a query.
7  * This module provides memory management services for portals, but it
8  * doesn't actually run the executor for them.
9  *
10  *
11  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
12  * Portions Copyright (c) 1994, Regents of the University of California
13  *
14  * IDENTIFICATION
15  * src/backend/utils/mmgr/portalmem.c
16  *
17  *-------------------------------------------------------------------------
18  */
19 #include "postgres.h"
20 
21 #include "access/xact.h"
22 #include "catalog/pg_type.h"
23 #include "commands/portalcmds.h"
24 #include "miscadmin.h"
25 #include "storage/ipc.h"
26 #include "utils/builtins.h"
27 #include "utils/memutils.h"
28 #include "utils/snapmgr.h"
29 #include "utils/timestamp.h"
30 
31 /*
32  * Estimate of the maximum number of open portals a user would have,
33  * used in initially sizing the PortalHashTable in EnablePortalManager().
34  * Since the hash table can expand, there's no need to make this overly
35  * generous, and keeping it small avoids unnecessary overhead in the
36  * hash_seq_search() calls executed during transaction end.
37  */
38 #define PORTALS_PER_USER 16
39 
40 
41 /* ----------------
42  * Global state
43  * ----------------
44  */
45 
46 #define MAX_PORTALNAME_LEN NAMEDATALEN
47 
48 typedef struct portalhashent
49 {
50  char portalname[MAX_PORTALNAME_LEN];
51  Portal portal;
52 } PortalHashEnt;
53 
54 static HTAB *PortalHashTable = NULL;
55 
56 #define PortalHashTableLookup(NAME, PORTAL) \
57 do { \
58  PortalHashEnt *hentry; \
59  \
60  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
61  (NAME), HASH_FIND, NULL); \
62  if (hentry) \
63  PORTAL = hentry->portal; \
64  else \
65  PORTAL = NULL; \
66 } while(0)
67 
68 #define PortalHashTableInsert(PORTAL, NAME) \
69 do { \
70  PortalHashEnt *hentry; bool found; \
71  \
72  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
73  (NAME), HASH_ENTER, &found); \
74  if (found) \
75  elog(ERROR, "duplicate portal name"); \
76  hentry->portal = PORTAL; \
77  /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
78  PORTAL->name = hentry->portalname; \
79 } while(0)
80 
81 #define PortalHashTableDelete(PORTAL) \
82 do { \
83  PortalHashEnt *hentry; \
84  \
85  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
86  PORTAL->name, HASH_REMOVE, NULL); \
87  if (hentry == NULL) \
88  elog(WARNING, "trying to delete portal name that does not exist"); \
89 } while(0)
90 
91 static MemoryContext TopPortalContext = NULL;
92 
93 
94 /* ----------------------------------------------------------------
95  * public portal interface functions
96  * ----------------------------------------------------------------
97  */
98 
99 /*
100  * EnablePortalManager
101  * Enables the portal management module at backend startup.
102  */
103 void
104 EnablePortalManager(void)
105 {
106  HASHCTL ctl;
107 
108  Assert(TopPortalContext == NULL);
109 
110  TopPortalContext = AllocSetContextCreate(TopMemoryContext,
111  "TopPortalContext",
112  ALLOCSET_DEFAULT_SIZES);
113 
114  ctl.keysize = MAX_PORTALNAME_LEN;
115  ctl.entrysize = sizeof(PortalHashEnt);
116 
117  /*
118  * use PORTALS_PER_USER as a guess of how many hash table entries to
119  * create, initially
120  */
121  PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
122  &ctl, HASH_ELEM | HASH_STRINGS);
123 }
124 
125 /*
126  * GetPortalByName
127  * Returns a portal given a portal name, or NULL if name not found.
128  */
129 Portal
130 GetPortalByName(const char *name)
131 {
132  Portal portal;
133 
134  if (PointerIsValid(name))
135  PortalHashTableLookup(name, portal);
136  else
137  portal = NULL;
138 
139  return portal;
140 }
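A minimal lookup sketch (not from this file; cursor_name is an assumed caller variable), roughly how portalcmds.c resolves a cursor by name before operating on it:

    Portal      portal = GetPortalByName(cursor_name);

    if (!PortalIsValid(portal))
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_CURSOR),
                 errmsg("cursor \"%s\" does not exist", cursor_name)));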
141 
142 /*
143  * PortalGetPrimaryStmt
144  * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145  *
146  * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147  * portal are marked canSetTag, returns the first one. Neither of these
148  * cases should occur in present usages of this function.
149  */
150 PlannedStmt *
151 PortalGetPrimaryStmt(Portal portal)
152 {
153  ListCell *lc;
154 
155  foreach(lc, portal->stmts)
156  {
157  PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
158 
159  if (stmt->canSetTag)
160  return stmt;
161  }
162  return NULL;
163 }
164 
165 /*
166  * CreatePortal
167  * Returns a new portal given a name.
168  *
169  * allowDup: if true, automatically drop any pre-existing portal of the
170  * same name (if false, an error is raised).
171  *
172  * dupSilent: if true, don't even emit a WARNING.
173  */
174 Portal
175 CreatePortal(const char *name, bool allowDup, bool dupSilent)
176 {
177  Portal portal;
178 
179  AssertArg(PointerIsValid(name));
180 
181  portal = GetPortalByName(name);
182  if (PortalIsValid(portal))
183  {
184  if (!allowDup)
185  ereport(ERROR,
186  (errcode(ERRCODE_DUPLICATE_CURSOR),
187  errmsg("cursor \"%s\" already exists", name)));
188  if (!dupSilent)
189  ereport(WARNING,
190  (errcode(ERRCODE_DUPLICATE_CURSOR),
191  errmsg("closing existing cursor \"%s\"",
192  name)));
193  PortalDrop(portal, false);
194  }
195 
196  /* make new portal structure */
197  portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
198 
199  /* initialize portal context; typically it won't store much */
200  portal->portalContext = AllocSetContextCreate(TopPortalContext,
201  "PortalContext",
202  ALLOCSET_SMALL_SIZES);
203 
204  /* create a resource owner for the portal */
205  portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
206  "Portal");
207 
208  /* initialize portal fields that don't start off zero */
209  portal->status = PORTAL_NEW;
210  portal->cleanup = PortalCleanup;
211  portal->createSubid = GetCurrentSubTransactionId();
212  portal->activeSubid = portal->createSubid;
213  portal->strategy = PORTAL_MULTI_QUERY;
214  portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
215  portal->atStart = true;
216  portal->atEnd = true; /* disallow fetches until query is set */
217  portal->visible = true;
218  portal->creation_time = GetCurrentStatementStartTimestamp();
219 
220  /* put portal in table (sets portal->name) */
221  PortalHashTableInsert(portal, name);
222 
223  /* for named portals reuse portal->name copy */
224  MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
225 
226  return portal;
227 }
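A hedged sketch of the usual caller flow (names such as query_string, commandTag, and plantree_list are assumed to have been prepared elsewhere, along the lines of exec_simple_query()):

    Portal      portal = CreateNewPortal();

    PortalDefineQuery(portal,
                      NULL,             /* not from a named prepared statement */
                      query_string,     /* assumed: the original source text */
                      commandTag,       /* assumed: from CreateCommandTag() */
                      plantree_list,    /* assumed: List of PlannedStmt nodes */
                      NULL);            /* no CachedPlan refcount handed off */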
228 
229 /*
230  * CreateNewPortal
231  * Create a new portal, assigning it a random nonconflicting name.
232  */
233 Portal
234 CreateNewPortal(void)
235 {
236  static unsigned int unnamed_portal_count = 0;
237 
238  char portalname[MAX_PORTALNAME_LEN];
239 
240  /* Select a nonconflicting name */
241  for (;;)
242  {
243  unnamed_portal_count++;
244  sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
245  if (GetPortalByName(portalname) == NULL)
246  break;
247  }
248 
249  return CreatePortal(portalname, false, false);
250 }
251 
252 /*
253  * PortalDefineQuery
254  * A simple subroutine to establish a portal's query.
255  *
256  * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
257  * allowed anymore to pass NULL. (If you really don't have source text,
258  * you can pass a constant string, perhaps "(query not available)".)
259  *
260  * commandTag shall be NULL if and only if the original query string
261  * (before rewriting) was an empty string. Also, the passed commandTag must
262  * be a pointer to a constant string, since it is not copied.
263  *
264  * If cplan is provided, then it is a cached plan containing the stmts, and
265  * the caller must have done GetCachedPlan(), causing a refcount increment.
266  * The refcount will be released when the portal is destroyed.
267  *
268  * If cplan is NULL, then it is the caller's responsibility to ensure that
269  * the passed plan trees have adequate lifetime. Typically this is done by
270  * copying them into the portal's context.
271  *
272  * The caller is also responsible for ensuring that the passed prepStmtName
273  * (if not NULL) and sourceText have adequate lifetime.
274  *
275  * NB: this function mustn't do much beyond storing the passed values; in
276  * particular don't do anything that risks elog(ERROR). If that were to
277  * happen here before storing the cplan reference, we'd leak the plancache
278  * refcount that the caller is trying to hand off to us.
279  */
280 void
281 PortalDefineQuery(Portal portal,
282  const char *prepStmtName,
283  const char *sourceText,
284  CommandTag commandTag,
285  List *stmts,
286  CachedPlan *cplan)
287 {
288  AssertArg(PortalIsValid(portal));
289  AssertState(portal->status == PORTAL_NEW);
290 
291  AssertArg(sourceText != NULL);
292  AssertArg(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
293 
294  portal->prepStmtName = prepStmtName;
295  portal->sourceText = sourceText;
296  portal->qc.commandTag = commandTag;
297  portal->qc.nprocessed = 0;
298  portal->commandTag = commandTag;
299  portal->stmts = stmts;
300  portal->cplan = cplan;
301  portal->status = PORTAL_DEFINED;
302 }
303 
304 /*
305  * PortalReleaseCachedPlan
306  * Release a portal's reference to its cached plan, if any.
307  */
308 static void
309 PortalReleaseCachedPlan(Portal portal)
310 {
311  if (portal->cplan)
312  {
313  ReleaseCachedPlan(portal->cplan, NULL);
314  portal->cplan = NULL;
315 
316  /*
317  * We must also clear portal->stmts which is now a dangling reference
318  * to the cached plan's plan list. This protects any code that might
319  * try to examine the Portal later.
320  */
321  portal->stmts = NIL;
322  }
323 }
324 
325 /*
326  * PortalCreateHoldStore
327  * Create the tuplestore for a portal.
328  */
329 void
330 PortalCreateHoldStore(Portal portal)
331 {
332  MemoryContext oldcxt;
333 
334  Assert(portal->holdContext == NULL);
335  Assert(portal->holdStore == NULL);
336  Assert(portal->holdSnapshot == NULL);
337 
338  /*
339  * Create the memory context that is used for storage of the tuple set.
340  * Note this is NOT a child of the portal's portalContext.
341  */
342  portal->holdContext =
343  AllocSetContextCreate(TopPortalContext,
344  "PortalHoldContext",
345  ALLOCSET_DEFAULT_SIZES);
346 
347  /*
348  * Create the tuple store, selecting cross-transaction temp files, and
349  * enabling random access only if cursor requires scrolling.
350  *
351  * XXX: Should maintenance_work_mem be used for the portal size?
352  */
353  oldcxt = MemoryContextSwitchTo(portal->holdContext);
354 
355  portal->holdStore =
356  tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
357  true, work_mem);
358 
359  MemoryContextSwitchTo(oldcxt);
360 }
361 
362 /*
363  * PinPortal
364  * Protect a portal from dropping.
365  *
366  * A pinned portal is still unpinned and dropped at transaction or
367  * subtransaction abort.
368  */
369 void
370 PinPortal(Portal portal)
371 {
372  if (portal->portalPinned)
373  elog(ERROR, "portal already pinned");
374 
375  portal->portalPinned = true;
376 }
377 
378 void
379 UnpinPortal(Portal portal)
380 {
381  if (!portal->portalPinned)
382  elog(ERROR, "portal not pinned");
383 
384  portal->portalPinned = false;
385 }
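A sketch of how a caller (e.g. a PL or SPI-level cursor loop) might bracket user code with a pin so the portal cannot be dropped out from under it; error handling is simplified and the portal variable is assumed:

    PinPortal(portal);
    PG_TRY();
    {
        /* ... run code that must not drop this portal ... */
    }
    PG_FINALLY();
    {
        UnpinPortal(portal);
    }
    PG_END_TRY();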
386 
387 /*
388  * MarkPortalActive
389  * Transition a portal from READY to ACTIVE state.
390  *
391  * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
392  */
393 void
394 MarkPortalActive(Portal portal)
395 {
396  /* For safety, this is a runtime test not just an Assert */
397  if (portal->status != PORTAL_READY)
398  ereport(ERROR,
399  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
400  errmsg("portal \"%s\" cannot be run", portal->name)));
401  /* Perform the state transition */
402  portal->status = PORTAL_ACTIVE;
403  portal->activeSubid = GetCurrentSubTransactionId();
404 }
405 
406 /*
407  * MarkPortalDone
408  * Transition a portal from ACTIVE to DONE state.
409  *
410  * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
411  */
412 void
413 MarkPortalDone(Portal portal)
414 {
415  /* Perform the state transition */
416  Assert(portal->status == PORTAL_ACTIVE);
417  portal->status = PORTAL_DONE;
418 
419  /*
420  * Allow portalcmds.c to clean up the state it knows about. We might as
421  * well do that now, since the portal can't be executed any more.
422  *
423  * In some cases involving execution of a ROLLBACK command in an already
424  * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
425  * with the cleanup hook still unexecuted.
426  */
427  if (PointerIsValid(portal->cleanup))
428  {
429  portal->cleanup(portal);
430  portal->cleanup = NULL;
431  }
432 }
433 
434 /*
435  * MarkPortalFailed
436  * Transition a portal into FAILED state.
437  *
438  * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
439  */
440 void
441 MarkPortalFailed(Portal portal)
442 {
443  /* Perform the state transition */
444  Assert(portal->status != PORTAL_DONE);
445  portal->status = PORTAL_FAILED;
446 
447  /*
448  * Allow portalcmds.c to clean up the state it knows about. We might as
449  * well do that now, since the portal can't be executed any more.
450  *
451  * In some cases involving cleanup of an already aborted transaction, this
452  * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
453  * still unexecuted.
454  */
455  if (PointerIsValid(portal->cleanup))
456  {
457  portal->cleanup(portal);
458  portal->cleanup = NULL;
459  }
460 }
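Taken together with MarkPortalActive() and MarkPortalDone() above, a typical execution cycle driven from pquery.c looks roughly like this sketch (error paths simplified, portal assumed to be in READY state):

    MarkPortalActive(portal);
    PG_TRY();
    {
        /* ... run the executor for this portal ... */
    }
    PG_CATCH();
    {
        MarkPortalFailed(portal);
        PG_RE_THROW();
    }
    PG_END_TRY();
    MarkPortalDone(portal);     /* or set status back to READY if merely suspended */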
461 
462 /*
463  * PortalDrop
464  * Destroy the portal.
465  */
466 void
467 PortalDrop(Portal portal, bool isTopCommit)
468 {
469  AssertArg(PortalIsValid(portal));
470 
471  /*
472  * Don't allow dropping a pinned portal, it's still needed by whoever
473  * pinned it.
474  */
475  if (portal->portalPinned)
476  ereport(ERROR,
477  (errcode(ERRCODE_INVALID_CURSOR_STATE),
478  errmsg("cannot drop pinned portal \"%s\"", portal->name)));
479 
480  /*
481  * Not sure if the PORTAL_ACTIVE case can validly happen or not...
482  */
483  if (portal->status == PORTAL_ACTIVE)
484  ereport(ERROR,
485  (errcode(ERRCODE_INVALID_CURSOR_STATE),
486  errmsg("cannot drop active portal \"%s\"", portal->name)));
487 
488  /*
489  * Allow portalcmds.c to clean up the state it knows about, in particular
490  * shutting down the executor if still active. This step potentially runs
491  * user-defined code so failure has to be expected. It's the cleanup
492  * hook's responsibility to not try to do that more than once, in the case
493  * that failure occurs and then we come back to drop the portal again
494  * during transaction abort.
495  *
496  * Note: in most paths of control, this will have been done already in
497  * MarkPortalDone or MarkPortalFailed. We're just making sure.
498  */
499  if (PointerIsValid(portal->cleanup))
500  {
501  portal->cleanup(portal);
502  portal->cleanup = NULL;
503  }
504 
505  /* There shouldn't be an active snapshot anymore, except after error */
506  Assert(portal->portalSnapshot == NULL || !isTopCommit);
507 
508  /*
509  * Remove portal from hash table. Because we do this here, we will not
510  * come back to try to remove the portal again if there's any error in the
511  * subsequent steps. Better to leak a little memory than to get into an
512  * infinite error-recovery loop.
513  */
514  PortalHashTableDelete(portal);
515 
516  /* drop cached plan reference, if any */
517  PortalReleaseCachedPlan(portal);
518 
519  /*
520  * If portal has a snapshot protecting its data, release that. This needs
521  * a little care since the registration will be attached to the portal's
522  * resowner; if the portal failed, we will already have released the
523  * resowner (and the snapshot) during transaction abort.
524  */
525  if (portal->holdSnapshot)
526  {
527  if (portal->resowner)
528  UnregisterSnapshotFromOwner(portal->holdSnapshot,
529  portal->resowner);
530  portal->holdSnapshot = NULL;
531  }
532 
533  /*
534  * Release any resources still attached to the portal. There are several
535  * cases being covered here:
536  *
537  * Top transaction commit (indicated by isTopCommit): normally we should
538  * do nothing here and let the regular end-of-transaction resource
539  * releasing mechanism handle these resources too. However, if we have a
540  * FAILED portal (eg, a cursor that got an error), we'd better clean up
541  * its resources to avoid resource-leakage warning messages.
542  *
543  * Sub transaction commit: never comes here at all, since we don't kill
544  * any portals in AtSubCommit_Portals().
545  *
546  * Main or sub transaction abort: we will do nothing here because
547  * portal->resowner was already set NULL; the resources were already
548  * cleaned up in transaction abort.
549  *
550  * Ordinary portal drop: must release resources. However, if the portal
551  * is not FAILED then we do not release its locks. The locks become the
552  * responsibility of the transaction's ResourceOwner (since it is the
553  * parent of the portal's owner) and will be released when the transaction
554  * eventually ends.
555  */
556  if (portal->resowner &&
557  (!isTopCommit || portal->status == PORTAL_FAILED))
558  {
559  bool isCommit = (portal->status != PORTAL_FAILED);
560 
561  ResourceOwnerRelease(portal->resowner,
562  RESOURCE_RELEASE_BEFORE_LOCKS,
563  isCommit, false);
564  ResourceOwnerRelease(portal->resowner,
565  RESOURCE_RELEASE_LOCKS,
566  isCommit, false);
567  ResourceOwnerRelease(portal->resowner,
568  RESOURCE_RELEASE_AFTER_LOCKS,
569  isCommit, false);
570  ResourceOwnerDelete(portal->resowner);
571  }
572  portal->resowner = NULL;
573 
574  /*
575  * Delete tuplestore if present. We should do this even under error
576  * conditions; since the tuplestore would have been using cross-
577  * transaction storage, its temp files need to be explicitly deleted.
578  */
579  if (portal->holdStore)
580  {
581  MemoryContext oldcontext;
582 
583  oldcontext = MemoryContextSwitchTo(portal->holdContext);
584  tuplestore_end(portal->holdStore);
585  MemoryContextSwitchTo(oldcontext);
586  portal->holdStore = NULL;
587  }
588 
589  /* delete tuplestore storage, if any */
590  if (portal->holdContext)
591  MemoryContextDelete(portal->holdContext);
592 
593  /* release subsidiary storage */
594  MemoryContextDelete(portal->portalContext);
595 
596  /* release portal struct (it's in TopPortalContext) */
597  pfree(portal);
598 }
599 
600 /*
601  * Delete all declared cursors.
602  *
603  * Used by commands: CLOSE ALL, DISCARD ALL
604  */
605 void
606 PortalHashTableDeleteAll(void)
607 {
608  HASH_SEQ_STATUS status;
609  PortalHashEnt *hentry;
610 
611  if (PortalHashTable == NULL)
612  return;
613 
614  hash_seq_init(&status, PortalHashTable);
615  while ((hentry = hash_seq_search(&status)) != NULL)
616  {
617  Portal portal = hentry->portal;
618 
619  /* Can't close the active portal (the one running the command) */
620  if (portal->status == PORTAL_ACTIVE)
621  continue;
622 
623  PortalDrop(portal, false);
624 
625  /* Restart the iteration in case that led to other drops */
626  hash_seq_term(&status);
627  hash_seq_init(&status, PortalHashTable);
628  }
629 }
630 
631 /*
632  * "Hold" a portal. Prepare it for access by later transactions.
633  */
634 static void
635 HoldPortal(Portal portal)
636 {
637  /*
638  * Note that PersistHoldablePortal() must release all resources used by
639  * the portal that are local to the creating transaction.
640  */
641  PortalCreateHoldStore(portal);
642  PersistHoldablePortal(portal);
643 
644  /* drop cached plan reference, if any */
645  PortalReleaseCachedPlan(portal);
646 
647  /*
648  * Any resources belonging to the portal will be released in the upcoming
649  * transaction-wide cleanup; the portal will no longer have its own
650  * resources.
651  */
652  portal->resowner = NULL;
653 
654  /*
655  * Having successfully exported the holdable cursor, mark it as not
656  * belonging to this transaction.
657  */
658  portal->createSubid = InvalidSubTransactionId;
659  portal->activeSubid = InvalidSubTransactionId;
660 }
661 
662 /*
663  * Pre-commit processing for portals.
664  *
665  * Holdable cursors created in this transaction need to be converted to
666  * materialized form, since we are going to close down the executor and
667  * release locks. Non-holdable portals created in this transaction are
668  * simply removed. Portals remaining from prior transactions should be
669  * left untouched.
670  *
671  * Returns true if any portals changed state (possibly causing user-defined
672  * code to be run), false if not.
673  */
674 bool
675 PreCommit_Portals(bool isPrepare)
676 {
677  bool result = false;
678  HASH_SEQ_STATUS status;
679  PortalHashEnt *hentry;
680 
681  hash_seq_init(&status, PortalHashTable);
682 
683  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
684  {
685  Portal portal = hentry->portal;
686 
687  /*
688  * There should be no pinned portals anymore. Complain if someone
689  * leaked one. Auto-held portals are allowed; we assume that whoever
690  * pinned them is managing them.
691  */
692  if (portal->portalPinned && !portal->autoHeld)
693  elog(ERROR, "cannot commit while a portal is pinned");
694 
695  /*
696  * Do not touch active portals --- this can only happen in the case of
697  * a multi-transaction utility command, such as VACUUM, or a commit in
698  * a procedure.
699  *
700  * Note however that any resource owner attached to such a portal is
701  * still going to go away, so don't leave a dangling pointer. Also
702  * unregister any snapshots held by the portal, mainly to avoid
703  * snapshot leak warnings from ResourceOwnerRelease().
704  */
705  if (portal->status == PORTAL_ACTIVE)
706  {
707  if (portal->holdSnapshot)
708  {
709  if (portal->resowner)
710  UnregisterSnapshotFromOwner(portal->holdSnapshot,
711  portal->resowner);
712  portal->holdSnapshot = NULL;
713  }
714  portal->resowner = NULL;
715  /* Clear portalSnapshot too, for cleanliness */
716  portal->portalSnapshot = NULL;
717  continue;
718  }
719 
720  /* Is it a holdable portal created in the current xact? */
721  if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
722  portal->createSubid != InvalidSubTransactionId &&
723  portal->status == PORTAL_READY)
724  {
725  /*
726  * We are exiting the transaction that created a holdable cursor.
727  * Instead of dropping the portal, prepare it for access by later
728  * transactions.
729  *
730  * However, if this is PREPARE TRANSACTION rather than COMMIT,
731  * refuse PREPARE, because the semantics seem pretty unclear.
732  */
733  if (isPrepare)
734  ereport(ERROR,
735  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
736  errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
737 
738  HoldPortal(portal);
739 
740  /* Report we changed state */
741  result = true;
742  }
743  else if (portal->createSubid == InvalidSubTransactionId)
744  {
745  /*
746  * Do nothing to cursors held over from a previous transaction
747  * (including ones we just froze in a previous cycle of this loop)
748  */
749  continue;
750  }
751  else
752  {
753  /* Zap all non-holdable portals */
754  PortalDrop(portal, true);
755 
756  /* Report we changed state */
757  result = true;
758  }
759 
760  /*
761  * After either freezing or dropping a portal, we have to restart the
762  * iteration, because we could have invoked user-defined code that
763  * caused a drop of the next portal in the hash chain.
764  */
765  hash_seq_term(&status);
766  hash_seq_init(&status, PortalHashTable);
767  }
768 
769  return result;
770 }
771 
772 /*
773  * Abort processing for portals.
774  *
775  * At this point we run the cleanup hook if present, but we can't release the
776  * portal's memory until the cleanup call.
777  */
778 void
779 AtAbort_Portals(void)
780 {
781  HASH_SEQ_STATUS status;
782  PortalHashEnt *hentry;
783 
784  hash_seq_init(&status, PortalHashTable);
785 
786  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
787  {
788  Portal portal = hentry->portal;
789 
790  /*
791  * When elog(FATAL) is in progress, we need to set the active portal to
792  * failed, so that PortalCleanup() doesn't run the executor shutdown.
793  */
794  if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
795  MarkPortalFailed(portal);
796 
797  /*
798  * Do nothing else to cursors held over from a previous transaction.
799  */
800  if (portal->createSubid == InvalidSubTransactionId)
801  continue;
802 
803  /*
804  * Do nothing to auto-held cursors. This is similar to the case of a
805  * cursor from a previous transaction, but it could also be that the
806  * cursor was auto-held in this transaction, so it wants to live on.
807  */
808  if (portal->autoHeld)
809  continue;
810 
811  /*
812  * If it was created in the current transaction, we can't do normal
813  * shutdown on a READY portal either; it might refer to objects
814  * created in the failed transaction. See comments in
815  * AtSubAbort_Portals.
816  */
817  if (portal->status == PORTAL_READY)
818  MarkPortalFailed(portal);
819 
820  /*
821  * Allow portalcmds.c to clean up the state it knows about, if we
822  * haven't already.
823  */
824  if (PointerIsValid(portal->cleanup))
825  {
826  portal->cleanup(portal);
827  portal->cleanup = NULL;
828  }
829 
830  /* drop cached plan reference, if any */
831  PortalReleaseCachedPlan(portal);
832 
833  /*
834  * Any resources belonging to the portal will be released in the
835  * upcoming transaction-wide cleanup; they will be gone before we run
836  * PortalDrop.
837  */
838  portal->resowner = NULL;
839 
840  /*
841  * Although we can't delete the portal data structure proper, we can
842  * release any memory in subsidiary contexts, such as executor state.
843  * The cleanup hook was the last thing that might have needed data
844  * there. But leave active portals alone.
845  */
846  if (portal->status != PORTAL_ACTIVE)
847  MemoryContextDeleteChildren(portal->portalContext);
848  }
849 }
850 
851 /*
852  * Post-abort cleanup for portals.
853  *
854  * Delete all portals not held over from prior transactions. */
855 void
856 AtCleanup_Portals(void)
857 {
858  HASH_SEQ_STATUS status;
859  PortalHashEnt *hentry;
860 
861  hash_seq_init(&status, PortalHashTable);
862 
863  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
864  {
865  Portal portal = hentry->portal;
866 
867  /*
868  * Do not touch active portals --- this can only happen in the case of
869  * a multi-transaction command.
870  */
871  if (portal->status == PORTAL_ACTIVE)
872  continue;
873 
874  /*
875  * Do nothing to cursors held over from a previous transaction or
876  * auto-held ones.
877  */
878  if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
879  {
880  Assert(portal->status != PORTAL_ACTIVE);
881  Assert(portal->resowner == NULL);
882  continue;
883  }
884 
885  /*
886  * If a portal is still pinned, forcibly unpin it. PortalDrop will not
887  * let us drop the portal otherwise. Whoever pinned the portal was
888  * interrupted by the abort too and won't try to use it anymore.
889  */
890  if (portal->portalPinned)
891  portal->portalPinned = false;
892 
893  /*
894  * We had better not call any user-defined code during cleanup, so if
895  * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
896  */
897  if (PointerIsValid(portal->cleanup))
898  {
899  elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
900  portal->cleanup = NULL;
901  }
902 
903  /* Zap it. */
904  PortalDrop(portal, false);
905  }
906 }
907 
908 /*
909  * Portal-related cleanup when we return to the main loop on error.
910  *
911  * This is different from the cleanup at transaction abort. Auto-held portals
912  * are cleaned up on error but not on transaction abort.
913  */
914 void
915 PortalErrorCleanup(void)
916 {
917  HASH_SEQ_STATUS status;
918  PortalHashEnt *hentry;
919 
920  hash_seq_init(&status, PortalHashTable);
921 
922  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
923  {
924  Portal portal = hentry->portal;
925 
926  if (portal->autoHeld)
927  {
928  portal->portalPinned = false;
929  PortalDrop(portal, false);
930  }
931  }
932 }
933 
934 /*
935  * Pre-subcommit processing for portals.
936  *
937  * Reassign portals created or used in the current subtransaction to the
938  * parent subtransaction.
939  */
940 void
941 AtSubCommit_Portals(SubTransactionId mySubid,
942  SubTransactionId parentSubid,
943  ResourceOwner parentXactOwner)
944 {
945  HASH_SEQ_STATUS status;
946  PortalHashEnt *hentry;
947 
948  hash_seq_init(&status, PortalHashTable);
949 
950  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
951  {
952  Portal portal = hentry->portal;
953 
954  if (portal->createSubid == mySubid)
955  {
956  portal->createSubid = parentSubid;
957  if (portal->resowner)
958  ResourceOwnerNewParent(portal->resowner, parentXactOwner);
959  }
960  if (portal->activeSubid == mySubid)
961  portal->activeSubid = parentSubid;
962  }
963 }
964 
965 /*
966  * Subtransaction abort handling for portals.
967  *
968  * Deactivate portals created or used during the failed subtransaction.
969  * Note that per AtSubCommit_Portals, this will catch portals created/used
970  * in descendants of the subtransaction too.
971  *
972  * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
973  */
974 void
975 AtSubAbort_Portals(SubTransactionId mySubid,
976  SubTransactionId parentSubid,
977  ResourceOwner myXactOwner,
978  ResourceOwner parentXactOwner)
979 {
980  HASH_SEQ_STATUS status;
981  PortalHashEnt *hentry;
982 
983  hash_seq_init(&status, PortalHashTable);
984 
985  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
986  {
987  Portal portal = hentry->portal;
988 
989  /* Was it created in this subtransaction? */
990  if (portal->createSubid != mySubid)
991  {
992  /* No, but maybe it was used in this subtransaction? */
993  if (portal->activeSubid == mySubid)
994  {
995  /* Maintain activeSubid until the portal is removed */
996  portal->activeSubid = parentSubid;
997 
998  /*
999  * A MarkPortalActive() caller ran an upper-level portal in
1000  * this subtransaction and left the portal ACTIVE. This can't
1001  * happen, but force the portal into FAILED state for the same
1002  * reasons discussed below.
1003  *
1004  * We assume we can get away without forcing upper-level READY
1005  * portals to fail, even if they were run and then suspended.
1006  * In theory a suspended upper-level portal could have
1007  * acquired some references to objects that are about to be
1008  * destroyed, but there should be sufficient defenses against
1009  * such cases: the portal's original query cannot contain such
1010  * references, and any references within, say, cached plans of
1011  * PL/pgSQL functions are not from active queries and should
1012  * be protected by revalidation logic.
1013  */
1014  if (portal->status == PORTAL_ACTIVE)
1015  MarkPortalFailed(portal);
1016 
1017  /*
1018  * Also, if we failed it during the current subtransaction
1019  * (either just above, or earlier), reattach its resource
1020  * owner to the current subtransaction's resource owner, so
1021  * that any resources it still holds will be released while
1022  * cleaning up this subtransaction. This prevents some corner
1023  * cases wherein we might get Asserts or worse while cleaning
1024  * up objects created during the current subtransaction
1025  * (because they're still referenced within this portal).
1026  */
1027  if (portal->status == PORTAL_FAILED && portal->resowner)
1028  {
1029  ResourceOwnerNewParent(portal->resowner, myXactOwner);
1030  portal->resowner = NULL;
1031  }
1032  }
1033  /* Done if it wasn't created in this subtransaction */
1034  continue;
1035  }
1036 
1037  /*
1038  * Force any live portals of my own subtransaction into FAILED state.
1039  * We have to do this because they might refer to objects created or
1040  * changed in the failed subtransaction, leading to crashes within
1041  * ExecutorEnd when portalcmds.c tries to close down the portal.
1042  * Currently, every MarkPortalActive() caller ensures it updates the
1043  * portal status again before relinquishing control, so ACTIVE can't
1044  * happen here. If it does happen, dispose the portal like existing
1045  * MarkPortalActive() callers would.
1046  */
1047  if (portal->status == PORTAL_READY ||
1048  portal->status == PORTAL_ACTIVE)
1049  MarkPortalFailed(portal);
1050 
1051  /*
1052  * Allow portalcmds.c to clean up the state it knows about, if we
1053  * haven't already.
1054  */
1055  if (PointerIsValid(portal->cleanup))
1056  {
1057  portal->cleanup(portal);
1058  portal->cleanup = NULL;
1059  }
1060 
1061  /* drop cached plan reference, if any */
1062  PortalReleaseCachedPlan(portal);
1063 
1064  /*
1065  * Any resources belonging to the portal will be released in the
1066  * upcoming transaction-wide cleanup; they will be gone before we run
1067  * PortalDrop.
1068  */
1069  portal->resowner = NULL;
1070 
1071  /*
1072  * Although we can't delete the portal data structure proper, we can
1073  * release any memory in subsidiary contexts, such as executor state.
1074  * The cleanup hook was the last thing that might have needed data
1075  * there.
1076  */
1077  MemoryContextDeleteChildren(portal->portalContext);
1078  }
1079 }
1080 
1081 /*
1082  * Post-subabort cleanup for portals.
1083  *
1084  * Drop all portals created in the failed subtransaction (but note that
1085  * we will not drop any that were reassigned to the parent above).
1086  */
1087 void
1088 AtSubCleanup_Portals(SubTransactionId mySubid)
1089 {
1090  HASH_SEQ_STATUS status;
1091  PortalHashEnt *hentry;
1092 
1093  hash_seq_init(&status, PortalHashTable);
1094 
1095  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1096  {
1097  Portal portal = hentry->portal;
1098 
1099  if (portal->createSubid != mySubid)
1100  continue;
1101 
1102  /*
1103  * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1104  * let us drop the portal otherwise. Whoever pinned the portal was
1105  * interrupted by the abort too and won't try to use it anymore.
1106  */
1107  if (portal->portalPinned)
1108  portal->portalPinned = false;
1109 
1110  /*
1111  * We had better not call any user-defined code during cleanup, so if
1112  * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1113  */
1114  if (PointerIsValid(portal->cleanup))
1115  {
1116  elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1117  portal->cleanup = NULL;
1118  }
1119 
1120  /* Zap it. */
1121  PortalDrop(portal, false);
1122  }
1123 }
1124 
1125 /* Find all available cursors */
1126 Datum
1127 pg_cursor(PG_FUNCTION_ARGS)
1128 {
1129  ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1130  TupleDesc tupdesc;
1131  Tuplestorestate *tupstore;
1132  MemoryContext per_query_ctx;
1133  MemoryContext oldcontext;
1134  HASH_SEQ_STATUS hash_seq;
1135  PortalHashEnt *hentry;
1136 
1137  /* check to see if caller supports us returning a tuplestore */
1138  if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
1139  ereport(ERROR,
1140  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1141  errmsg("set-valued function called in context that cannot accept a set")));
1142  if (!(rsinfo->allowedModes & SFRM_Materialize))
1143  ereport(ERROR,
1144  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1145  errmsg("materialize mode required, but it is not allowed in this context")));
1146 
1147  /* need to build tuplestore in query context */
1148  per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
1149  oldcontext = MemoryContextSwitchTo(per_query_ctx);
1150 
1151  /*
1152  * build tupdesc for result tuples. This must match the definition of the
1153  * pg_cursors view in system_views.sql
1154  */
1155  tupdesc = CreateTemplateTupleDesc(6);
1156  TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
1157  TEXTOID, -1, 0);
1158  TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
1159  TEXTOID, -1, 0);
1160  TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
1161  BOOLOID, -1, 0);
1162  TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
1163  BOOLOID, -1, 0);
1164  TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
1165  BOOLOID, -1, 0);
1166  TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
1167  TIMESTAMPTZOID, -1, 0);
1168 
1169  /*
1170  * We put all the tuples into a tuplestore in one scan of the hashtable.
1171  * This avoids any issue of the hashtable possibly changing between calls.
1172  */
1173  tupstore =
1174  tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
1175  false, work_mem);
1176 
1177  /* generate junk in short-term context */
1178  MemoryContextSwitchTo(oldcontext);
1179 
1180  hash_seq_init(&hash_seq, PortalHashTable);
1181  while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1182  {
1183  Portal portal = hentry->portal;
1184  Datum values[6];
1185  bool nulls[6];
1186 
1187  /* report only "visible" entries */
1188  if (!portal->visible)
1189  continue;
1190 
1191  MemSet(nulls, 0, sizeof(nulls));
1192 
1193  values[0] = CStringGetTextDatum(portal->name);
1194  values[1] = CStringGetTextDatum(portal->sourceText);
1195  values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1196  values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1197  values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1198  values[5] = TimestampTzGetDatum(portal->creation_time);
1199 
1200  tuplestore_putvalues(tupstore, tupdesc, values, nulls);
1201  }
1202 
1203  /* clean up and return the tuplestore */
1204  tuplestore_donestoring(tupstore);
1205 
1206  rsinfo->returnMode = SFRM_Materialize;
1207  rsinfo->setResult = tupstore;
1208  rsinfo->setDesc = tupdesc;
1209 
1210  return (Datum) 0;
1211 }
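pg_cursor() feeds the pg_cursors system view; a backend-side sketch of inspecting it through SPI (assumes SPI is connected within a transaction):

    SPI_connect();
    if (SPI_execute("SELECT name, statement FROM pg_cursors", true, 0) == SPI_OK_SELECT)
        elog(INFO, "found " UINT64_FORMAT " visible cursors", SPI_processed);
    SPI_finish();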
1212 
1213 bool
1214 ThereAreNoReadyPortals(void)
1215 {
1216  HASH_SEQ_STATUS status;
1217  PortalHashEnt *hentry;
1218 
1219  hash_seq_init(&status, PortalHashTable);
1220 
1221  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1222  {
1223  Portal portal = hentry->portal;
1224 
1225  if (portal->status == PORTAL_READY)
1226  return false;
1227  }
1228 
1229  return true;
1230 }
1231 
1232 /*
1233  * Hold all pinned portals.
1234  *
1235  * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1236  * called to protect internally-generated cursors from being dropped during
1237  * the transaction shutdown. Currently, SPI calls this automatically; PLs
1238  * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1239  * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1240  * because we need to run user-defined code while persisting a portal.
1241  * It's too late to do that once transaction abort has started.)
1242  *
1243  * We protect such portals by converting them to held cursors. We mark them
1244  * as "auto-held" so that exception exit knows to clean them up. (In normal,
1245  * non-exception code paths, the PL needs to clean such portals itself, since
1246  * transaction end won't do it anymore; but that should be normal practice
1247  * anyway.)
1248  */
1249 void
1250 HoldPinnedPortals(void)
1251 {
1252  HASH_SEQ_STATUS status;
1253  PortalHashEnt *hentry;
1254 
1255  hash_seq_init(&status, PortalHashTable);
1256 
1257  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1258  {
1259  Portal portal = hentry->portal;
1260 
1261  if (portal->portalPinned && !portal->autoHeld)
1262  {
1263  /*
1264  * Doing transaction control, especially abort, inside a cursor
1265  * loop that is not read-only, for example using UPDATE ...
1266  * RETURNING, has weird semantics issues. Also, this
1267  * implementation wouldn't work, because such portals cannot be
1268  * held. (The core grammar enforces that only SELECT statements
1269  * can drive a cursor, but for example PL/pgSQL does not restrict
1270  * it.)
1271  */
1272  if (portal->strategy != PORTAL_ONE_SELECT)
1273  ereport(ERROR,
1274  (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
1275  errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1276 
1277  /* Verify it's in a suitable state to be held */
1278  if (portal->status != PORTAL_READY)
1279  elog(ERROR, "pinned portal is not ready to be auto-held");
1280 
1281  HoldPortal(portal);
1282  portal->autoHeld = true;
1283  }
1284  }
1285 }
1286 
1287 /*
1288  * Drop the outer active snapshots for all portals, so that no snapshots
1289  * remain active.
1290  *
1291  * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1292  * ROLLBACK inside a procedure. This has to be separate from that since it
1293  * should not be run until we're done with steps that are likely to fail.
1294  *
1295  * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1296  * need to clean up snapshot management in VACUUM and perhaps other places.
1297  */
1298 void
1299 ForgetPortalSnapshots(void)
1300 {
1301  HASH_SEQ_STATUS status;
1302  PortalHashEnt *hentry;
1303  int numPortalSnaps = 0;
1304  int numActiveSnaps = 0;
1305 
1306  /* First, scan PortalHashTable and clear portalSnapshot fields */
1307  hash_seq_init(&status, PortalHashTable);
1308 
1309  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1310  {
1311  Portal portal = hentry->portal;
1312 
1313  if (portal->portalSnapshot != NULL)
1314  {
1315  portal->portalSnapshot = NULL;
1316  numPortalSnaps++;
1317  }
1318  /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1319  }
1320 
1321  /*
1322  * Now, pop all the active snapshots, which should be just those that were
1323  * portal snapshots. Ideally we'd drive this directly off the portal
1324  * scan, but there's no good way to visit the portals in the correct
1325  * order. So just cross-check after the fact.
1326  */
1327  while (ActiveSnapshotSet())
1328  {
1329  PopActiveSnapshot();
1330  numActiveSnaps++;
1331  }
1332 
1333  if (numPortalSnaps != numActiveSnaps)
1334  elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1335  numPortalSnaps, numActiveSnaps);
1336 }