1 /*-------------------------------------------------------------------------
2  *
3  * portalmem.c
4  * backend portal memory management
5  *
6  * Portals are objects representing the execution state of a query.
7  * This module provides memory management services for portals, but it
8  * doesn't actually run the executor for them.
9  *
10  *
11  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
12  * Portions Copyright (c) 1994, Regents of the University of California
13  *
14  * IDENTIFICATION
15  * src/backend/utils/mmgr/portalmem.c
16  *
17  *-------------------------------------------------------------------------
18  */
19 #include "postgres.h"
20 
21 #include "access/xact.h"
22 #include "catalog/pg_type.h"
23 #include "commands/portalcmds.h"
24 #include "miscadmin.h"
25 #include "storage/ipc.h"
26 #include "utils/builtins.h"
27 #include "utils/memutils.h"
28 #include "utils/snapmgr.h"
29 #include "utils/timestamp.h"
30 
31 /*
32  * Estimate of the maximum number of open portals a user would have,
33  * used in initially sizing the PortalHashTable in EnablePortalManager().
34  * Since the hash table can expand, there's no need to make this overly
35  * generous, and keeping it small avoids unnecessary overhead in the
36  * hash_seq_search() calls executed during transaction end.
37  */
38 #define PORTALS_PER_USER 16
39 
40 
41 /* ----------------
42  * Global state
43  * ----------------
44  */
45 
46 #define MAX_PORTALNAME_LEN NAMEDATALEN
47 
48 typedef struct portalhashent
49 {
50  char portalname[MAX_PORTALNAME_LEN];
51  Portal portal;
52 } PortalHashEnt;
53 
54 static HTAB *PortalHashTable = NULL;
55 
56 #define PortalHashTableLookup(NAME, PORTAL) \
57 do { \
58  PortalHashEnt *hentry; \
59  \
60  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
61  (NAME), HASH_FIND, NULL); \
62  if (hentry) \
63  PORTAL = hentry->portal; \
64  else \
65  PORTAL = NULL; \
66 } while(0)
67 
68 #define PortalHashTableInsert(PORTAL, NAME) \
69 do { \
70  PortalHashEnt *hentry; bool found; \
71  \
72  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
73  (NAME), HASH_ENTER, &found); \
74  if (found) \
75  elog(ERROR, "duplicate portal name"); \
76  hentry->portal = PORTAL; \
77  /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
78  PORTAL->name = hentry->portalname; \
79 } while(0)
80 
81 #define PortalHashTableDelete(PORTAL) \
82 do { \
83  PortalHashEnt *hentry; \
84  \
85  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
86  PORTAL->name, HASH_REMOVE, NULL); \
87  if (hentry == NULL) \
88  elog(WARNING, "trying to delete portal name that does not exist"); \
89 } while(0)
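
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * how the three macros above combine.  The caller shown is hypothetical.
 *
 *	Portal p;
 *
 *	PortalHashTableLookup("my_cursor", p);		(sets p, or NULL)
 *	if (p == NULL)
 *	{
 *		p = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *p);
 *		PortalHashTableInsert(p, "my_cursor");	(elog(ERROR) on duplicate)
 *	}
 *	...
 *	PortalHashTableDelete(p);					(removes the hash entry only)
 *
 * Because PortalHashTableInsert makes PORTAL->name point at the hash
 * entry's key, the delete must precede pfree'ing the Portal struct itself,
 * exactly as PortalDrop does below.
 */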
90 
91 static MemoryContext TopPortalContext = NULL;
92 
93 
94 /* ----------------------------------------------------------------
95  * public portal interface functions
96  * ----------------------------------------------------------------
97  */
98 
99 /*
100  * EnablePortalManager
101  * Enables the portal management module at backend startup.
102  */
103 void
104 EnablePortalManager(void)
105 {
106  HASHCTL ctl;
107 
108  Assert(TopPortalContext == NULL);
109 
110  TopPortalContext = AllocSetContextCreate(TopMemoryContext,
111  "TopPortalContext",
112  ALLOCSET_DEFAULT_SIZES);
113 
114  ctl.keysize = MAX_PORTALNAME_LEN;
115  ctl.entrysize = sizeof(PortalHashEnt);
116 
117  /*
118  * use PORTALS_PER_USER as a guess of how many hash table entries to
119  * create, initially
120  */
121  PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
122  &ctl, HASH_ELEM);
123 }
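
/*
 * Added commentary: because only HASH_ELEM is passed (no HASH_BLOBS or
 * HASH_FUNCTION), dynahash defaults to string_hash, which treats each key
 * as a NUL-terminated string of at most keysize-1 bytes, which is exactly
 * what the fixed-size portalname[] key of PortalHashEnt requires.
 */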
124 
125 /*
126  * GetPortalByName
127  * Returns a portal given a portal name, or NULL if name not found.
128  */
129 Portal
130 GetPortalByName(const char *name)
131 {
132  Portal portal;
133 
134  if (PointerIsValid(name))
135  PortalHashTableLookup(name, portal);
136  else
137  portal = NULL;
138 
139  return portal;
140 }
141 
142 /*
143  * PortalGetPrimaryStmt
144  * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145  *
146  * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147  * portal are marked canSetTag, returns the first one. Neither of these
148  * cases should occur in present usages of this function.
149  */
150 PlannedStmt *
151 PortalGetPrimaryStmt(Portal portal)
152 {
153  ListCell *lc;
154 
155  foreach(lc, portal->stmts)
156  {
157  PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
158 
159  if (stmt->canSetTag)
160  return stmt;
161  }
162  return NULL;
163 }
164 
165 /*
166  * CreatePortal
167  * Returns a new portal given a name.
168  *
169  * allowDup: if true, automatically drop any pre-existing portal of the
170  * same name (if false, an error is raised).
171  *
172  * dupSilent: if true, don't even emit a WARNING.
173  */
174 Portal
175 CreatePortal(const char *name, bool allowDup, bool dupSilent)
176 {
177  Portal portal;
178 
179  AssertArg(PointerIsValid(name));
180 
181  portal = GetPortalByName(name);
182  if (PortalIsValid(portal))
183  {
184  if (!allowDup)
185  ereport(ERROR,
186  (errcode(ERRCODE_DUPLICATE_CURSOR),
187  errmsg("cursor \"%s\" already exists", name)));
188  if (!dupSilent)
189  ereport(WARNING,
190  (errcode(ERRCODE_DUPLICATE_CURSOR),
191  errmsg("closing existing cursor \"%s\"",
192  name)));
193  PortalDrop(portal, false);
194  }
195 
196  /* make new portal structure */
197  portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
198 
199  /* initialize portal context; typically it won't store much */
200  portal->portalContext = AllocSetContextCreate(TopPortalContext,
201  "PortalContext",
202  ALLOCSET_SMALL_SIZES);
203 
204  /* create a resource owner for the portal */
205  portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
206  "Portal");
207 
208  /* initialize portal fields that don't start off zero */
209  portal->status = PORTAL_NEW;
210  portal->cleanup = PortalCleanup;
211  portal->createSubid = GetCurrentSubTransactionId();
212  portal->activeSubid = portal->createSubid;
213  portal->strategy = PORTAL_MULTI_QUERY;
214  portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
215  portal->atStart = true;
216  portal->atEnd = true; /* disallow fetches until query is set */
217  portal->visible = true;
218  portal->creation_time = GetCurrentStatementStartTimestamp();
219 
220  /* put portal in table (sets portal->name) */
221  PortalHashTableInsert(portal, name);
222 
223  return portal;
224 }
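
/*
 * Illustrative usage sketch (added commentary; the cursor name is
 * hypothetical):
 *
 *	Portal p = CreatePortal("my_cursor", false, false);
 *	(p->status is PORTAL_NEW; the caller next supplies the query with
 *	PortalDefineQuery() and starts execution with PortalStart(), pquery.c)
 */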
225 
226 /*
227  * CreateNewPortal
228  * Create a new portal, assigning it a random nonconflicting name.
229  */
230 Portal
231 CreateNewPortal(void)
232 {
233  static unsigned int unnamed_portal_count = 0;
234 
235  char portalname[MAX_PORTALNAME_LEN];
236 
237  /* Select a nonconflicting name */
238  for (;;)
239  {
240  unnamed_portal_count++;
241  sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
242  if (GetPortalByName(portalname) == NULL)
243  break;
244  }
245 
246  return CreatePortal(portalname, false, false);
247 }
248 
249 /*
250  * PortalDefineQuery
251  * A simple subroutine to establish a portal's query.
252  *
253  * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
254  * allowed anymore to pass NULL. (If you really don't have source text,
255  * you can pass a constant string, perhaps "(query not available)".)
256  *
257  * commandTag shall be NULL if and only if the original query string
258  * (before rewriting) was an empty string. Also, the passed commandTag must
259  * be a pointer to a constant string, since it is not copied.
260  *
261  * If cplan is provided, then it is a cached plan containing the stmts, and
262  * the caller must have done GetCachedPlan(), causing a refcount increment.
263  * The refcount will be released when the portal is destroyed.
264  *
265  * If cplan is NULL, then it is the caller's responsibility to ensure that
266  * the passed plan trees have adequate lifetime. Typically this is done by
267  * copying them into the portal's context.
268  *
269  * The caller is also responsible for ensuring that the passed prepStmtName
270  * (if not NULL) and sourceText have adequate lifetime.
271  *
272  * NB: this function mustn't do much beyond storing the passed values; in
273  * particular don't do anything that risks elog(ERROR). If that were to
274  * happen here before storing the cplan reference, we'd leak the plancache
275  * refcount that the caller is trying to hand off to us.
276  */
277 void
278 PortalDefineQuery(Portal portal,
279  const char *prepStmtName,
280  const char *sourceText,
281  const char *commandTag,
282  List *stmts,
283  CachedPlan *cplan)
284 {
285  AssertArg(PortalIsValid(portal));
286  AssertState(portal->status == PORTAL_NEW);
287 
288  AssertArg(sourceText != NULL);
289  AssertArg(commandTag != NULL || stmts == NIL);
290 
291  portal->prepStmtName = prepStmtName;
292  portal->sourceText = sourceText;
293  portal->commandTag = commandTag;
294  portal->stmts = stmts;
295  portal->cplan = cplan;
296  portal->status = PORTAL_DEFINED;
297 }
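
/*
 * Illustrative call sketch (added commentary; the literal arguments are
 * hypothetical):
 *
 *	PortalDefineQuery(portal,
 *					  NULL,			(no prepared-statement name)
 *					  "SELECT 1",	(sourceText; must outlive the portal)
 *					  "SELECT",		(pointer to a constant commandTag)
 *					  stmts,		(List of PlannedStmt nodes)
 *					  NULL);		(no cached plan)
 *
 * Afterward the portal is PORTAL_DEFINED; PortalStart() in pquery.c then
 * moves it to PORTAL_READY.
 */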
298 
299 /*
300  * PortalReleaseCachedPlan
301  * Release a portal's reference to its cached plan, if any.
302  */
303 static void
304 PortalReleaseCachedPlan(Portal portal)
305 {
306  if (portal->cplan)
307  {
308  ReleaseCachedPlan(portal->cplan, false);
309  portal->cplan = NULL;
310 
311  /*
312  * We must also clear portal->stmts which is now a dangling reference
313  * to the cached plan's plan list. This protects any code that might
314  * try to examine the Portal later.
315  */
316  portal->stmts = NIL;
317  }
318 }
319 
320 /*
321  * PortalCreateHoldStore
322  * Create the tuplestore for a portal.
323  */
324 void
325 PortalCreateHoldStore(Portal portal)
326 {
327  MemoryContext oldcxt;
328 
329  Assert(portal->holdContext == NULL);
330  Assert(portal->holdStore == NULL);
331  Assert(portal->holdSnapshot == NULL);
332 
333  /*
334  * Create the memory context that is used for storage of the tuple set.
335  * Note this is NOT a child of the portal's portalContext.
336  */
337  portal->holdContext =
338  AllocSetContextCreate(TopPortalContext,
339  "PortalHoldContext",
340  ALLOCSET_DEFAULT_SIZES);
341 
342  /*
343  * Create the tuple store, selecting cross-transaction temp files, and
344  * enabling random access only if cursor requires scrolling.
345  *
346  * XXX: Should maintenance_work_mem be used for the portal size?
347  */
348  oldcxt = MemoryContextSwitchTo(portal->holdContext);
349 
350  portal->holdStore =
351  tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
352  true, work_mem);
353 
354  MemoryContextSwitchTo(oldcxt);
355 }
356 
357 /*
358  * PinPortal
359  * Protect a portal from dropping.
360  *
361  * A pinned portal is still unpinned and dropped at transaction or
362  * subtransaction abort.
363  */
364 void
365 PinPortal(Portal portal)
366 {
367  if (portal->portalPinned)
368  elog(ERROR, "portal already pinned");
369 
370  portal->portalPinned = true;
371 }
372 
373 void
374 UnpinPortal(Portal portal)
375 {
376  if (!portal->portalPinned)
377  elog(ERROR, "portal not pinned");
378 
379  portal->portalPinned = false;
380 }
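
/*
 * Usage note (added commentary): callers such as PL/pgSQL pin a portal for
 * the duration of a cursor loop, so that user code reached from the loop
 * body cannot drop it out from under them:
 *
 *	PinPortal(portal);
 *	... fetch repeatedly from the portal ...
 *	UnpinPortal(portal);
 *
 * If an error intervenes, AtCleanup_Portals and AtSubCleanup_Portals below
 * forcibly clear the pin before dropping the portal.
 */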
381 
382 /*
383  * MarkPortalActive
384  * Transition a portal from READY to ACTIVE state.
385  *
386  * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
387  */
388 void
389 MarkPortalActive(Portal portal)
390 {
391  /* For safety, this is a runtime test not just an Assert */
392  if (portal->status != PORTAL_READY)
393  ereport(ERROR,
394  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
395  errmsg("portal \"%s\" cannot be run", portal->name)));
396  /* Perform the state transition */
397  portal->status = PORTAL_ACTIVE;
398  portal->activeSubid = GetCurrentSubTransactionId();
399 }
400 
401 /*
402  * MarkPortalDone
403  * Transition a portal from ACTIVE to DONE state.
404  *
405  * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
406  */
407 void
408 MarkPortalDone(Portal portal)
409 {
410  /* Perform the state transition */
411  Assert(portal->status == PORTAL_ACTIVE);
412  portal->status = PORTAL_DONE;
413 
414  /*
415  * Allow portalcmds.c to clean up the state it knows about. We might as
416  * well do that now, since the portal can't be executed any more.
417  *
418  * In some cases involving execution of a ROLLBACK command in an already
419  * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
420  * with the cleanup hook still unexecuted.
421  */
422  if (PointerIsValid(portal->cleanup))
423  {
424  portal->cleanup(portal);
425  portal->cleanup = NULL;
426  }
427 }
428 
429 /*
430  * MarkPortalFailed
431  * Transition a portal into FAILED state.
432  *
433  * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
434  */
435 void
436 MarkPortalFailed(Portal portal)
437 {
438  /* Perform the state transition */
439  Assert(portal->status != PORTAL_DONE);
440  portal->status = PORTAL_FAILED;
441 
442  /*
443  * Allow portalcmds.c to clean up the state it knows about. We might as
444  * well do that now, since the portal can't be executed any more.
445  *
446  * In some cases involving cleanup of an already aborted transaction, this
447  * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
448  * still unexecuted.
449  */
450  if (PointerIsValid(portal->cleanup))
451  {
452  portal->cleanup(portal);
453  portal->cleanup = NULL;
454  }
455 }
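
/*
 * Summary (added commentary) of the status transitions implemented above
 * and in pquery.c:
 *
 *	PORTAL_NEW      --PortalDefineQuery()--> PORTAL_DEFINED
 *	PORTAL_DEFINED  --PortalStart()--------> PORTAL_READY
 *	PORTAL_READY    --MarkPortalActive()---> PORTAL_ACTIVE
 *	PORTAL_ACTIVE   --MarkPortalDone()-----> PORTAL_DONE
 *	(any state except DONE) --MarkPortalFailed()--> PORTAL_FAILED
 */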
456 
457 /*
458  * PortalDrop
459  * Destroy the portal.
460  */
461 void
462 PortalDrop(Portal portal, bool isTopCommit)
463 {
464  AssertArg(PortalIsValid(portal));
465 
466  /*
467  * Don't allow dropping a pinned portal, it's still needed by whoever
468  * pinned it.
469  */
470  if (portal->portalPinned)
471  ereport(ERROR,
472  (errcode(ERRCODE_INVALID_CURSOR_STATE),
473  errmsg("cannot drop pinned portal \"%s\"", portal->name)));
474 
475  /*
476  * Not sure if the PORTAL_ACTIVE case can validly happen or not...
477  */
478  if (portal->status == PORTAL_ACTIVE)
479  ereport(ERROR,
480  (errcode(ERRCODE_INVALID_CURSOR_STATE),
481  errmsg("cannot drop active portal \"%s\"", portal->name)));
482 
483  /*
484  * Allow portalcmds.c to clean up the state it knows about, in particular
485  * shutting down the executor if still active. This step potentially runs
486  * user-defined code so failure has to be expected. It's the cleanup
487  * hook's responsibility to not try to do that more than once, in the case
488  * that failure occurs and then we come back to drop the portal again
489  * during transaction abort.
490  *
491  * Note: in most paths of control, this will have been done already in
492  * MarkPortalDone or MarkPortalFailed. We're just making sure.
493  */
494  if (PointerIsValid(portal->cleanup))
495  {
496  portal->cleanup(portal);
497  portal->cleanup = NULL;
498  }
499 
500  /*
501  * Remove portal from hash table. Because we do this here, we will not
502  * come back to try to remove the portal again if there's any error in the
503  * subsequent steps. Better to leak a little memory than to get into an
504  * infinite error-recovery loop.
505  */
506  PortalHashTableDelete(portal);
507 
508  /* drop cached plan reference, if any */
509  PortalReleaseCachedPlan(portal);
510 
511  /*
512  * If portal has a snapshot protecting its data, release that. This needs
513  * a little care since the registration will be attached to the portal's
514  * resowner; if the portal failed, we will already have released the
515  * resowner (and the snapshot) during transaction abort.
516  */
517  if (portal->holdSnapshot)
518  {
519  if (portal->resowner)
520  UnregisterSnapshotFromOwner(portal->holdSnapshot,
521  portal->resowner);
522  portal->holdSnapshot = NULL;
523  }
524 
525  /*
526  * Release any resources still attached to the portal. There are several
527  * cases being covered here:
528  *
529  * Top transaction commit (indicated by isTopCommit): normally we should
530  * do nothing here and let the regular end-of-transaction resource
531  * releasing mechanism handle these resources too. However, if we have a
532  * FAILED portal (eg, a cursor that got an error), we'd better clean up
533  * its resources to avoid resource-leakage warning messages.
534  *
535  * Sub transaction commit: never comes here at all, since we don't kill
536  * any portals in AtSubCommit_Portals().
537  *
538  * Main or sub transaction abort: we will do nothing here because
539  * portal->resowner was already set NULL; the resources were already
540  * cleaned up in transaction abort.
541  *
542  * Ordinary portal drop: must release resources. However, if the portal
543  * is not FAILED then we do not release its locks. The locks become the
544  * responsibility of the transaction's ResourceOwner (since it is the
545  * parent of the portal's owner) and will be released when the transaction
546  * eventually ends.
547  */
548  if (portal->resowner &&
549  (!isTopCommit || portal->status == PORTAL_FAILED))
550  {
551  bool isCommit = (portal->status != PORTAL_FAILED);
552 
553  ResourceOwnerRelease(portal->resowner,
554  RESOURCE_RELEASE_BEFORE_LOCKS,
555  isCommit, false);
556  ResourceOwnerRelease(portal->resowner,
557  RESOURCE_RELEASE_LOCKS,
558  isCommit, false);
559  ResourceOwnerRelease(portal->resowner,
560  RESOURCE_RELEASE_AFTER_LOCKS,
561  isCommit, false);
562  ResourceOwnerDelete(portal->resowner);
563  }
564  portal->resowner = NULL;
565 
566  /*
567  * Delete tuplestore if present. We should do this even under error
568  * conditions; since the tuplestore would have been using cross-
569  * transaction storage, its temp files need to be explicitly deleted.
570  */
571  if (portal->holdStore)
572  {
573  MemoryContext oldcontext;
574 
575  oldcontext = MemoryContextSwitchTo(portal->holdContext);
576  tuplestore_end(portal->holdStore);
577  MemoryContextSwitchTo(oldcontext);
578  portal->holdStore = NULL;
579  }
580 
581  /* delete tuplestore storage, if any */
582  if (portal->holdContext)
583  MemoryContextDelete(portal->holdContext);
584 
585  /* release subsidiary storage */
586  MemoryContextDelete(portal->portalContext);
587 
588  /* release portal struct (it's in TopPortalContext) */
589  pfree(portal);
590 }
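
/*
 * Note (added commentary): the teardown order above is deliberate: hash
 * entry first (so a later error cannot cause an endless error-recovery
 * loop), then the plan refcount, hold snapshot, resource owner, hold
 * store, and finally the memory contexts and the Portal struct itself.
 */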
591 
592 /*
593  * Delete all declared cursors.
594  *
595  * Used by commands: CLOSE ALL, DISCARD ALL
596  */
597 void
598 PortalHashTableDeleteAll(void)
599 {
600  HASH_SEQ_STATUS status;
601  PortalHashEnt *hentry;
602 
603  if (PortalHashTable == NULL)
604  return;
605 
606  hash_seq_init(&status, PortalHashTable);
607  while ((hentry = hash_seq_search(&status)) != NULL)
608  {
609  Portal portal = hentry->portal;
610 
611  /* Can't close the active portal (the one running the command) */
612  if (portal->status == PORTAL_ACTIVE)
613  continue;
614 
615  PortalDrop(portal, false);
616 
617  /* Restart the iteration in case that led to other drops */
618  hash_seq_term(&status);
619  hash_seq_init(&status, PortalHashTable);
620  }
621 }
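
/*
 * Illustrative sketch (added commentary) of the rescan idiom used above
 * and in the transaction-end functions below, for any dynahash scan whose
 * body may delete other entries:
 *
 *	hash_seq_init(&status, table);
 *	while ((entry = hash_seq_search(&status)) != NULL)
 *	{
 *		if (!needs_change(entry))			(hypothetical predicate)
 *			continue;
 *		change(entry);						(may delete other entries)
 *		hash_seq_term(&status);				(abandon the now-unsafe scan)
 *		hash_seq_init(&status, table);		(and start over)
 *	}
 */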
622 
623 
624 /*
625  * Pre-commit processing for portals.
626  *
627  * Holdable cursors created in this transaction need to be converted to
628  * materialized form, since we are going to close down the executor and
629  * release locks. Non-holdable portals created in this transaction are
630  * simply removed. Portals remaining from prior transactions should be
631  * left untouched.
632  *
633  * Returns true if any portals changed state (possibly causing user-defined
634  * code to be run), false if not.
635  */
636 bool
637 PreCommit_Portals(bool isPrepare)
638 {
639  bool result = false;
640  HASH_SEQ_STATUS status;
641  PortalHashEnt *hentry;
642 
643  hash_seq_init(&status, PortalHashTable);
644 
645  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
646  {
647  Portal portal = hentry->portal;
648 
649  /*
650  * There should be no pinned portals anymore. Complain if someone
651  * leaked one.
652  */
653  if (portal->portalPinned)
654  elog(ERROR, "cannot commit while a portal is pinned");
655 
656  /*
657  * Do not touch active portals --- this can only happen in the case of
658  * a multi-transaction utility command, such as VACUUM.
659  *
660  * Note however that any resource owner attached to such a portal is
661  * still going to go away, so don't leave a dangling pointer.
662  */
663  if (portal->status == PORTAL_ACTIVE)
664  {
665  portal->resowner = NULL;
666  continue;
667  }
668 
669  /* Is it a holdable portal created in the current xact? */
670  if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
671  portal->createSubid != InvalidSubTransactionId &&
672  portal->status == PORTAL_READY)
673  {
674  /*
675  * We are exiting the transaction that created a holdable cursor.
676  * Instead of dropping the portal, prepare it for access by later
677  * transactions.
678  *
679  * However, if this is PREPARE TRANSACTION rather than COMMIT,
680  * refuse PREPARE, because the semantics seem pretty unclear.
681  */
682  if (isPrepare)
683  ereport(ERROR,
684  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
685  errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
686 
687  /*
688  * Note that PersistHoldablePortal() must release all resources
689  * used by the portal that are local to the creating transaction.
690  */
691  PortalCreateHoldStore(portal);
692  PersistHoldablePortal(portal);
693 
694  /* drop cached plan reference, if any */
695  PortalReleaseCachedPlan(portal);
696 
697  /*
698  * Any resources belonging to the portal will be released in the
699  * upcoming transaction-wide cleanup; the portal will no longer
700  * have its own resources.
701  */
702  portal->resowner = NULL;
703 
704  /*
705  * Having successfully exported the holdable cursor, mark it as
706  * not belonging to this transaction.
707  */
708  portal->createSubid = InvalidSubTransactionId;
709  portal->activeSubid = InvalidSubTransactionId;
710 
711  /* Report we changed state */
712  result = true;
713  }
714  else if (portal->createSubid == InvalidSubTransactionId)
715  {
716  /*
717  * Do nothing to cursors held over from a previous transaction
718  * (including ones we just froze in a previous cycle of this loop)
719  */
720  continue;
721  }
722  else
723  {
724  /* Zap all non-holdable portals */
725  PortalDrop(portal, true);
726 
727  /* Report we changed state */
728  result = true;
729  }
730 
731  /*
732  * After either freezing or dropping a portal, we have to restart the
733  * iteration, because we could have invoked user-defined code that
734  * caused a drop of the next portal in the hash chain.
735  */
736  hash_seq_term(&status);
737  hash_seq_init(&status, PortalHashTable);
738  }
739 
740  return result;
741 }
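
/*
 * Note (added commentary): this is the function that lets
 * DECLARE c CURSOR WITH HOLD FOR ... survive COMMIT; the rows are
 * materialized into the portal's hold store by PersistHoldablePortal()
 * while the creating transaction can still run the executor.
 */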
742 
743 /*
744  * Abort processing for portals.
745  *
746  * At this point we run the cleanup hook if present, but we can't release the
747  * portal's memory until the cleanup call.
748  */
749 void
750 AtAbort_Portals(void)
751 {
752  HASH_SEQ_STATUS status;
753  PortalHashEnt *hentry;
754 
755  hash_seq_init(&status, PortalHashTable);
756 
757  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
758  {
759  Portal portal = hentry->portal;
760 
761  /*
762  * When elog(FATAL) is in progress, we need to set the active portal to
763  * failed, so that PortalCleanup() doesn't run the executor shutdown.
764  */
765  if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
766  MarkPortalFailed(portal);
767 
768  /*
769  * Do nothing else to cursors held over from a previous transaction.
770  */
771  if (portal->createSubid == InvalidSubTransactionId)
772  continue;
773 
774  /*
775  * If it was created in the current transaction, we can't do normal
776  * shutdown on a READY portal either; it might refer to objects
777  * created in the failed transaction. See comments in
778  * AtSubAbort_Portals.
779  */
780  if (portal->status == PORTAL_READY)
781  MarkPortalFailed(portal);
782 
783  /*
784  * Allow portalcmds.c to clean up the state it knows about, if we
785  * haven't already.
786  */
787  if (PointerIsValid(portal->cleanup))
788  {
789  portal->cleanup(portal);
790  portal->cleanup = NULL;
791  }
792 
793  /* drop cached plan reference, if any */
794  PortalReleaseCachedPlan(portal);
795 
796  /*
797  * Any resources belonging to the portal will be released in the
798  * upcoming transaction-wide cleanup; they will be gone before we run
799  * PortalDrop.
800  */
801  portal->resowner = NULL;
802 
803  /*
804  * Although we can't delete the portal data structure proper, we can
805  * release any memory in subsidiary contexts, such as executor state.
806  * The cleanup hook was the last thing that might have needed data
807  * there. But leave active portals alone.
808  */
809  if (portal->status != PORTAL_ACTIVE)
810  MemoryContextDeleteChildren(portal->portalContext);
811  }
812 }
813 
814 /*
815  * Post-abort cleanup for portals.
816  *
817  * Delete all portals not held over from prior transactions. */
818 void
819 AtCleanup_Portals(void)
820 {
821  HASH_SEQ_STATUS status;
822  PortalHashEnt *hentry;
823 
824  hash_seq_init(&status, PortalHashTable);
825 
826  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
827  {
828  Portal portal = hentry->portal;
829 
830  /*
831  * Do not touch active portals --- this can only happen in the case of
832  * a multi-transaction command.
833  */
834  if (portal->status == PORTAL_ACTIVE)
835  continue;
836 
837  /* Do nothing to cursors held over from a previous transaction */
838  if (portal->createSubid == InvalidSubTransactionId)
839  {
840  Assert(portal->status != PORTAL_ACTIVE);
841  Assert(portal->resowner == NULL);
842  continue;
843  }
844 
845  /*
846  * If a portal is still pinned, forcibly unpin it. PortalDrop will not
847  * let us drop the portal otherwise. Whoever pinned the portal was
848  * interrupted by the abort too and won't try to use it anymore.
849  */
850  if (portal->portalPinned)
851  portal->portalPinned = false;
852 
853  /*
854  * We had better not call any user-defined code during cleanup, so if
855  * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
856  */
857  if (PointerIsValid(portal->cleanup))
858  {
859  elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
860  portal->cleanup = NULL;
861  }
862 
863  /* Zap it. */
864  PortalDrop(portal, false);
865  }
866 }
867 
868 /*
869  * Pre-subcommit processing for portals.
870  *
871  * Reassign portals created or used in the current subtransaction to the
872  * parent subtransaction.
873  */
874 void
875 AtSubCommit_Portals(SubTransactionId mySubid,
876  SubTransactionId parentSubid,
877  ResourceOwner parentXactOwner)
878 {
879  HASH_SEQ_STATUS status;
880  PortalHashEnt *hentry;
881 
882  hash_seq_init(&status, PortalHashTable);
883 
884  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
885  {
886  Portal portal = hentry->portal;
887 
888  if (portal->createSubid == mySubid)
889  {
890  portal->createSubid = parentSubid;
891  if (portal->resowner)
892  ResourceOwnerNewParent(portal->resowner, parentXactOwner);
893  }
894  if (portal->activeSubid == mySubid)
895  portal->activeSubid = parentSubid;
896  }
897 }
898 
899 /*
900  * Subtransaction abort handling for portals.
901  *
902  * Deactivate portals created or used during the failed subtransaction.
903  * Note that per AtSubCommit_Portals, this will catch portals created/used
904  * in descendants of the subtransaction too.
905  *
906  * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
907  */
908 void
909 AtSubAbort_Portals(SubTransactionId mySubid,
910  SubTransactionId parentSubid,
911  ResourceOwner myXactOwner,
912  ResourceOwner parentXactOwner)
913 {
914  HASH_SEQ_STATUS status;
915  PortalHashEnt *hentry;
916 
917  hash_seq_init(&status, PortalHashTable);
918 
919  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
920  {
921  Portal portal = hentry->portal;
922 
923  /* Was it created in this subtransaction? */
924  if (portal->createSubid != mySubid)
925  {
926  /* No, but maybe it was used in this subtransaction? */
927  if (portal->activeSubid == mySubid)
928  {
929  /* Maintain activeSubid until the portal is removed */
930  portal->activeSubid = parentSubid;
931 
932  /*
933  * A MarkPortalActive() caller ran an upper-level portal in
934  * this subtransaction and left the portal ACTIVE. This can't
935  * happen, but force the portal into FAILED state for the same
936  * reasons discussed below.
937  *
938  * We assume we can get away without forcing upper-level READY
939  * portals to fail, even if they were run and then suspended.
940  * In theory a suspended upper-level portal could have
941  * acquired some references to objects that are about to be
942  * destroyed, but there should be sufficient defenses against
943  * such cases: the portal's original query cannot contain such
944  * references, and any references within, say, cached plans of
945  * PL/pgSQL functions are not from active queries and should
946  * be protected by revalidation logic.
947  */
948  if (portal->status == PORTAL_ACTIVE)
949  MarkPortalFailed(portal);
950 
951  /*
952  * Also, if we failed it during the current subtransaction
953  * (either just above, or earlier), reattach its resource
954  * owner to the current subtransaction's resource owner, so
955  * that any resources it still holds will be released while
956  * cleaning up this subtransaction. This prevents some corner
957  * cases wherein we might get Asserts or worse while cleaning
958  * up objects created during the current subtransaction
959  * (because they're still referenced within this portal).
960  */
961  if (portal->status == PORTAL_FAILED && portal->resowner)
962  {
963  ResourceOwnerNewParent(portal->resowner, myXactOwner);
964  portal->resowner = NULL;
965  }
966  }
967  /* Done if it wasn't created in this subtransaction */
968  continue;
969  }
970 
971  /*
972  * Force any live portals of my own subtransaction into FAILED state.
973  * We have to do this because they might refer to objects created or
974  * changed in the failed subtransaction, leading to crashes within
975  * ExecutorEnd when portalcmds.c tries to close down the portal.
976  * Currently, every MarkPortalActive() caller ensures it updates the
977  * portal status again before relinquishing control, so ACTIVE can't
978  * happen here. If it does happen, dispose of the portal like existing
979  * MarkPortalActive() callers would.
980  */
981  if (portal->status == PORTAL_READY ||
982  portal->status == PORTAL_ACTIVE)
983  MarkPortalFailed(portal);
984 
985  /*
986  * Allow portalcmds.c to clean up the state it knows about, if we
987  * haven't already.
988  */
989  if (PointerIsValid(portal->cleanup))
990  {
991  portal->cleanup(portal);
992  portal->cleanup = NULL;
993  }
994 
995  /* drop cached plan reference, if any */
996  PortalReleaseCachedPlan(portal);
997 
998  /*
999  * Any resources belonging to the portal will be released in the
1000  * upcoming transaction-wide cleanup; they will be gone before we run
1001  * PortalDrop.
1002  */
1003  portal->resowner = NULL;
1004 
1005  /*
1006  * Although we can't delete the portal data structure proper, we can
1007  * release any memory in subsidiary contexts, such as executor state.
1008  * The cleanup hook was the last thing that might have needed data
1009  * there.
1010  */
1011  MemoryContextDeleteChildren(portal->portalContext);
1012  }
1013 }
1014 
1015 /*
1016  * Post-subabort cleanup for portals.
1017  *
1018  * Drop all portals created in the failed subtransaction (but note that
1019  * we will not drop any that were reassigned to the parent above).
1020  */
1021 void
1022 AtSubCleanup_Portals(SubTransactionId mySubid)
1023 {
1024  HASH_SEQ_STATUS status;
1025  PortalHashEnt *hentry;
1026 
1027  hash_seq_init(&status, PortalHashTable);
1028 
1029  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1030  {
1031  Portal portal = hentry->portal;
1032 
1033  if (portal->createSubid != mySubid)
1034  continue;
1035 
1036  /*
1037  * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1038  * let us drop the portal otherwise. Whoever pinned the portal was
1039  * interrupted by the abort too and won't try to use it anymore.
1040  */
1041  if (portal->portalPinned)
1042  portal->portalPinned = false;
1043 
1044  /*
1045  * We had better not call any user-defined code during cleanup, so if
1046  * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1047  */
1048  if (PointerIsValid(portal->cleanup))
1049  {
1050  elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1051  portal->cleanup = NULL;
1052  }
1053 
1054  /* Zap it. */
1055  PortalDrop(portal, false);
1056  }
1057 }
1058 
1059 /* Find all available cursors */
1060 Datum
1061 pg_cursor(PG_FUNCTION_ARGS)
1062 {
1063  ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1064  TupleDesc tupdesc;
1065  Tuplestorestate *tupstore;
1066  MemoryContext per_query_ctx;
1067  MemoryContext oldcontext;
1068  HASH_SEQ_STATUS hash_seq;
1069  PortalHashEnt *hentry;
1070 
1071  /* check to see if caller supports us returning a tuplestore */
1072  if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
1073  ereport(ERROR,
1074  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1075  errmsg("set-valued function called in context that cannot accept a set")));
1076  if (!(rsinfo->allowedModes & SFRM_Materialize))
1077  ereport(ERROR,
1078  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1079  errmsg("materialize mode required, but it is not " \
1080  "allowed in this context")));
1081 
1082  /* need to build tuplestore in query context */
1083  per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
1084  oldcontext = MemoryContextSwitchTo(per_query_ctx);
1085 
1086  /*
1087  * build tupdesc for result tuples. This must match the definition of the
1088  * pg_cursors view in system_views.sql
1089  */
1090  tupdesc = CreateTemplateTupleDesc(6, false);
1091  TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
1092  TEXTOID, -1, 0);
1093  TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
1094  TEXTOID, -1, 0);
1095  TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
1096  BOOLOID, -1, 0);
1097  TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
1098  BOOLOID, -1, 0);
1099  TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
1100  BOOLOID, -1, 0);
1101  TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
1102  TIMESTAMPTZOID, -1, 0);
1103 
1104  /*
1105  * We put all the tuples into a tuplestore in one scan of the hashtable.
1106  * This avoids any issue of the hashtable possibly changing between calls.
1107  */
1108  tupstore =
1109  tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
1110  false, work_mem);
1111 
1112  /* generate junk in short-term context */
1113  MemoryContextSwitchTo(oldcontext);
1114 
1115  hash_seq_init(&hash_seq, PortalHashTable);
1116  while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1117  {
1118  Portal portal = hentry->portal;
1119  Datum values[6];
1120  bool nulls[6];
1121 
1122  /* report only "visible" entries */
1123  if (!portal->visible)
1124  continue;
1125 
1126  MemSet(nulls, 0, sizeof(nulls));
1127 
1128  values[0] = CStringGetTextDatum(portal->name);
1129  values[1] = CStringGetTextDatum(portal->sourceText);
1130  values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1131  values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1132  values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1133  values[5] = TimestampTzGetDatum(portal->creation_time);
1134 
1135  tuplestore_putvalues(tupstore, tupdesc, values, nulls);
1136  }
1137 
1138  /* clean up and return the tuplestore */
1139  tuplestore_donestoring(tupstore);
1140 
1141  rsinfo->returnMode = SFRM_Materialize;
1142  rsinfo->setResult = tupstore;
1143  rsinfo->setDesc = tupdesc;
1144 
1145  return (Datum) 0;
1146 }
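
/*
 * Note (added commentary): pg_cursor() implements the pg_cursors system
 * view and follows the usual materialize-mode SRF recipe: validate rsinfo,
 * build the TupleDesc and tuplestore in per-query memory, fill the
 * tuplestore in a single pass over the hash table, then hand both back
 * through rsinfo.
 */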
1147 
1148 bool
1149 ThereAreNoReadyPortals(void)
1150 {
1151  HASH_SEQ_STATUS status;
1152  PortalHashEnt *hentry;
1153 
1154  hash_seq_init(&status, PortalHashTable);
1155 
1156  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1157  {
1158  Portal portal = hentry->portal;
1159 
1160  if (portal->status == PORTAL_READY)
1161  return false;
1162  }
1163 
1164  return true;
1165 }
1166 
1167 bool
1168 ThereArePinnedPortals(void)
1169 {
1170  HASH_SEQ_STATUS status;
1171  PortalHashEnt *hentry;
1172 
1173  hash_seq_init(&status, PortalHashTable);
1174 
1175  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1176  {
1177  Portal portal = hentry->portal;
1178 
1179  if (portal->portalPinned)
1180  return true;
1181  }
1182 
1183  return false;
1184 }