portalmem.c
1 /*-------------------------------------------------------------------------
2  *
3  * portalmem.c
4  * backend portal memory management
5  *
6  * Portals are objects representing the execution state of a query.
7  * This module provides memory management services for portals, but it
8  * doesn't actually run the executor for them.
9  *
10  *
11  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
12  * Portions Copyright (c) 1994, Regents of the University of California
13  *
14  * IDENTIFICATION
15  * src/backend/utils/mmgr/portalmem.c
16  *
17  *-------------------------------------------------------------------------
18  */
19 #include "postgres.h"
20 
21 #include "access/xact.h"
22 #include "catalog/pg_type.h"
23 #include "commands/portalcmds.h"
24 #include "miscadmin.h"
25 #include "utils/builtins.h"
26 #include "utils/memutils.h"
27 #include "utils/snapmgr.h"
28 #include "utils/timestamp.h"
29 
30 /*
31  * Estimate of the maximum number of open portals a user would have,
32  * used in initially sizing the PortalHashTable in EnablePortalManager().
33  * Since the hash table can expand, there's no need to make this overly
34  * generous, and keeping it small avoids unnecessary overhead in the
35  * hash_seq_search() calls executed during transaction end.
36  */
37 #define PORTALS_PER_USER 16
38 
39 
40 /* ----------------
41  * Global state
42  * ----------------
43  */
44 
45 #define MAX_PORTALNAME_LEN NAMEDATALEN
46 
47 typedef struct portalhashent
48 {
49  char portalname[MAX_PORTALNAME_LEN];
50  Portal portal; /* keyed by portalname */
51 } PortalHashEnt;
52 
53 static HTAB *PortalHashTable = NULL;
54 
55 #define PortalHashTableLookup(NAME, PORTAL) \
56 do { \
57  PortalHashEnt *hentry; \
58  \
59  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
60  (NAME), HASH_FIND, NULL); \
61  if (hentry) \
62  PORTAL = hentry->portal; \
63  else \
64  PORTAL = NULL; \
65 } while(0)
66 
67 #define PortalHashTableInsert(PORTAL, NAME) \
68 do { \
69  PortalHashEnt *hentry; bool found; \
70  \
71  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
72  (NAME), HASH_ENTER, &found); \
73  if (found) \
74  elog(ERROR, "duplicate portal name"); \
75  hentry->portal = PORTAL; \
76  /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
77  PORTAL->name = hentry->portalname; \
78 } while(0)
79 
80 #define PortalHashTableDelete(PORTAL) \
81 do { \
82  PortalHashEnt *hentry; \
83  \
84  hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
85  PORTAL->name, HASH_REMOVE, NULL); \
86  if (hentry == NULL) \
87  elog(WARNING, "trying to delete portal name that does not exist"); \
88 } while(0)
89 
90 static MemoryContext PortalMemory = NULL;
91 
92 
93 /* ----------------------------------------------------------------
94  * public portal interface functions
95  * ----------------------------------------------------------------
96  */
97 
98 /*
99  * EnablePortalManager
100  * Enables the portal management module at backend startup.
101  */
102 void
103 EnablePortalManager(void)
104 {
105  HASHCTL ctl;
106 
107  Assert(PortalMemory == NULL);
108 
109  PortalMemory = AllocSetContextCreate(TopMemoryContext,
110  "PortalMemory",
111  ALLOCSET_DEFAULT_SIZES);
112 
113  ctl.keysize = MAX_PORTALNAME_LEN;
114  ctl.entrysize = sizeof(PortalHashEnt);
115 
116  /*
117  * use PORTALS_PER_USER as a guess of how many hash table entries to
118  * create, initially
119  */
120  PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
121  &ctl, HASH_ELEM);
122 }
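
The HASHCTL fill-in plus hash_create() call above is the backend's standard dynahash idiom: set keysize and entrysize, pass HASH_ELEM, and let the table grow past the initial size hint on demand. A minimal editorial sketch of the same idiom for another string-keyed table (MyEntry, my_table, and my_table_init are hypothetical names, not part of this file):

typedef struct MyEntry
{
	char		key[NAMEDATALEN];	/* hash key; must be the first field */
	int			payload;
} MyEntry;

static HTAB *my_table = NULL;

static void
my_table_init(void)
{
	HASHCTL		ctl;

	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = NAMEDATALEN;
	ctl.entrysize = sizeof(MyEntry);

	/* 16 is only an initial size hint; dynahash expands as needed */
	my_table = hash_create("My table", 16, &ctl, HASH_ELEM);
}

With only HASH_ELEM given, keys are compared as NUL-terminated strings, which is exactly how PortalHashTable treats portal names.
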
123 
124 /*
125  * GetPortalByName
126  * Returns a portal given a portal name, or NULL if name not found.
127  */
128 Portal
129 GetPortalByName(const char *name)
130 {
131  Portal portal;
132 
133  if (PointerIsValid(name))
134  PortalHashTableLookup(name, portal);
135  else
136  portal = NULL;
137 
138  return portal;
139 }
140 
141 /*
142  * PortalGetPrimaryStmt
143  * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
144  *
145  * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
146  * portal are marked canSetTag, returns the first one. Neither of these
147  * cases should occur in present usages of this function.
148  */
149 PlannedStmt *
150 PortalGetPrimaryStmt(Portal portal)
151 {
152  ListCell *lc;
153 
154  foreach(lc, portal->stmts)
155  {
156  PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
157 
158  if (stmt->canSetTag)
159  return stmt;
160  }
161  return NULL;
162 }
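
For illustration only (not code from this file), a caller holding a portal might consult the primary statement to learn properties of the query it is about to run; hasReturning is a real PlannedStmt field, while the surrounding usage is a hypothetical sketch:

PlannedStmt *pstmt = PortalGetPrimaryStmt(portal);

if (pstmt != NULL && pstmt->hasReturning)
{
	/* the portal will emit RETURNING tuples */
}
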
163 
164 /*
165  * CreatePortal
166  * Returns a new portal given a name.
167  *
168  * allowDup: if true, automatically drop any pre-existing portal of the
169  * same name (if false, an error is raised).
170  *
171  * dupSilent: if true, don't even emit a WARNING.
172  */
173 Portal
174 CreatePortal(const char *name, bool allowDup, bool dupSilent)
175 {
176  Portal portal;
177 
178  AssertArg(PointerIsValid(name));
179 
180  portal = GetPortalByName(name);
181  if (PortalIsValid(portal))
182  {
183  if (!allowDup)
184  ereport(ERROR,
185  (errcode(ERRCODE_DUPLICATE_CURSOR),
186  errmsg("cursor \"%s\" already exists", name)));
187  if (!dupSilent)
188  ereport(WARNING,
189  (errcode(ERRCODE_DUPLICATE_CURSOR),
190  errmsg("closing existing cursor \"%s\"",
191  name)));
192  PortalDrop(portal, false);
193  }
194 
195  /* make new portal structure */
196  portal = (Portal) MemoryContextAllocZero(PortalMemory, sizeof *portal);
197 
198  /* initialize portal heap context; typically it won't store much */
199  portal->heap = AllocSetContextCreate(PortalMemory,
200  "PortalHeapMemory",
201  ALLOCSET_SMALL_SIZES);
202 
203  /* create a resource owner for the portal */
204  portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
205  "Portal");
206 
207  /* initialize portal fields that don't start off zero */
208  portal->status = PORTAL_NEW;
209  portal->cleanup = PortalCleanup;
210  portal->createSubid = GetCurrentSubTransactionId();
211  portal->activeSubid = portal->createSubid;
212  portal->strategy = PORTAL_MULTI_QUERY;
213  portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
214  portal->atStart = true;
215  portal->atEnd = true; /* disallow fetches until query is set */
216  portal->visible = true;
217  portal->creation_time = GetCurrentStatementStartTimestamp();
218 
219  /* put portal in table (sets portal->name) */
220  PortalHashTableInsert(portal, name);
221 
222  return portal;
223 }
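
Two call patterns worth contrasting (an editorial sketch, not code from this file): DECLARE CURSOR insists on a fresh name, while the extended query protocol reuses the unnamed portal by silently dropping its predecessor:

/* DECLARE-style: error out if "my_cursor" already exists */
Portal		named = CreatePortal("my_cursor", false, false);

/* unnamed-portal style: replace any previous portal, no WARNING */
Portal		unnamed = CreatePortal("", true, true);
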
224 
225 /*
226  * CreateNewPortal
227  * Create a new portal, assigning it a random nonconflicting name.
228  */
229 Portal
230 CreateNewPortal(void)
231 {
232  static unsigned int unnamed_portal_count = 0;
233 
234  char portalname[MAX_PORTALNAME_LEN];
235 
236  /* Select a nonconflicting name */
237  for (;;)
238  {
239  unnamed_portal_count++;
240  sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
241  if (GetPortalByName(portalname) == NULL)
242  break;
243  }
244 
245  return CreatePortal(portalname, false, false);
246 }
247 
248 /*
249  * PortalDefineQuery
250  * A simple subroutine to establish a portal's query.
251  *
252  * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
253  * allowed anymore to pass NULL. (If you really don't have source text,
254  * you can pass a constant string, perhaps "(query not available)".)
255  *
256  * commandTag shall be NULL if and only if the original query string
257  * (before rewriting) was an empty string. Also, the passed commandTag must
258  * be a pointer to a constant string, since it is not copied.
259  *
260  * If cplan is provided, then it is a cached plan containing the stmts, and
261  * the caller must have done GetCachedPlan(), causing a refcount increment.
262  * The refcount will be released when the portal is destroyed.
263  *
264  * If cplan is NULL, then it is the caller's responsibility to ensure that
265  * the passed plan trees have adequate lifetime. Typically this is done by
266  * copying them into the portal's heap context.
267  *
268  * The caller is also responsible for ensuring that the passed prepStmtName
269  * (if not NULL) and sourceText have adequate lifetime.
270  *
271  * NB: this function mustn't do much beyond storing the passed values; in
272  * particular don't do anything that risks elog(ERROR). If that were to
273  * happen here before storing the cplan reference, we'd leak the plancache
274  * refcount that the caller is trying to hand off to us.
275  */
276 void
277 PortalDefineQuery(Portal portal,
278  const char *prepStmtName,
279  const char *sourceText,
280  const char *commandTag,
281  List *stmts,
282  CachedPlan *cplan)
283 {
284  AssertArg(PortalIsValid(portal));
285  AssertState(portal->status == PORTAL_NEW);
286 
287  AssertArg(sourceText != NULL);
288  AssertArg(commandTag != NULL || stmts == NIL);
289 
290  portal->prepStmtName = prepStmtName;
291  portal->sourceText = sourceText;
292  portal->commandTag = commandTag;
293  portal->stmts = stmts;
294  portal->cplan = cplan;
295  portal->status = PORTAL_DEFINED;
296 }
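
A condensed sketch of the usual define-then-start sequence, loosely following what simple query execution does (query_string, commandTag, and stmts are assumed to come from the caller's parse/plan phase; PortalStart() lives in pquery.c):

Portal		portal = CreatePortal("", true, true);

PortalDefineQuery(portal,
				  NULL,				/* not a prepared statement */
				  query_string,		/* must not be NULL */
				  commandTag,
				  stmts,			/* List of PlannedStmt */
				  NULL);			/* no cached plan handed off */

PortalStart(portal, NULL, 0, InvalidSnapshot);
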
297 
298 /*
299  * PortalReleaseCachedPlan
300  * Release a portal's reference to its cached plan, if any.
301  */
302 static void
303 PortalReleaseCachedPlan(Portal portal)
304 {
305  if (portal->cplan)
306  {
307  ReleaseCachedPlan(portal->cplan, false);
308  portal->cplan = NULL;
309 
310  /*
311  * We must also clear portal->stmts which is now a dangling reference
312  * to the cached plan's plan list. This protects any code that might
313  * try to examine the Portal later.
314  */
315  portal->stmts = NIL;
316  }
317 }
318 
319 /*
320  * PortalCreateHoldStore
321  * Create the tuplestore for a portal.
322  */
323 void
324 PortalCreateHoldStore(Portal portal)
325 {
326  MemoryContext oldcxt;
327 
328  Assert(portal->holdContext == NULL);
329  Assert(portal->holdStore == NULL);
330  Assert(portal->holdSnapshot == NULL);
331 
332  /*
333  * Create the memory context that is used for storage of the tuple set.
334  * Note this is NOT a child of the portal's heap memory.
335  */
336  portal->holdContext =
337  AllocSetContextCreate(PortalMemory,
338  "PortalHoldContext",
339  ALLOCSET_DEFAULT_SIZES);
340 
341  /*
342  * Create the tuple store, selecting cross-transaction temp files, and
343  * enabling random access only if cursor requires scrolling.
344  *
345  * XXX: Should maintenance_work_mem be used for the portal size?
346  */
347  oldcxt = MemoryContextSwitchTo(portal->holdContext);
348 
349  portal->holdStore =
350  tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
351  true, work_mem);
352 
353  MemoryContextSwitchTo(oldcxt);
354 }
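
When rows are later stashed in the hold store (see PersistHoldablePortal() in portalcmds.c), the file's convention is to make holdContext current around the tuplestore calls so that related allocations survive the creating transaction; a sketch, with slot an assumed TupleTableSlot:

MemoryContext oldcxt = MemoryContextSwitchTo(portal->holdContext);

tuplestore_puttupleslot(portal->holdStore, slot);

MemoryContextSwitchTo(oldcxt);
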
355 
356 /*
357  * PinPortal
358  * Protect a portal from dropping.
359  *
360  * A pinned portal is still unpinned and dropped at transaction or
361  * subtransaction abort.
362  */
363 void
364 PinPortal(Portal portal)
365 {
366  if (portal->portalPinned)
367  elog(ERROR, "portal already pinned");
368 
369  portal->portalPinned = true;
370 }
371 
372 void
373 UnpinPortal(Portal portal)
374 {
375  if (!portal->portalPinned)
376  elog(ERROR, "portal not pinned");
377 
378  portal->portalPinned = false;
379 }
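
As one example of pinning, PL/pgSQL pins the portal behind a bound cursor for the duration of a FOR-over-cursor loop, so user code inside the loop cannot CLOSE it out from under the iteration; a rough sketch (fetch loop elided):

PinPortal(portal);

/* ... repeated SPI_cursor_fetch() calls against the portal ... */

UnpinPortal(portal);

If an error escapes the loop, transaction or subtransaction abort unpins and drops the portal, per AtCleanup_Portals() and AtSubCleanup_Portals() below.
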
380 
381 /*
382  * MarkPortalActive
383  * Transition a portal from READY to ACTIVE state.
384  *
385  * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
386  */
387 void
388 MarkPortalActive(Portal portal)
389 {
390  /* For safety, this is a runtime test not just an Assert */
391  if (portal->status != PORTAL_READY)
392  ereport(ERROR,
393  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
394  errmsg("portal \"%s\" cannot be run", portal->name)));
395  /* Perform the state transition */
396  portal->status = PORTAL_ACTIVE;
397  portal->activeSubid = GetCurrentSubTransactionId();
398 }
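
Callers pair MarkPortalActive() with a settling transition on every exit path; a condensed editorial sketch of the shape used by PortalRun() in pquery.c (execution details elided):

MarkPortalActive(portal);

PG_TRY();
{
	/* ... run the executor to completion or suspension ... */
}
PG_CATCH();
{
	/* make sure the portal can't be re-run, then propagate */
	MarkPortalFailed(portal);
	PG_RE_THROW();
}
PG_END_TRY();

/* on success: back to READY (suspendable) or on to MarkPortalDone() */
portal->status = PORTAL_READY;
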
399 
400 /*
401  * MarkPortalDone
402  * Transition a portal from ACTIVE to DONE state.
403  *
404  * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
405  */
406 void
407 MarkPortalDone(Portal portal)
408 {
409  /* Perform the state transition */
410  Assert(portal->status == PORTAL_ACTIVE);
411  portal->status = PORTAL_DONE;
412 
413  /*
414  * Allow portalcmds.c to clean up the state it knows about. We might as
415  * well do that now, since the portal can't be executed any more.
416  *
417  * In some cases involving execution of a ROLLBACK command in an already
418  * aborted transaction, this prevents an assertion failure caused by
419  * reaching AtCleanup_Portals with the cleanup hook still unexecuted.
420  */
421  if (PointerIsValid(portal->cleanup))
422  {
423  (*portal->cleanup) (portal);
424  portal->cleanup = NULL;
425  }
426 }
427 
428 /*
429  * MarkPortalFailed
430  * Transition a portal into FAILED state.
431  *
432  * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
433  */
434 void
435 MarkPortalFailed(Portal portal)
436 {
437  /* Perform the state transition */
438  Assert(portal->status != PORTAL_DONE);
439  portal->status = PORTAL_FAILED;
440 
441  /*
442  * Allow portalcmds.c to clean up the state it knows about. We might as
443  * well do that now, since the portal can't be executed any more.
444  *
445  * In some cases involving cleanup of an already aborted transaction, this
446  * prevents an assertion failure caused by reaching AtCleanup_Portals with
447  * the cleanup hook still unexecuted.
448  */
449  if (PointerIsValid(portal->cleanup))
450  {
451  (*portal->cleanup) (portal);
452  portal->cleanup = NULL;
453  }
454 }
455 
456 /*
457  * PortalDrop
458  * Destroy the portal.
459  */
460 void
461 PortalDrop(Portal portal, bool isTopCommit)
462 {
463  AssertArg(PortalIsValid(portal));
464 
465  /*
466  * Don't allow dropping a pinned portal, it's still needed by whoever
467  * pinned it. Not sure if the PORTAL_ACTIVE case can validly happen or
468  * not...
469  */
470  if (portal->portalPinned ||
471  portal->status == PORTAL_ACTIVE)
472  ereport(ERROR,
473  (errcode(ERRCODE_INVALID_CURSOR_STATE),
474  errmsg("cannot drop active portal \"%s\"", portal->name)));
475 
476  /*
477  * Allow portalcmds.c to clean up the state it knows about, in particular
478  * shutting down the executor if still active. This step potentially runs
479  * user-defined code so failure has to be expected. It's the cleanup
480  * hook's responsibility to not try to do that more than once, in the case
481  * that failure occurs and then we come back to drop the portal again
482  * during transaction abort.
483  *
484  * Note: in most paths of control, this will have been done already in
485  * MarkPortalDone or MarkPortalFailed. We're just making sure.
486  */
487  if (PointerIsValid(portal->cleanup))
488  {
489  (*portal->cleanup) (portal);
490  portal->cleanup = NULL;
491  }
492 
493  /*
494  * Remove portal from hash table. Because we do this here, we will not
495  * come back to try to remove the portal again if there's any error in the
496  * subsequent steps. Better to leak a little memory than to get into an
497  * infinite error-recovery loop.
498  */
499  PortalHashTableDelete(portal);
500 
501  /* drop cached plan reference, if any */
502  PortalReleaseCachedPlan(portal);
503 
504  /*
505  * If portal has a snapshot protecting its data, release that. This needs
506  * a little care since the registration will be attached to the portal's
507  * resowner; if the portal failed, we will already have released the
508  * resowner (and the snapshot) during transaction abort.
509  */
510  if (portal->holdSnapshot)
511  {
512  if (portal->resowner)
513  UnregisterSnapshotFromOwner(portal->holdSnapshot,
514  portal->resowner);
515  portal->holdSnapshot = NULL;
516  }
517 
518  /*
519  * Release any resources still attached to the portal. There are several
520  * cases being covered here:
521  *
522  * Top transaction commit (indicated by isTopCommit): normally we should
523  * do nothing here and let the regular end-of-transaction resource
524  * releasing mechanism handle these resources too. However, if we have a
525  * FAILED portal (eg, a cursor that got an error), we'd better clean up
526  * its resources to avoid resource-leakage warning messages.
527  *
528  * Sub transaction commit: never comes here at all, since we don't kill
529  * any portals in AtSubCommit_Portals().
530  *
531  * Main or sub transaction abort: we will do nothing here because
532  * portal->resowner was already set NULL; the resources were already
533  * cleaned up in transaction abort.
534  *
535  * Ordinary portal drop: must release resources. However, if the portal
536  * is not FAILED then we do not release its locks. The locks become the
537  * responsibility of the transaction's ResourceOwner (since it is the
538  * parent of the portal's owner) and will be released when the transaction
539  * eventually ends.
540  */
541  if (portal->resowner &&
542  (!isTopCommit || portal->status == PORTAL_FAILED))
543  {
544  bool isCommit = (portal->status != PORTAL_FAILED);
545 
546  ResourceOwnerRelease(portal->resowner,
547  RESOURCE_RELEASE_BEFORE_LOCKS,
548  isCommit, false);
549  ResourceOwnerRelease(portal->resowner,
550  RESOURCE_RELEASE_LOCKS,
551  isCommit, false);
552  ResourceOwnerRelease(portal->resowner,
553  RESOURCE_RELEASE_AFTER_LOCKS,
554  isCommit, false);
555  ResourceOwnerDelete(portal->resowner);
556  }
557  portal->resowner = NULL;
558 
559  /*
560  * Delete tuplestore if present. We should do this even under error
561  * conditions; since the tuplestore would have been using cross-
562  * transaction storage, its temp files need to be explicitly deleted.
563  */
564  if (portal->holdStore)
565  {
566  MemoryContext oldcontext;
567 
568  oldcontext = MemoryContextSwitchTo(portal->holdContext);
569  tuplestore_end(portal->holdStore);
570  MemoryContextSwitchTo(oldcontext);
571  portal->holdStore = NULL;
572  }
573 
574  /* delete tuplestore storage, if any */
575  if (portal->holdContext)
576  MemoryContextDelete(portal->holdContext);
577 
578  /* release subsidiary storage */
579  MemoryContextDelete(PortalGetHeapMemory(portal));
580 
581  /* release portal struct (it's in PortalMemory) */
582  pfree(portal);
583 }
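
A sketch of the CLOSE-cursor shape, roughly what PerformPortalClose() in portalcmds.c does with this function:

Portal		portal = GetPortalByName(name);

if (!PortalIsValid(portal))
	ereport(ERROR,
			(errcode(ERRCODE_UNDEFINED_CURSOR),
			 errmsg("cursor \"%s\" does not exist", name)));

PortalDrop(portal, false);		/* false: not a commit-time drop */
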
584 
585 /*
586  * Delete all declared cursors.
587  *
588  * Used by commands: CLOSE ALL, DISCARD ALL
589  */
590 void
591 PortalHashTableDeleteAll(void)
592 {
593  HASH_SEQ_STATUS status;
594  PortalHashEnt *hentry;
595 
596  if (PortalHashTable == NULL)
597  return;
598 
599  hash_seq_init(&status, PortalHashTable);
600  while ((hentry = hash_seq_search(&status)) != NULL)
601  {
602  Portal portal = hentry->portal;
603 
604  /* Can't close the active portal (the one running the command) */
605  if (portal->status == PORTAL_ACTIVE)
606  continue;
607 
608  PortalDrop(portal, false);
609 
610  /* Restart the iteration in case that led to other drops */
611  hash_seq_term(&status);
612  hash_seq_init(&status, PortalHashTable);
613  }
614 }
615 
616 
617 /*
618  * Pre-commit processing for portals.
619  *
620  * Holdable cursors created in this transaction need to be converted to
621  * materialized form, since we are going to close down the executor and
622  * release locks. Non-holdable portals created in this transaction are
623  * simply removed. Portals remaining from prior transactions should be
624  * left untouched.
625  *
626  * Returns TRUE if any portals changed state (possibly causing user-defined
627  * code to be run), FALSE if not.
628  */
629 bool
630 PreCommit_Portals(bool isPrepare)
631 {
632  bool result = false;
633  HASH_SEQ_STATUS status;
634  PortalHashEnt *hentry;
635 
636  hash_seq_init(&status, PortalHashTable);
637 
638  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
639  {
640  Portal portal = hentry->portal;
641 
642  /*
643  * There should be no pinned portals anymore. Complain if someone
644  * leaked one.
645  */
646  if (portal->portalPinned)
647  elog(ERROR, "cannot commit while a portal is pinned");
648 
649  /*
650  * Do not touch active portals --- this can only happen in the case of
651  * a multi-transaction utility command, such as VACUUM.
652  *
653  * Note however that any resource owner attached to such a portal is
654  * still going to go away, so don't leave a dangling pointer.
655  */
656  if (portal->status == PORTAL_ACTIVE)
657  {
658  portal->resowner = NULL;
659  continue;
660  }
661 
662  /* Is it a holdable portal created in the current xact? */
663  if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
664  portal->createSubid != InvalidSubTransactionId &&
665  portal->status == PORTAL_READY)
666  {
667  /*
668  * We are exiting the transaction that created a holdable cursor.
669  * Instead of dropping the portal, prepare it for access by later
670  * transactions.
671  *
672  * However, if this is PREPARE TRANSACTION rather than COMMIT,
673  * refuse PREPARE, because the semantics seem pretty unclear.
674  */
675  if (isPrepare)
676  ereport(ERROR,
677  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
678  errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
679 
680  /*
681  * Note that PersistHoldablePortal() must release all resources
682  * used by the portal that are local to the creating transaction.
683  */
684  PortalCreateHoldStore(portal);
685  PersistHoldablePortal(portal);
686 
687  /* drop cached plan reference, if any */
688  PortalReleaseCachedPlan(portal);
689 
690  /*
691  * Any resources belonging to the portal will be released in the
692  * upcoming transaction-wide cleanup; the portal will no longer
693  * have its own resources.
694  */
695  portal->resowner = NULL;
696 
697  /*
698  * Having successfully exported the holdable cursor, mark it as
699  * not belonging to this transaction.
700  */
701  portal->createSubid = InvalidSubTransactionId;
702  portal->activeSubid = InvalidSubTransactionId;
703 
704  /* Report we changed state */
705  result = true;
706  }
707  else if (portal->createSubid == InvalidSubTransactionId)
708  {
709  /*
710  * Do nothing to cursors held over from a previous transaction
711  * (including ones we just froze in a previous cycle of this loop)
712  */
713  continue;
714  }
715  else
716  {
717  /* Zap all non-holdable portals */
718  PortalDrop(portal, true);
719 
720  /* Report we changed state */
721  result = true;
722  }
723 
724  /*
725  * After either freezing or dropping a portal, we have to restart the
726  * iteration, because we could have invoked user-defined code that
727  * caused a drop of the next portal in the hash chain.
728  */
729  hash_seq_term(&status);
730  hash_seq_init(&status, PortalHashTable);
731  }
732 
733  return result;
734 }
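
Because freezing or dropping a portal can run user-defined code that in turn creates or drops other portals, the commit path in xact.c calls this function in a loop until it reports that nothing changed state; schematically:

/* inside CommitTransaction(), simplified */
while (PreCommit_Portals(false))
	/* keep retrying until no portal changes state */ ;
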
735 
736 /*
737  * Abort processing for portals.
738  *
739  * At this point we reset "active" status and run the cleanup hook if
740  * present, but we can't release the portal's memory until the cleanup call.
741  *
742  * The reason we need to reset active is so that we can replace the unnamed
743  * portal, else we'll fail to execute ROLLBACK when it arrives.
744  */
745 void
746 AtAbort_Portals(void)
747 {
748  HASH_SEQ_STATUS status;
749  PortalHashEnt *hentry;
750 
751  hash_seq_init(&status, PortalHashTable);
752 
753  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
754  {
755  Portal portal = hentry->portal;
756 
757  /*
758  * See similar code in AtSubAbort_Portals(). This would fire if code
759  * orchestrating multiple top-level transactions within a portal, such
760  * as VACUUM, caught errors and continued under the same portal with a
761  * fresh transaction. No part of core PostgreSQL functions that way.
762  * XXX Such code would wish the portal to remain ACTIVE, as in
763  * PreCommit_Portals().
764  */
765  if (portal->status == PORTAL_ACTIVE)
766  MarkPortalFailed(portal);
767 
768  /*
769  * Do nothing else to cursors held over from a previous transaction.
770  */
771  if (portal->createSubid == InvalidSubTransactionId)
772  continue;
773 
774  /*
775  * If it was created in the current transaction, we can't do normal
776  * shutdown on a READY portal either; it might refer to objects
777  * created in the failed transaction. See comments in
778  * AtSubAbort_Portals.
779  */
780  if (portal->status == PORTAL_READY)
781  MarkPortalFailed(portal);
782 
783  /*
784  * Allow portalcmds.c to clean up the state it knows about, if we
785  * haven't already.
786  */
787  if (PointerIsValid(portal->cleanup))
788  {
789  (*portal->cleanup) (portal);
790  portal->cleanup = NULL;
791  }
792 
793  /* drop cached plan reference, if any */
794  PortalReleaseCachedPlan(portal);
795 
796  /*
797  * Any resources belonging to the portal will be released in the
798  * upcoming transaction-wide cleanup; they will be gone before we run
799  * PortalDrop.
800  */
801  portal->resowner = NULL;
802 
803  /*
804  * Although we can't delete the portal data structure proper, we can
805  * release any memory in subsidiary contexts, such as executor state.
806  * The cleanup hook was the last thing that might have needed data
807  * there.
808  */
809  MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
810  }
811 }
812 
813 /*
814  * Post-abort cleanup for portals.
815  *
816  * Delete all portals not held over from prior transactions. */
817 void
818 AtCleanup_Portals(void)
819 {
820  HASH_SEQ_STATUS status;
821  PortalHashEnt *hentry;
822 
823  hash_seq_init(&status, PortalHashTable);
824 
825  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
826  {
827  Portal portal = hentry->portal;
828 
829  /* Do nothing to cursors held over from a previous transaction */
830  if (portal->createSubid == InvalidSubTransactionId)
831  {
832  Assert(portal->status != PORTAL_ACTIVE);
833  Assert(portal->resowner == NULL);
834  continue;
835  }
836 
837  /*
838  * If a portal is still pinned, forcibly unpin it. PortalDrop will not
839  * let us drop the portal otherwise. Whoever pinned the portal was
840  * interrupted by the abort too and won't try to use it anymore.
841  */
842  if (portal->portalPinned)
843  portal->portalPinned = false;
844 
845  /* We had better not be calling any user-defined code here */
846  Assert(portal->cleanup == NULL);
847 
848  /* Zap it. */
849  PortalDrop(portal, false);
850  }
851 }
852 
853 /*
854  * Pre-subcommit processing for portals.
855  *
856  * Reassign portals created or used in the current subtransaction to the
857  * parent subtransaction.
858  */
859 void
860 AtSubCommit_Portals(SubTransactionId mySubid,
861  SubTransactionId parentSubid,
862  ResourceOwner parentXactOwner)
863 {
864  HASH_SEQ_STATUS status;
865  PortalHashEnt *hentry;
866 
867  hash_seq_init(&status, PortalHashTable);
868 
869  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
870  {
871  Portal portal = hentry->portal;
872 
873  if (portal->createSubid == mySubid)
874  {
875  portal->createSubid = parentSubid;
876  if (portal->resowner)
877  ResourceOwnerNewParent(portal->resowner, parentXactOwner);
878  }
879  if (portal->activeSubid == mySubid)
880  portal->activeSubid = parentSubid;
881  }
882 }
883 
884 /*
885  * Subtransaction abort handling for portals.
886  *
887  * Deactivate portals created or used during the failed subtransaction.
888  * Note that per AtSubCommit_Portals, this will catch portals created/used
889  * in descendants of the subtransaction too.
890  *
891  * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
892  */
893 void
894 AtSubAbort_Portals(SubTransactionId mySubid,
895  SubTransactionId parentSubid,
896  ResourceOwner myXactOwner,
897  ResourceOwner parentXactOwner)
898 {
899  HASH_SEQ_STATUS status;
900  PortalHashEnt *hentry;
901 
902  hash_seq_init(&status, PortalHashTable);
903 
904  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
905  {
906  Portal portal = hentry->portal;
907 
908  /* Was it created in this subtransaction? */
909  if (portal->createSubid != mySubid)
910  {
911  /* No, but maybe it was used in this subtransaction? */
912  if (portal->activeSubid == mySubid)
913  {
914  /* Maintain activeSubid until the portal is removed */
915  portal->activeSubid = parentSubid;
916 
917  /*
918  * A MarkPortalActive() caller ran an upper-level portal in
919  * this subtransaction and left the portal ACTIVE. This can't
920  * happen, but force the portal into FAILED state for the same
921  * reasons discussed below.
922  *
923  * We assume we can get away without forcing upper-level READY
924  * portals to fail, even if they were run and then suspended.
925  * In theory a suspended upper-level portal could have
926  * acquired some references to objects that are about to be
927  * destroyed, but there should be sufficient defenses against
928  * such cases: the portal's original query cannot contain such
929  * references, and any references within, say, cached plans of
930  * PL/pgSQL functions are not from active queries and should
931  * be protected by revalidation logic.
932  */
933  if (portal->status == PORTAL_ACTIVE)
934  MarkPortalFailed(portal);
935 
936  /*
937  * Also, if we failed it during the current subtransaction
938  * (either just above, or earlier), reattach its resource
939  * owner to the current subtransaction's resource owner, so
940  * that any resources it still holds will be released while
941  * cleaning up this subtransaction. This prevents some corner
942  * cases wherein we might get Asserts or worse while cleaning
943  * up objects created during the current subtransaction
944  * (because they're still referenced within this portal).
945  */
946  if (portal->status == PORTAL_FAILED && portal->resowner)
947  {
948  ResourceOwnerNewParent(portal->resowner, myXactOwner);
949  portal->resowner = NULL;
950  }
951  }
952  /* Done if it wasn't created in this subtransaction */
953  continue;
954  }
955 
956  /*
957  * Force any live portals of my own subtransaction into FAILED state.
958  * We have to do this because they might refer to objects created or
959  * changed in the failed subtransaction, leading to crashes within
960  * ExecutorEnd when portalcmds.c tries to close down the portal.
961  * Currently, every MarkPortalActive() caller ensures it updates the
962  * portal status again before relinquishing control, so ACTIVE can't
963  * happen here. If it does happen, dispose the portal like existing
964  * MarkPortalActive() callers would.
965  */
966  if (portal->status == PORTAL_READY ||
967  portal->status == PORTAL_ACTIVE)
968  MarkPortalFailed(portal);
969 
970  /*
971  * Allow portalcmds.c to clean up the state it knows about, if we
972  * haven't already.
973  */
974  if (PointerIsValid(portal->cleanup))
975  {
976  (*portal->cleanup) (portal);
977  portal->cleanup = NULL;
978  }
979 
980  /* drop cached plan reference, if any */
981  PortalReleaseCachedPlan(portal);
982 
983  /*
984  * Any resources belonging to the portal will be released in the
985  * upcoming transaction-wide cleanup; they will be gone before we run
986  * PortalDrop.
987  */
988  portal->resowner = NULL;
989 
990  /*
991  * Although we can't delete the portal data structure proper, we can
992  * release any memory in subsidiary contexts, such as executor state.
993  * The cleanup hook was the last thing that might have needed data
994  * there.
995  */
996  MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
997  }
998 }
999 
1000 /*
1001  * Post-subabort cleanup for portals.
1002  *
1003  * Drop all portals created in the failed subtransaction (but note that
1004  * we will not drop any that were reassigned to the parent above).
1005  */
1006 void
1007 AtSubCleanup_Portals(SubTransactionId mySubid)
1008 {
1009  HASH_SEQ_STATUS status;
1010  PortalHashEnt *hentry;
1011 
1012  hash_seq_init(&status, PortalHashTable);
1013 
1014  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1015  {
1016  Portal portal = hentry->portal;
1017 
1018  if (portal->createSubid != mySubid)
1019  continue;
1020 
1021  /*
1022  * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1023  * let us drop the portal otherwise. Whoever pinned the portal was
1024  * interrupted by the abort too and won't try to use it anymore.
1025  */
1026  if (portal->portalPinned)
1027  portal->portalPinned = false;
1028 
1029  /* We had better not be calling any user-defined code here */
1030  Assert(portal->cleanup == NULL);
1031 
1032  /* Zap it. */
1033  PortalDrop(portal, false);
1034  }
1035 }
1036 
1037 /* Find all available cursors */
1038 Datum
1039 pg_cursor(PG_FUNCTION_ARGS)
1040 {
1041  ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1042  TupleDesc tupdesc;
1043  Tuplestorestate *tupstore;
1044  MemoryContext per_query_ctx;
1045  MemoryContext oldcontext;
1046  HASH_SEQ_STATUS hash_seq;
1047  PortalHashEnt *hentry;
1048 
1049  /* check to see if caller supports us returning a tuplestore */
1050  if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
1051  ereport(ERROR,
1052  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1053  errmsg("set-valued function called in context that cannot accept a set")));
1054  if (!(rsinfo->allowedModes & SFRM_Materialize))
1055  ereport(ERROR,
1056  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1057  errmsg("materialize mode required, but it is not " \
1058  "allowed in this context")));
1059 
1060  /* need to build tuplestore in query context */
1061  per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
1062  oldcontext = MemoryContextSwitchTo(per_query_ctx);
1063 
1064  /*
1065  * build tupdesc for result tuples. This must match the definition of the
1066  * pg_cursors view in system_views.sql
1067  */
1068  tupdesc = CreateTemplateTupleDesc(6, false);
1069  TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
1070  TEXTOID, -1, 0);
1071  TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
1072  TEXTOID, -1, 0);
1073  TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
1074  BOOLOID, -1, 0);
1075  TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
1076  BOOLOID, -1, 0);
1077  TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
1078  BOOLOID, -1, 0);
1079  TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
1080  TIMESTAMPTZOID, -1, 0);
1081 
1082  /*
1083  * We put all the tuples into a tuplestore in one scan of the hashtable.
1084  * This avoids any issue of the hashtable possibly changing between calls.
1085  */
1086  tupstore =
1087  tuplestore_begin_heap(true,
1088  false, work_mem);
1089 
1090  /* generate junk in short-term context */
1091  MemoryContextSwitchTo(oldcontext);
1092 
1093  hash_seq_init(&hash_seq, PortalHashTable);
1094  while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1095  {
1096  Portal portal = hentry->portal;
1097  Datum values[6];
1098  bool nulls[6];
1099 
1100  /* report only "visible" entries */
1101  if (!portal->visible)
1102  continue;
1103 
1104  MemSet(nulls, 0, sizeof(nulls));
1105 
1106  values[0] = CStringGetTextDatum(portal->name);
1107  values[1] = CStringGetTextDatum(portal->sourceText);
1108  values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1109  values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1110  values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1111  values[5] = TimestampTzGetDatum(portal->creation_time);
1112 
1113  tuplestore_putvalues(tupstore, tupdesc, values, nulls);
1114  }
1115 
1116  /* clean up and return the tuplestore */
1117  tuplestore_donestoring(tupstore);
1118 
1119  rsinfo->returnMode = SFRM_Materialize;
1120  rsinfo->setResult = tupstore;
1121  rsinfo->setDesc = tupdesc;
1122 
1123  return (Datum) 0;
1124 }
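
At the SQL level this function backs the pg_cursors system view, so open, visible cursors can be inspected with, e.g., SELECT name, statement, is_holdable FROM pg_cursors. Materializing everything in one scan of the hash table means the view's output is a stable snapshot even if cursors are closed while the result set is still being read.
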
1125 
1126 bool
1127 ThereAreNoReadyPortals(void)
1128 {
1129  HASH_SEQ_STATUS status;
1130  PortalHashEnt *hentry;
1131 
1132  hash_seq_init(&status, PortalHashTable);
1133 
1134  while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1135  {
1136  Portal portal = hentry->portal;
1137 
1138  if (portal->status == PORTAL_READY)
1139  return false;
1140  }
1141 
1142  return true;
1143 }