PostgreSQL Source Code git master
portalmem.c
Go to the documentation of this file.
/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *	  backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/portalmem.c
 *
 *-------------------------------------------------------------------------
 */
19#include "postgres.h"
20
21#include "access/xact.h"
22#include "commands/portalcmds.h"
23#include "funcapi.h"
24#include "miscadmin.h"
25#include "storage/ipc.h"
26#include "utils/builtins.h"
27#include "utils/memutils.h"
28#include "utils/snapmgr.h"
29#include "utils/timestamp.h"
30
31/*
32 * Estimate of the maximum number of open portals a user would have,
33 * used in initially sizing the PortalHashTable in EnablePortalManager().
34 * Since the hash table can expand, there's no need to make this overly
35 * generous, and keeping it small avoids unnecessary overhead in the
36 * hash_seq_search() calls executed during transaction end.
37 */
38#define PORTALS_PER_USER 16
39
40
41/* ----------------
42 * Global state
43 * ----------------
44 */
45
46#define MAX_PORTALNAME_LEN NAMEDATALEN
47
48typedef struct portalhashent
49{
53
54static HTAB *PortalHashTable = NULL;
55
56#define PortalHashTableLookup(NAME, PORTAL) \
57do { \
58 PortalHashEnt *hentry; \
59 \
60 hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
61 (NAME), HASH_FIND, NULL); \
62 if (hentry) \
63 PORTAL = hentry->portal; \
64 else \
65 PORTAL = NULL; \
66} while(0)
67
68#define PortalHashTableInsert(PORTAL, NAME) \
69do { \
70 PortalHashEnt *hentry; bool found; \
71 \
72 hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
73 (NAME), HASH_ENTER, &found); \
74 if (found) \
75 elog(ERROR, "duplicate portal name"); \
76 hentry->portal = PORTAL; \
77 /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
78 PORTAL->name = hentry->portalname; \
79} while(0)
80
81#define PortalHashTableDelete(PORTAL) \
82do { \
83 PortalHashEnt *hentry; \
84 \
85 hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
86 PORTAL->name, HASH_REMOVE, NULL); \
87 if (hentry == NULL) \
88 elog(WARNING, "trying to delete portal name that does not exist"); \
89} while(0)
90
92
93
94/* ----------------------------------------------------------------
95 * public portal interface functions
96 * ----------------------------------------------------------------
97 */
98
99/*
100 * EnablePortalManager
101 * Enables the portal management module at backend startup.
102 */
103void
105{
106 HASHCTL ctl;
107
108 Assert(TopPortalContext == NULL);
109
111 "TopPortalContext",
113
114 ctl.keysize = MAX_PORTALNAME_LEN;
115 ctl.entrysize = sizeof(PortalHashEnt);
116
117 /*
118 * use PORTALS_PER_USER as a guess of how many hash table entries to
119 * create, initially
120 */
123}
124
125/*
126 * GetPortalByName
127 * Returns a portal given a portal name, or NULL if name not found.
128 */
129Portal
131{
132 Portal portal;
133
134 if (PointerIsValid(name))
136 else
137 portal = NULL;
138
139 return portal;
140}
141
142/*
143 * PortalGetPrimaryStmt
144 * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145 *
146 * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147 * portal are marked canSetTag, returns the first one. Neither of these
148 * cases should occur in present usages of this function.
149 */
152{
153 ListCell *lc;
154
155 foreach(lc, portal->stmts)
156 {
158
159 if (stmt->canSetTag)
160 return stmt;
161 }
162 return NULL;
163}
164
165/*
166 * CreatePortal
167 * Returns a new portal given a name.
168 *
169 * allowDup: if true, automatically drop any pre-existing portal of the
170 * same name (if false, an error is raised).
171 *
172 * dupSilent: if true, don't even emit a WARNING.
173 */
174Portal
175CreatePortal(const char *name, bool allowDup, bool dupSilent)
176{
177 Portal portal;
178
180
181 portal = GetPortalByName(name);
182 if (PortalIsValid(portal))
183 {
184 if (!allowDup)
186 (errcode(ERRCODE_DUPLICATE_CURSOR),
187 errmsg("cursor \"%s\" already exists", name)));
188 if (!dupSilent)
190 (errcode(ERRCODE_DUPLICATE_CURSOR),
191 errmsg("closing existing cursor \"%s\"",
192 name)));
193 PortalDrop(portal, false);
194 }
195
196 /* make new portal structure */
197 portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
198
199 /* initialize portal context; typically it won't store much */
201 "PortalContext",
203
204 /* create a resource owner for the portal */
206 "Portal");
207
208 /* initialize portal fields that don't start off zero */
209 portal->status = PORTAL_NEW;
210 portal->cleanup = PortalCleanup;
212 portal->activeSubid = portal->createSubid;
216 portal->atStart = true;
217 portal->atEnd = true; /* disallow fetches until query is set */
218 portal->visible = true;
220
221 /* put portal in table (sets portal->name) */
223
224 /* for named portals reuse portal->name copy */
225 MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
226
227 return portal;
228}
229
230/*
231 * CreateNewPortal
232 * Create a new portal, assigning it a random nonconflicting name.
233 */
234Portal
236{
237 static unsigned int unnamed_portal_count = 0;
238
239 char portalname[MAX_PORTALNAME_LEN];
240
241 /* Select a nonconflicting name */
242 for (;;)
243 {
244 unnamed_portal_count++;
245 sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
246 if (GetPortalByName(portalname) == NULL)
247 break;
248 }
249
250 return CreatePortal(portalname, false, false);
251}
252
253/*
254 * PortalDefineQuery
255 * A simple subroutine to establish a portal's query.
256 *
257 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
258 * allowed anymore to pass NULL. (If you really don't have source text,
259 * you can pass a constant string, perhaps "(query not available)".)
260 *
261 * commandTag shall be NULL if and only if the original query string
262 * (before rewriting) was an empty string. Also, the passed commandTag must
263 * be a pointer to a constant string, since it is not copied.
264 *
265 * If cplan is provided, then it is a cached plan containing the stmts, and
266 * the caller must have done GetCachedPlan(), causing a refcount increment.
267 * The refcount will be released when the portal is destroyed.
268 *
269 * If cplan is NULL, then it is the caller's responsibility to ensure that
270 * the passed plan trees have adequate lifetime. Typically this is done by
271 * copying them into the portal's context.
272 *
273 * The caller is also responsible for ensuring that the passed prepStmtName
274 * (if not NULL) and sourceText have adequate lifetime.
275 *
276 * NB: this function mustn't do much beyond storing the passed values; in
277 * particular don't do anything that risks elog(ERROR). If that were to
278 * happen here before storing the cplan reference, we'd leak the plancache
279 * refcount that the caller is trying to hand off to us.
280 */
281void
283 const char *prepStmtName,
284 const char *sourceText,
285 CommandTag commandTag,
286 List *stmts,
287 CachedPlan *cplan,
288 CachedPlanSource *plansource)
289{
290 Assert(PortalIsValid(portal));
291 Assert(portal->status == PORTAL_NEW);
292
293 Assert(sourceText != NULL);
294 Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
295
296 portal->prepStmtName = prepStmtName;
297 portal->sourceText = sourceText;
298 portal->qc.commandTag = commandTag;
299 portal->qc.nprocessed = 0;
300 portal->commandTag = commandTag;
301 portal->stmts = stmts;
302 portal->cplan = cplan;
303 portal->plansource = plansource;
304 portal->status = PORTAL_DEFINED;
305}
306
307/*
308 * PortalReleaseCachedPlan
309 * Release a portal's reference to its cached plan, if any.
310 */
311static void
313{
314 if (portal->cplan)
315 {
316 ReleaseCachedPlan(portal->cplan, NULL);
317 portal->cplan = NULL;
318
319 /*
320 * We must also clear portal->stmts which is now a dangling reference
321 * to the cached plan's plan list. This protects any code that might
322 * try to examine the Portal later.
323 */
324 portal->stmts = NIL;
325 }
326}
327
328/*
329 * PortalCreateHoldStore
330 * Create the tuplestore for a portal.
331 */
332void
334{
335 MemoryContext oldcxt;
336
337 Assert(portal->holdContext == NULL);
338 Assert(portal->holdStore == NULL);
339 Assert(portal->holdSnapshot == NULL);
340
341 /*
342 * Create the memory context that is used for storage of the tuple set.
343 * Note this is NOT a child of the portal's portalContext.
344 */
345 portal->holdContext =
347 "PortalHoldContext",
349
350 /*
351 * Create the tuple store, selecting cross-transaction temp files, and
352 * enabling random access only if cursor requires scrolling.
353 *
354 * XXX: Should maintenance_work_mem be used for the portal size?
355 */
356 oldcxt = MemoryContextSwitchTo(portal->holdContext);
357
358 portal->holdStore =
360 true, work_mem);
361
362 MemoryContextSwitchTo(oldcxt);
363}
364
365/*
366 * PinPortal
367 * Protect a portal from dropping.
368 *
369 * A pinned portal is still unpinned and dropped at transaction or
370 * subtransaction abort.
371 */
372void
374{
375 if (portal->portalPinned)
376 elog(ERROR, "portal already pinned");
377
378 portal->portalPinned = true;
379}
380
381void
383{
384 if (!portal->portalPinned)
385 elog(ERROR, "portal not pinned");
386
387 portal->portalPinned = false;
388}
389
390/*
391 * MarkPortalActive
392 * Transition a portal from READY to ACTIVE state.
393 *
394 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
395 */
396void
398{
399 /* For safety, this is a runtime test not just an Assert */
400 if (portal->status != PORTAL_READY)
402 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
403 errmsg("portal \"%s\" cannot be run", portal->name)));
404 /* Perform the state transition */
405 portal->status = PORTAL_ACTIVE;
407}
408
409/*
410 * MarkPortalDone
411 * Transition a portal from ACTIVE to DONE state.
412 *
413 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
414 */
415void
417{
418 /* Perform the state transition */
419 Assert(portal->status == PORTAL_ACTIVE);
420 portal->status = PORTAL_DONE;
421
422 /*
423 * Allow portalcmds.c to clean up the state it knows about. We might as
424 * well do that now, since the portal can't be executed any more.
425 *
426 * In some cases involving execution of a ROLLBACK command in an already
427 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
428 * with the cleanup hook still unexecuted.
429 */
430 if (PointerIsValid(portal->cleanup))
431 {
432 portal->cleanup(portal);
433 portal->cleanup = NULL;
434 }
435}
436
437/*
438 * MarkPortalFailed
439 * Transition a portal into FAILED state.
440 *
441 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
442 */
443void
445{
446 /* Perform the state transition */
447 Assert(portal->status != PORTAL_DONE);
448 portal->status = PORTAL_FAILED;
449
450 /*
451 * Allow portalcmds.c to clean up the state it knows about. We might as
452 * well do that now, since the portal can't be executed any more.
453 *
454 * In some cases involving cleanup of an already aborted transaction, this
455 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
456 * still unexecuted.
457 */
458 if (PointerIsValid(portal->cleanup))
459 {
460 portal->cleanup(portal);
461 portal->cleanup = NULL;
462 }
463}
464
465/*
466 * PortalDrop
467 * Destroy the portal.
468 */
469void
470PortalDrop(Portal portal, bool isTopCommit)
471{
472 Assert(PortalIsValid(portal));
473
474 /*
475 * Don't allow dropping a pinned portal, it's still needed by whoever
476 * pinned it.
477 */
478 if (portal->portalPinned)
480 (errcode(ERRCODE_INVALID_CURSOR_STATE),
481 errmsg("cannot drop pinned portal \"%s\"", portal->name)));
482
483 /*
484 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
485 */
486 if (portal->status == PORTAL_ACTIVE)
488 (errcode(ERRCODE_INVALID_CURSOR_STATE),
489 errmsg("cannot drop active portal \"%s\"", portal->name)));
490
491 /*
492 * Allow portalcmds.c to clean up the state it knows about, in particular
493 * shutting down the executor if still active. This step potentially runs
494 * user-defined code so failure has to be expected. It's the cleanup
495 * hook's responsibility to not try to do that more than once, in the case
496 * that failure occurs and then we come back to drop the portal again
497 * during transaction abort.
498 *
499 * Note: in most paths of control, this will have been done already in
500 * MarkPortalDone or MarkPortalFailed. We're just making sure.
501 */
502 if (PointerIsValid(portal->cleanup))
503 {
504 portal->cleanup(portal);
505 portal->cleanup = NULL;
506 }
507
508 /* There shouldn't be an active snapshot anymore, except after error */
509 Assert(portal->portalSnapshot == NULL || !isTopCommit);
510
511 /*
512 * Remove portal from hash table. Because we do this here, we will not
513 * come back to try to remove the portal again if there's any error in the
514 * subsequent steps. Better to leak a little memory than to get into an
515 * infinite error-recovery loop.
516 */
517 PortalHashTableDelete(portal);
518
519 /* drop cached plan reference, if any */
521
522 /*
523 * If portal has a snapshot protecting its data, release that. This needs
524 * a little care since the registration will be attached to the portal's
525 * resowner; if the portal failed, we will already have released the
526 * resowner (and the snapshot) during transaction abort.
527 */
528 if (portal->holdSnapshot)
529 {
530 if (portal->resowner)
532 portal->resowner);
533 portal->holdSnapshot = NULL;
534 }
535
536 /*
537 * Release any resources still attached to the portal. There are several
538 * cases being covered here:
539 *
540 * Top transaction commit (indicated by isTopCommit): normally we should
541 * do nothing here and let the regular end-of-transaction resource
542 * releasing mechanism handle these resources too. However, if we have a
543 * FAILED portal (eg, a cursor that got an error), we'd better clean up
544 * its resources to avoid resource-leakage warning messages.
545 *
546 * Sub transaction commit: never comes here at all, since we don't kill
547 * any portals in AtSubCommit_Portals().
548 *
549 * Main or sub transaction abort: we will do nothing here because
550 * portal->resowner was already set NULL; the resources were already
551 * cleaned up in transaction abort.
552 *
553 * Ordinary portal drop: must release resources. However, if the portal
554 * is not FAILED then we do not release its locks. The locks become the
555 * responsibility of the transaction's ResourceOwner (since it is the
556 * parent of the portal's owner) and will be released when the transaction
557 * eventually ends.
558 */
559 if (portal->resowner &&
560 (!isTopCommit || portal->status == PORTAL_FAILED))
561 {
562 bool isCommit = (portal->status != PORTAL_FAILED);
563
566 isCommit, false);
569 isCommit, false);
572 isCommit, false);
574 }
575 portal->resowner = NULL;
576
577 /*
578 * Delete tuplestore if present. We should do this even under error
579 * conditions; since the tuplestore would have been using cross-
580 * transaction storage, its temp files need to be explicitly deleted.
581 */
582 if (portal->holdStore)
583 {
584 MemoryContext oldcontext;
585
586 oldcontext = MemoryContextSwitchTo(portal->holdContext);
587 tuplestore_end(portal->holdStore);
588 MemoryContextSwitchTo(oldcontext);
589 portal->holdStore = NULL;
590 }
591
592 /* delete tuplestore storage, if any */
593 if (portal->holdContext)
595
596 /* release subsidiary storage */
598
599 /* release portal struct (it's in TopPortalContext) */
600 pfree(portal);
601}
602
603/*
604 * Delete all declared cursors.
605 *
606 * Used by commands: CLOSE ALL, DISCARD ALL
607 */
608void
610{
611 HASH_SEQ_STATUS status;
612 PortalHashEnt *hentry;
613
614 if (PortalHashTable == NULL)
615 return;
616
618 while ((hentry = hash_seq_search(&status)) != NULL)
619 {
620 Portal portal = hentry->portal;
621
622 /* Can't close the active portal (the one running the command) */
623 if (portal->status == PORTAL_ACTIVE)
624 continue;
625
626 PortalDrop(portal, false);
627
628 /* Restart the iteration in case that led to other drops */
629 hash_seq_term(&status);
631 }
632}
633
634/*
635 * "Hold" a portal. Prepare it for access by later transactions.
636 */
637static void
639{
640 /*
641 * Note that PersistHoldablePortal() must release all resources used by
642 * the portal that are local to the creating transaction.
643 */
644 PortalCreateHoldStore(portal);
645 PersistHoldablePortal(portal);
646
647 /* drop cached plan reference, if any */
649
650 /*
651 * Any resources belonging to the portal will be released in the upcoming
652 * transaction-wide cleanup; the portal will no longer have its own
653 * resources.
654 */
655 portal->resowner = NULL;
656
657 /*
658 * Having successfully exported the holdable cursor, mark it as not
659 * belonging to this transaction.
660 */
663 portal->createLevel = 0;
664}
665
666/*
667 * Pre-commit processing for portals.
668 *
669 * Holdable cursors created in this transaction need to be converted to
670 * materialized form, since we are going to close down the executor and
671 * release locks. Non-holdable portals created in this transaction are
672 * simply removed. Portals remaining from prior transactions should be
673 * left untouched.
674 *
675 * Returns true if any portals changed state (possibly causing user-defined
676 * code to be run), false if not.
677 */
678bool
679PreCommit_Portals(bool isPrepare)
680{
681 bool result = false;
682 HASH_SEQ_STATUS status;
683 PortalHashEnt *hentry;
684
686
687 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
688 {
689 Portal portal = hentry->portal;
690
691 /*
692 * There should be no pinned portals anymore. Complain if someone
693 * leaked one. Auto-held portals are allowed; we assume that whoever
694 * pinned them is managing them.
695 */
696 if (portal->portalPinned && !portal->autoHeld)
697 elog(ERROR, "cannot commit while a portal is pinned");
698
699 /*
700 * Do not touch active portals --- this can only happen in the case of
701 * a multi-transaction utility command, such as VACUUM, or a commit in
702 * a procedure.
703 *
704 * Note however that any resource owner attached to such a portal is
705 * still going to go away, so don't leave a dangling pointer. Also
706 * unregister any snapshots held by the portal, mainly to avoid
707 * snapshot leak warnings from ResourceOwnerRelease().
708 */
709 if (portal->status == PORTAL_ACTIVE)
710 {
711 if (portal->holdSnapshot)
712 {
713 if (portal->resowner)
715 portal->resowner);
716 portal->holdSnapshot = NULL;
717 }
718 portal->resowner = NULL;
719 /* Clear portalSnapshot too, for cleanliness */
720 portal->portalSnapshot = NULL;
721 continue;
722 }
723
724 /* Is it a holdable portal created in the current xact? */
725 if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
727 portal->status == PORTAL_READY)
728 {
729 /*
730 * We are exiting the transaction that created a holdable cursor.
731 * Instead of dropping the portal, prepare it for access by later
732 * transactions.
733 *
734 * However, if this is PREPARE TRANSACTION rather than COMMIT,
735 * refuse PREPARE, because the semantics seem pretty unclear.
736 */
737 if (isPrepare)
739 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
740 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
741
742 HoldPortal(portal);
743
744 /* Report we changed state */
745 result = true;
746 }
747 else if (portal->createSubid == InvalidSubTransactionId)
748 {
749 /*
750 * Do nothing to cursors held over from a previous transaction
751 * (including ones we just froze in a previous cycle of this loop)
752 */
753 continue;
754 }
755 else
756 {
757 /* Zap all non-holdable portals */
758 PortalDrop(portal, true);
759
760 /* Report we changed state */
761 result = true;
762 }
763
764 /*
765 * After either freezing or dropping a portal, we have to restart the
766 * iteration, because we could have invoked user-defined code that
767 * caused a drop of the next portal in the hash chain.
768 */
769 hash_seq_term(&status);
771 }
772
773 return result;
774}
775
776/*
777 * Abort processing for portals.
778 *
779 * At this point we run the cleanup hook if present, but we can't release the
780 * portal's memory until the cleanup call.
781 */
782void
784{
785 HASH_SEQ_STATUS status;
786 PortalHashEnt *hentry;
787
789
790 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
791 {
792 Portal portal = hentry->portal;
793
794 /*
795 * When elog(FATAL) is progress, we need to set the active portal to
796 * failed, so that PortalCleanup() doesn't run the executor shutdown.
797 */
799 MarkPortalFailed(portal);
800
801 /*
802 * Do nothing else to cursors held over from a previous transaction.
803 */
805 continue;
806
807 /*
808 * Do nothing to auto-held cursors. This is similar to the case of a
809 * cursor from a previous transaction, but it could also be that the
810 * cursor was auto-held in this transaction, so it wants to live on.
811 */
812 if (portal->autoHeld)
813 continue;
814
815 /*
816 * If it was created in the current transaction, we can't do normal
817 * shutdown on a READY portal either; it might refer to objects
818 * created in the failed transaction. See comments in
819 * AtSubAbort_Portals.
820 */
821 if (portal->status == PORTAL_READY)
822 MarkPortalFailed(portal);
823
824 /*
825 * Allow portalcmds.c to clean up the state it knows about, if we
826 * haven't already.
827 */
828 if (PointerIsValid(portal->cleanup))
829 {
830 portal->cleanup(portal);
831 portal->cleanup = NULL;
832 }
833
834 /* drop cached plan reference, if any */
836
837 /*
838 * Any resources belonging to the portal will be released in the
839 * upcoming transaction-wide cleanup; they will be gone before we run
840 * PortalDrop.
841 */
842 portal->resowner = NULL;
843
844 /*
845 * Although we can't delete the portal data structure proper, we can
846 * release any memory in subsidiary contexts, such as executor state.
847 * The cleanup hook was the last thing that might have needed data
848 * there. But leave active portals alone.
849 */
850 if (portal->status != PORTAL_ACTIVE)
852 }
853}
854
855/*
856 * Post-abort cleanup for portals.
857 *
858 * Delete all portals not held over from prior transactions. */
859void
861{
862 HASH_SEQ_STATUS status;
863 PortalHashEnt *hentry;
864
866
867 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
868 {
869 Portal portal = hentry->portal;
870
871 /*
872 * Do not touch active portals --- this can only happen in the case of
873 * a multi-transaction command.
874 */
875 if (portal->status == PORTAL_ACTIVE)
876 continue;
877
878 /*
879 * Do nothing to cursors held over from a previous transaction or
880 * auto-held ones.
881 */
882 if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
883 {
884 Assert(portal->status != PORTAL_ACTIVE);
885 Assert(portal->resowner == NULL);
886 continue;
887 }
888
889 /*
890 * If a portal is still pinned, forcibly unpin it. PortalDrop will not
891 * let us drop the portal otherwise. Whoever pinned the portal was
892 * interrupted by the abort too and won't try to use it anymore.
893 */
894 if (portal->portalPinned)
895 portal->portalPinned = false;
896
897 /*
898 * We had better not call any user-defined code during cleanup, so if
899 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
900 */
901 if (PointerIsValid(portal->cleanup))
902 {
903 elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
904 portal->cleanup = NULL;
905 }
906
907 /* Zap it. */
908 PortalDrop(portal, false);
909 }
910}
911
912/*
913 * Portal-related cleanup when we return to the main loop on error.
914 *
915 * This is different from the cleanup at transaction abort. Auto-held portals
916 * are cleaned up on error but not on transaction abort.
917 */
918void
920{
921 HASH_SEQ_STATUS status;
922 PortalHashEnt *hentry;
923
925
926 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
927 {
928 Portal portal = hentry->portal;
929
930 if (portal->autoHeld)
931 {
932 portal->portalPinned = false;
933 PortalDrop(portal, false);
934 }
935 }
936}
937
938/*
939 * Pre-subcommit processing for portals.
940 *
941 * Reassign portals created or used in the current subtransaction to the
942 * parent subtransaction.
943 */
944void
946 SubTransactionId parentSubid,
947 int parentLevel,
948 ResourceOwner parentXactOwner)
949{
950 HASH_SEQ_STATUS status;
951 PortalHashEnt *hentry;
952
954
955 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
956 {
957 Portal portal = hentry->portal;
958
959 if (portal->createSubid == mySubid)
960 {
961 portal->createSubid = parentSubid;
962 portal->createLevel = parentLevel;
963 if (portal->resowner)
964 ResourceOwnerNewParent(portal->resowner, parentXactOwner);
965 }
966 if (portal->activeSubid == mySubid)
967 portal->activeSubid = parentSubid;
968 }
969}
970
971/*
972 * Subtransaction abort handling for portals.
973 *
974 * Deactivate portals created or used during the failed subtransaction.
975 * Note that per AtSubCommit_Portals, this will catch portals created/used
976 * in descendants of the subtransaction too.
977 *
978 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
979 */
980void
982 SubTransactionId parentSubid,
983 ResourceOwner myXactOwner,
984 ResourceOwner parentXactOwner)
985{
986 HASH_SEQ_STATUS status;
987 PortalHashEnt *hentry;
988
990
991 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
992 {
993 Portal portal = hentry->portal;
994
995 /* Was it created in this subtransaction? */
996 if (portal->createSubid != mySubid)
997 {
998 /* No, but maybe it was used in this subtransaction? */
999 if (portal->activeSubid == mySubid)
1000 {
1001 /* Maintain activeSubid until the portal is removed */
1002 portal->activeSubid = parentSubid;
1003
1004 /*
1005 * A MarkPortalActive() caller ran an upper-level portal in
1006 * this subtransaction and left the portal ACTIVE. This can't
1007 * happen, but force the portal into FAILED state for the same
1008 * reasons discussed below.
1009 *
1010 * We assume we can get away without forcing upper-level READY
1011 * portals to fail, even if they were run and then suspended.
1012 * In theory a suspended upper-level portal could have
1013 * acquired some references to objects that are about to be
1014 * destroyed, but there should be sufficient defenses against
1015 * such cases: the portal's original query cannot contain such
1016 * references, and any references within, say, cached plans of
1017 * PL/pgSQL functions are not from active queries and should
1018 * be protected by revalidation logic.
1019 */
1020 if (portal->status == PORTAL_ACTIVE)
1021 MarkPortalFailed(portal);
1022
1023 /*
1024 * Also, if we failed it during the current subtransaction
1025 * (either just above, or earlier), reattach its resource
1026 * owner to the current subtransaction's resource owner, so
1027 * that any resources it still holds will be released while
1028 * cleaning up this subtransaction. This prevents some corner
1029 * cases wherein we might get Asserts or worse while cleaning
1030 * up objects created during the current subtransaction
1031 * (because they're still referenced within this portal).
1032 */
1033 if (portal->status == PORTAL_FAILED && portal->resowner)
1034 {
1035 ResourceOwnerNewParent(portal->resowner, myXactOwner);
1036 portal->resowner = NULL;
1037 }
1038 }
1039 /* Done if it wasn't created in this subtransaction */
1040 continue;
1041 }
1042
1043 /*
1044 * Force any live portals of my own subtransaction into FAILED state.
1045 * We have to do this because they might refer to objects created or
1046 * changed in the failed subtransaction, leading to crashes within
1047 * ExecutorEnd when portalcmds.c tries to close down the portal.
1048 * Currently, every MarkPortalActive() caller ensures it updates the
1049 * portal status again before relinquishing control, so ACTIVE can't
1050 * happen here. If it does happen, dispose the portal like existing
1051 * MarkPortalActive() callers would.
1052 */
1053 if (portal->status == PORTAL_READY ||
1054 portal->status == PORTAL_ACTIVE)
1055 MarkPortalFailed(portal);
1056
1057 /*
1058 * Allow portalcmds.c to clean up the state it knows about, if we
1059 * haven't already.
1060 */
1061 if (PointerIsValid(portal->cleanup))
1062 {
1063 portal->cleanup(portal);
1064 portal->cleanup = NULL;
1065 }
1066
1067 /* drop cached plan reference, if any */
1069
1070 /*
1071 * Any resources belonging to the portal will be released in the
1072 * upcoming transaction-wide cleanup; they will be gone before we run
1073 * PortalDrop.
1074 */
1075 portal->resowner = NULL;
1076
1077 /*
1078 * Although we can't delete the portal data structure proper, we can
1079 * release any memory in subsidiary contexts, such as executor state.
1080 * The cleanup hook was the last thing that might have needed data
1081 * there.
1082 */
1084 }
1085}
1086
1087/*
1088 * Post-subabort cleanup for portals.
1089 *
1090 * Drop all portals created in the failed subtransaction (but note that
1091 * we will not drop any that were reassigned to the parent above).
1092 */
1093void
1095{
1096 HASH_SEQ_STATUS status;
1097 PortalHashEnt *hentry;
1098
1100
1101 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1102 {
1103 Portal portal = hentry->portal;
1104
1105 if (portal->createSubid != mySubid)
1106 continue;
1107
1108 /*
1109 * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1110 * let us drop the portal otherwise. Whoever pinned the portal was
1111 * interrupted by the abort too and won't try to use it anymore.
1112 */
1113 if (portal->portalPinned)
1114 portal->portalPinned = false;
1115
1116 /*
1117 * We had better not call any user-defined code during cleanup, so if
1118 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1119 */
1120 if (PointerIsValid(portal->cleanup))
1121 {
1122 elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1123 portal->cleanup = NULL;
1124 }
1125
1126 /* Zap it. */
1127 PortalDrop(portal, false);
1128 }
1129}
1130
1131/* Find all available cursors */
1132Datum
1134{
1135 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1136 HASH_SEQ_STATUS hash_seq;
1137 PortalHashEnt *hentry;
1138
1139 /*
1140 * We put all the tuples into a tuplestore in one scan of the hashtable.
1141 * This avoids any issue of the hashtable possibly changing between calls.
1142 */
1143 InitMaterializedSRF(fcinfo, 0);
1144
1145 hash_seq_init(&hash_seq, PortalHashTable);
1146 while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1147 {
1148 Portal portal = hentry->portal;
1149 Datum values[6];
1150 bool nulls[6] = {0};
1151
1152 /* report only "visible" entries */
1153 if (!portal->visible)
1154 continue;
1155 /* also ignore it if PortalDefineQuery hasn't been called yet */
1156 if (!portal->sourceText)
1157 continue;
1158
1159 values[0] = CStringGetTextDatum(portal->name);
1165
1166 tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1167 }
1168
1169 return (Datum) 0;
1170}
1171
1172bool
1174{
1175 HASH_SEQ_STATUS status;
1176 PortalHashEnt *hentry;
1177
1179
1180 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1181 {
1182 Portal portal = hentry->portal;
1183
1184 if (portal->status == PORTAL_READY)
1185 return false;
1186 }
1187
1188 return true;
1189}
1190
1191/*
1192 * Hold all pinned portals.
1193 *
1194 * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1195 * called to protect internally-generated cursors from being dropped during
1196 * the transaction shutdown. Currently, SPI calls this automatically; PLs
1197 * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1198 * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1199 * because we need to run user-defined code while persisting a portal.
1200 * It's too late to do that once transaction abort has started.)
1201 *
1202 * We protect such portals by converting them to held cursors. We mark them
1203 * as "auto-held" so that exception exit knows to clean them up. (In normal,
1204 * non-exception code paths, the PL needs to clean such portals itself, since
1205 * transaction end won't do it anymore; but that should be normal practice
1206 * anyway.)
1207 */
1208void
1210{
1211 HASH_SEQ_STATUS status;
1212 PortalHashEnt *hentry;
1213
1215
1216 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1217 {
1218 Portal portal = hentry->portal;
1219
1220 if (portal->portalPinned && !portal->autoHeld)
1221 {
1222 /*
1223 * Doing transaction control, especially abort, inside a cursor
1224 * loop that is not read-only, for example using UPDATE ...
1225 * RETURNING, has weird semantics issues. Also, this
1226 * implementation wouldn't work, because such portals cannot be
1227 * held. (The core grammar enforces that only SELECT statements
1228 * can drive a cursor, but for example PL/pgSQL does not restrict
1229 * it.)
1230 */
1231 if (portal->strategy != PORTAL_ONE_SELECT)
1232 ereport(ERROR,
1233 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1234 errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1235
1236 /* Verify it's in a suitable state to be held */
1237 if (portal->status != PORTAL_READY)
1238 elog(ERROR, "pinned portal is not ready to be auto-held");
1239
1240 HoldPortal(portal);
1241 portal->autoHeld = true;
1242 }
1243 }
1244}
1245
1246/*
1247 * Drop the outer active snapshots for all portals, so that no snapshots
1248 * remain active.
1249 *
1250 * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1251 * ROLLBACK inside a procedure. This has to be separate from that since it
1252 * should not be run until we're done with steps that are likely to fail.
1253 *
1254 * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1255 * need to clean up snapshot management in VACUUM and perhaps other places.
1256 */
1257void
1259{
1260 HASH_SEQ_STATUS status;
1261 PortalHashEnt *hentry;
1262 int numPortalSnaps = 0;
1263 int numActiveSnaps = 0;
1264
1265 /* First, scan PortalHashTable and clear portalSnapshot fields */
1267
1268 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1269 {
1270 Portal portal = hentry->portal;
1271
1272 if (portal->portalSnapshot != NULL)
1273 {
1274 portal->portalSnapshot = NULL;
1275 numPortalSnaps++;
1276 }
1277 /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1278 }
1279
1280 /*
1281 * Now, pop all the active snapshots, which should be just those that were
1282 * portal snapshots. Ideally we'd drive this directly off the portal
1283 * scan, but there's no good way to visit the portals in the correct
1284 * order. So just cross-check after the fact.
1285 */
1286 while (ActiveSnapshotSet())
1287 {
1289 numActiveSnaps++;
1290 }
1291
1292 if (numPortalSnaps != numActiveSnaps)
1293 elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1294 numPortalSnaps, numActiveSnaps);
1295}
static Datum values[MAXATTR]
Definition: bootstrap.c:151
#define CStringGetTextDatum(s)
Definition: builtins.h:97
uint32 SubTransactionId
Definition: c.h:627
#define InvalidSubTransactionId
Definition: c.h:629
#define PointerIsValid(pointer)
Definition: c.h:734
CommandTag
Definition: cmdtag.h:23
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1420
void hash_seq_term(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1514
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define WARNING
Definition: elog.h:36
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
#define PG_FUNCTION_ARGS
Definition: fmgr.h:193
void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags)
Definition: funcapi.c:76
int work_mem
Definition: globals.c:130
Assert(PointerIsAligned(start, uint64))
#define HASH_STRINGS
Definition: hsearch.h:96
#define HASH_ELEM
Definition: hsearch.h:95
#define stmt
Definition: indent_codes.h:59
bool shmem_exit_inprogress
Definition: ipc.c:45
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1215
void pfree(void *pointer)
Definition: mcxt.c:1524
void MemoryContextDeleteChildren(MemoryContext context)
Definition: mcxt.c:539
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454
void MemoryContextSetIdentifier(MemoryContext context, const char *id)
Definition: mcxt.c:612
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:170
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
#define CURSOR_OPT_SCROLL
Definition: parsenodes.h:3369
#define CURSOR_OPT_HOLD
Definition: parsenodes.h:3373
#define CURSOR_OPT_BINARY
Definition: parsenodes.h:3368
#define CURSOR_OPT_NO_SCROLL
Definition: parsenodes.h:3370
#define lfirst_node(type, lc)
Definition: pg_list.h:176
#define NIL
Definition: pg_list.h:68
void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner)
Definition: plancache.c:1435
#define sprintf
Definition: port.h:241
@ PORTAL_FAILED
Definition: portal.h:110
@ PORTAL_NEW
Definition: portal.h:105
@ PORTAL_ACTIVE
Definition: portal.h:108
@ PORTAL_DONE
Definition: portal.h:109
@ PORTAL_READY
Definition: portal.h:107
@ PORTAL_DEFINED
Definition: portal.h:106
struct PortalData * Portal
Definition: portal.h:113
@ PORTAL_MULTI_QUERY
Definition: portal.h:95
@ PORTAL_ONE_SELECT
Definition: portal.h:91
#define PortalIsValid(p)
Definition: portal.h:212
void PortalCleanup(Portal portal)
Definition: portalcmds.c:274
void PersistHoldablePortal(Portal portal)
Definition: portalcmds.c:327
void AtAbort_Portals(void)
Definition: portalmem.c:783
void AtSubAbort_Portals(SubTransactionId mySubid, SubTransactionId parentSubid, ResourceOwner myXactOwner, ResourceOwner parentXactOwner)
Definition: portalmem.c:981
void EnablePortalManager(void)
Definition: portalmem.c:104
void MarkPortalDone(Portal portal)
Definition: portalmem.c:416
#define MAX_PORTALNAME_LEN
Definition: portalmem.c:46
void PinPortal(Portal portal)
Definition: portalmem.c:373
Datum pg_cursor(PG_FUNCTION_ARGS)
Definition: portalmem.c:1133
static HTAB * PortalHashTable
Definition: portalmem.c:54
void PortalDefineQuery(Portal portal, const char *prepStmtName, const char *sourceText, CommandTag commandTag, List *stmts, CachedPlan *cplan, CachedPlanSource *plansource)
Definition: portalmem.c:282
#define PortalHashTableInsert(PORTAL, NAME)
Definition: portalmem.c:68
Portal CreateNewPortal(void)
Definition: portalmem.c:235
bool PreCommit_Portals(bool isPrepare)
Definition: portalmem.c:679
static MemoryContext TopPortalContext
Definition: portalmem.c:91
void MarkPortalFailed(Portal portal)
Definition: portalmem.c:444
static void PortalReleaseCachedPlan(Portal portal)
Definition: portalmem.c:312
void UnpinPortal(Portal portal)
Definition: portalmem.c:382
void HoldPinnedPortals(void)
Definition: portalmem.c:1209
PlannedStmt * PortalGetPrimaryStmt(Portal portal)
Definition: portalmem.c:151
void MarkPortalActive(Portal portal)
Definition: portalmem.c:397
void PortalDrop(Portal portal, bool isTopCommit)
Definition: portalmem.c:470
#define PortalHashTableLookup(NAME, PORTAL)
Definition: portalmem.c:56
bool ThereAreNoReadyPortals(void)
Definition: portalmem.c:1173
Portal GetPortalByName(const char *name)
Definition: portalmem.c:130
void AtSubCommit_Portals(SubTransactionId mySubid, SubTransactionId parentSubid, int parentLevel, ResourceOwner parentXactOwner)
Definition: portalmem.c:945
#define PortalHashTableDelete(PORTAL)
Definition: portalmem.c:81
void AtCleanup_Portals(void)
Definition: portalmem.c:860
void PortalHashTableDeleteAll(void)
Definition: portalmem.c:609
static void HoldPortal(Portal portal)
Definition: portalmem.c:638
Portal CreatePortal(const char *name, bool allowDup, bool dupSilent)
Definition: portalmem.c:175
void AtSubCleanup_Portals(SubTransactionId mySubid)
Definition: portalmem.c:1094
void PortalErrorCleanup(void)
Definition: portalmem.c:919
void ForgetPortalSnapshots(void)
Definition: portalmem.c:1258
struct portalhashent PortalHashEnt
void PortalCreateHoldStore(Portal portal)
Definition: portalmem.c:333
#define PORTALS_PER_USER
Definition: portalmem.c:38
uintptr_t Datum
Definition: postgres.h:69
static Datum BoolGetDatum(bool X)
Definition: postgres.h:107
tree ctl
Definition: radixtree.h:1838
void ResourceOwnerNewParent(ResourceOwner owner, ResourceOwner newparent)
Definition: resowner.c:914
ResourceOwner ResourceOwnerCreate(ResourceOwner parent, const char *name)
Definition: resowner.c:421
void ResourceOwnerRelease(ResourceOwner owner, ResourceReleasePhase phase, bool isCommit, bool isTopLevel)
Definition: resowner.c:658
void ResourceOwnerDelete(ResourceOwner owner)
Definition: resowner.c:871
ResourceOwner CurTransactionResourceOwner
Definition: resowner.c:174
@ RESOURCE_RELEASE_LOCKS
Definition: resowner.h:55
@ RESOURCE_RELEASE_BEFORE_LOCKS
Definition: resowner.h:54
@ RESOURCE_RELEASE_AFTER_LOCKS
Definition: resowner.h:56
void UnregisterSnapshotFromOwner(Snapshot snapshot, ResourceOwner owner)
Definition: snapmgr.c:866
bool ActiveSnapshotSet(void)
Definition: snapmgr.c:799
void PopActiveSnapshot(void)
Definition: snapmgr.c:762
Definition: dynahash.c:220
Definition: pg_list.h:54
SubTransactionId createSubid
Definition: portal.h:131
CachedPlanSource * plansource
Definition: portal.h:141
Snapshot portalSnapshot
Definition: portal.h:170
SubTransactionId activeSubid
Definition: portal.h:132
CommandTag commandTag
Definition: portal.h:137
const char * sourceText
Definition: portal.h:136
bool atEnd
Definition: portal.h:200
bool atStart
Definition: portal.h:199
List * stmts
Definition: portal.h:139
ResourceOwner resowner
Definition: portal.h:121
TimestampTz creation_time
Definition: portal.h:204
bool autoHeld
Definition: portal.h:153
bool portalPinned
Definition: portal.h:152
int createLevel
Definition: portal.h:133
MemoryContext holdContext
Definition: portal.h:178
QueryCompletion qc
Definition: portal.h:138
MemoryContext portalContext
Definition: portal.h:120
bool visible
Definition: portal.h:205
Snapshot holdSnapshot
Definition: portal.h:188
const char * name
Definition: portal.h:118
const char * prepStmtName
Definition: portal.h:119
CachedPlan * cplan
Definition: portal.h:140
Tuplestorestate * holdStore
Definition: portal.h:177
int cursorOptions
Definition: portal.h:148
void(* cleanup)(Portal portal)
Definition: portal.h:122
PortalStrategy strategy
Definition: portal.h:147
PortalStatus status
Definition: portal.h:151
uint64 nprocessed
Definition: cmdtag.h:32
CommandTag commandTag
Definition: cmdtag.h:31
TupleDesc setDesc
Definition: execnodes.h:359
Tuplestorestate * setResult
Definition: execnodes.h:358
Portal portal
Definition: portalmem.c:51
char portalname[MAX_PORTALNAME_LEN]
Definition: portalmem.c:50
Tuplestorestate * tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
Definition: tuplestore.c:330
void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, const Datum *values, const bool *isnull)
Definition: tuplestore.c:784
void tuplestore_end(Tuplestorestate *state)
Definition: tuplestore.c:492
static Datum TimestampTzGetDatum(TimestampTz X)
Definition: timestamp.h:52
const char * name
SubTransactionId GetCurrentSubTransactionId(void)
Definition: xact.c:791
int GetCurrentTransactionNestLevel(void)
Definition: xact.c:929
TimestampTz GetCurrentStatementStartTimestamp(void)
Definition: xact.c:879