PostgreSQL Source Code git master
Loading...
Searching...
No Matches
portalmem.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * portalmem.c
4 * backend portal memory management
5 *
6 * Portals are objects representing the execution state of a query.
7 * This module provides memory management services for portals, but it
8 * doesn't actually run the executor for them.
9 *
10 *
11 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
12 * Portions Copyright (c) 1994, Regents of the University of California
13 *
14 * IDENTIFICATION
15 * src/backend/utils/mmgr/portalmem.c
16 *
17 *-------------------------------------------------------------------------
18 */
19#include "postgres.h"
20
21#include "access/xact.h"
22#include "commands/portalcmds.h"
23#include "funcapi.h"
24#include "miscadmin.h"
25#include "storage/ipc.h"
26#include "utils/builtins.h"
27#include "utils/memutils.h"
28#include "utils/snapmgr.h"
29#include "utils/timestamp.h"
30
31/*
32 * Estimate of the maximum number of open portals a user would have,
33 * used in initially sizing the PortalHashTable in EnablePortalManager().
34 * Since the hash table can expand, there's no need to make this overly
35 * generous, and keeping it small avoids unnecessary overhead in the
36 * hash_seq_search() calls executed during transaction end.
37 */
38#define PORTALS_PER_USER 16
39
40
41/* ----------------
42 * Global state
43 * ----------------
44 */
45
46#define MAX_PORTALNAME_LEN NAMEDATALEN
47
53
55
/*
 * Look up NAME in PortalHashTable; set PORTAL to the matching portal,
 * or to NULL if no entry with that name exists.
 */
#define PortalHashTableLookup(NAME, PORTAL) \
do { \
	PortalHashEnt *lookup_hentry = (PortalHashEnt *) \
		hash_search(PortalHashTable, (NAME), HASH_FIND, NULL); \
	\
	PORTAL = lookup_hentry ? lookup_hentry->portal : NULL; \
} while(0)
67
/*
 * Insert PORTAL into PortalHashTable under NAME; it is an error for an
 * entry with that name to already exist.
 */
#define PortalHashTableInsert(PORTAL, NAME) \
do { \
	bool		insert_found; \
	PortalHashEnt *insert_hentry = (PortalHashEnt *) \
		hash_search(PortalHashTable, (NAME), HASH_ENTER, &insert_found); \
	\
	if (insert_found) \
		elog(ERROR, "duplicate portal name"); \
	insert_hentry->portal = PORTAL; \
	/* Point PORTAL->name at the hash entry's key, avoiding a second copy */ \
	PORTAL->name = insert_hentry->portalname; \
} while(0)
80
/*
 * Remove PORTAL's entry from PortalHashTable, keyed by PORTAL->name.
 * Only warns (does not error) if the entry is unexpectedly absent.
 */
#define PortalHashTableDelete(PORTAL) \
do { \
	PortalHashEnt *removed_hentry = (PortalHashEnt *) \
		hash_search(PortalHashTable, PORTAL->name, HASH_REMOVE, NULL); \
	\
	if (removed_hentry == NULL) \
		elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)
90
92
93
94/* ----------------------------------------------------------------
95 * public portal interface functions
96 * ----------------------------------------------------------------
97 */
98
99/*
100 * EnablePortalManager
101 * Enables the portal management module at backend startup.
102 */
103void
105{
106 HASHCTL ctl;
107
109
111 "TopPortalContext",
113
114 ctl.keysize = MAX_PORTALNAME_LEN;
115 ctl.entrysize = sizeof(PortalHashEnt);
116
117 /*
118 * use PORTALS_PER_USER as a guess of how many hash table entries to
119 * create, initially
120 */
123}
124
125/*
126 * GetPortalByName
127 * Returns a portal given a portal name, or NULL if name not found.
128 */
129Portal
131{
132 Portal portal;
133
134 if (name)
136 else
137 portal = NULL;
138
139 return portal;
140}
141
142/*
143 * PortalGetPrimaryStmt
144 * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145 *
146 * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147 * portal are marked canSetTag, returns the first one. Neither of these
148 * cases should occur in present usages of this function.
149 */
152{
153 ListCell *lc;
154
155 foreach(lc, portal->stmts)
156 {
158
159 if (stmt->canSetTag)
160 return stmt;
161 }
162 return NULL;
163}
164
165/*
166 * CreatePortal
167 * Returns a new portal given a name.
168 *
169 * allowDup: if true, automatically drop any pre-existing portal of the
170 * same name (if false, an error is raised).
171 *
172 * dupSilent: if true, don't even emit a WARNING.
173 */
174Portal
175CreatePortal(const char *name, bool allowDup, bool dupSilent)
176{
177 Portal portal;
178
179 Assert(name);
180
181 portal = GetPortalByName(name);
182 if (PortalIsValid(portal))
183 {
184 if (!allowDup)
187 errmsg("cursor \"%s\" already exists", name)));
188 if (!dupSilent)
191 errmsg("closing existing cursor \"%s\"",
192 name)));
193 PortalDrop(portal, false);
194 }
195
196 /* make new portal structure */
197 portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
198
199 /* initialize portal context; typically it won't store much */
201 "PortalContext",
203
204 /* create a resource owner for the portal */
206 "Portal");
207
208 /* initialize portal fields that don't start off zero */
209 portal->status = PORTAL_NEW;
210 portal->cleanup = PortalCleanup;
212 portal->activeSubid = portal->createSubid;
216 portal->atStart = true;
217 portal->atEnd = true; /* disallow fetches until query is set */
218 portal->visible = true;
220
221 /* put portal in table (sets portal->name) */
223
224 /* for named portals reuse portal->name copy */
225 MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
226
227 return portal;
228}
229
230/*
231 * CreateNewPortal
232 * Create a new portal, assigning it a random nonconflicting name.
233 */
234Portal
236{
237 static unsigned int unnamed_portal_count = 0;
238
239 char portalname[MAX_PORTALNAME_LEN];
240
241 /* Select a nonconflicting name */
242 for (;;)
243 {
245 sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
246 if (GetPortalByName(portalname) == NULL)
247 break;
248 }
249
250 return CreatePortal(portalname, false, false);
251}
252
253/*
254 * PortalDefineQuery
255 * A simple subroutine to establish a portal's query.
256 *
257 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
258 * allowed anymore to pass NULL. (If you really don't have source text,
259 * you can pass a constant string, perhaps "(query not available)".)
260 *
261 * commandTag shall be NULL if and only if the original query string
262 * (before rewriting) was an empty string. Also, the passed commandTag must
263 * be a pointer to a constant string, since it is not copied.
264 *
265 * If cplan is provided, then it is a cached plan containing the stmts, and
266 * the caller must have done GetCachedPlan(), causing a refcount increment.
267 * The refcount will be released when the portal is destroyed.
268 *
269 * If cplan is NULL, then it is the caller's responsibility to ensure that
270 * the passed plan trees have adequate lifetime. Typically this is done by
271 * copying them into the portal's context.
272 *
273 * The caller is also responsible for ensuring that the passed prepStmtName
274 * (if not NULL) and sourceText have adequate lifetime.
275 *
276 * NB: this function mustn't do much beyond storing the passed values; in
277 * particular don't do anything that risks elog(ERROR). If that were to
278 * happen here before storing the cplan reference, we'd leak the plancache
279 * refcount that the caller is trying to hand off to us.
280 */
281void
283 const char *prepStmtName,
284 const char *sourceText,
285 CommandTag commandTag,
286 List *stmts,
287 CachedPlan *cplan)
288{
289 Assert(PortalIsValid(portal));
290 Assert(portal->status == PORTAL_NEW);
291
292 Assert(sourceText != NULL);
293 Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
294
295 portal->prepStmtName = prepStmtName;
296 portal->sourceText = sourceText;
297 portal->commandTag = commandTag;
298 SetQueryCompletion(&portal->qc, commandTag, 0);
299 portal->stmts = stmts;
300 portal->cplan = cplan;
301 portal->status = PORTAL_DEFINED;
302}
303
304/*
305 * PortalReleaseCachedPlan
306 * Release a portal's reference to its cached plan, if any.
307 */
308static void
310{
311 if (portal->cplan)
312 {
313 ReleaseCachedPlan(portal->cplan, NULL);
314 portal->cplan = NULL;
315
316 /*
317 * We must also clear portal->stmts which is now a dangling reference
318 * to the cached plan's plan list. This protects any code that might
319 * try to examine the Portal later.
320 */
321 portal->stmts = NIL;
322 }
323}
324
325/*
326 * PortalCreateHoldStore
327 * Create the tuplestore for a portal.
328 */
329void
331{
333
334 Assert(portal->holdContext == NULL);
335 Assert(portal->holdStore == NULL);
336 Assert(portal->holdSnapshot == NULL);
337
338 /*
339 * Create the memory context that is used for storage of the tuple set.
340 * Note this is NOT a child of the portal's portalContext.
341 */
342 portal->holdContext =
344 "PortalHoldContext",
346
347 /*
348 * Create the tuple store, selecting cross-transaction temp files, and
349 * enabling random access only if cursor requires scrolling.
350 *
351 * XXX: Should maintenance_work_mem be used for the portal size?
352 */
354
355 portal->holdStore =
357 true, work_mem);
358
360}
361
362/*
363 * PinPortal
364 * Protect a portal from dropping.
365 *
366 * A pinned portal is still unpinned and dropped at transaction or
367 * subtransaction abort.
368 */
369void
371{
372 if (portal->portalPinned)
373 elog(ERROR, "portal already pinned");
374
375 portal->portalPinned = true;
376}
377
378void
380{
381 if (!portal->portalPinned)
382 elog(ERROR, "portal not pinned");
383
384 portal->portalPinned = false;
385}
386
387/*
388 * MarkPortalActive
389 * Transition a portal from READY to ACTIVE state.
390 *
391 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
392 */
393void
395{
396 /* For safety, this is a runtime test not just an Assert */
397 if (portal->status != PORTAL_READY)
400 errmsg("portal \"%s\" cannot be run", portal->name)));
401 /* Perform the state transition */
402 portal->status = PORTAL_ACTIVE;
404}
405
406/*
407 * MarkPortalDone
408 * Transition a portal from ACTIVE to DONE state.
409 *
410 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
411 */
412void
414{
415 /* Perform the state transition */
416 Assert(portal->status == PORTAL_ACTIVE);
417 portal->status = PORTAL_DONE;
418
419 /*
420 * Allow portalcmds.c to clean up the state it knows about. We might as
421 * well do that now, since the portal can't be executed any more.
422 *
423 * In some cases involving execution of a ROLLBACK command in an already
424 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
425 * with the cleanup hook still unexecuted.
426 */
427 if (portal->cleanup)
428 {
429 portal->cleanup(portal);
430 portal->cleanup = NULL;
431 }
432}
433
434/*
435 * MarkPortalFailed
436 * Transition a portal into FAILED state.
437 *
438 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
439 */
440void
442{
443 /* Perform the state transition */
444 Assert(portal->status != PORTAL_DONE);
445 portal->status = PORTAL_FAILED;
446
447 /*
448 * Allow portalcmds.c to clean up the state it knows about. We might as
449 * well do that now, since the portal can't be executed any more.
450 *
451 * In some cases involving cleanup of an already aborted transaction, this
452 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
453 * still unexecuted.
454 */
455 if (portal->cleanup)
456 {
457 portal->cleanup(portal);
458 portal->cleanup = NULL;
459 }
460}
461
462/*
463 * PortalDrop
464 * Destroy the portal.
465 */
466void
468{
469 Assert(PortalIsValid(portal));
470
471 /*
472 * Don't allow dropping a pinned portal, it's still needed by whoever
473 * pinned it.
474 */
475 if (portal->portalPinned)
478 errmsg("cannot drop pinned portal \"%s\"", portal->name)));
479
480 /*
481 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
482 */
483 if (portal->status == PORTAL_ACTIVE)
486 errmsg("cannot drop active portal \"%s\"", portal->name)));
487
488 /*
489 * Allow portalcmds.c to clean up the state it knows about, in particular
490 * shutting down the executor if still active. This step potentially runs
491 * user-defined code so failure has to be expected. It's the cleanup
492 * hook's responsibility to not try to do that more than once, in the case
493 * that failure occurs and then we come back to drop the portal again
494 * during transaction abort.
495 *
496 * Note: in most paths of control, this will have been done already in
497 * MarkPortalDone or MarkPortalFailed. We're just making sure.
498 */
499 if (portal->cleanup)
500 {
501 portal->cleanup(portal);
502 portal->cleanup = NULL;
503 }
504
505 /* There shouldn't be an active snapshot anymore, except after error */
506 Assert(portal->portalSnapshot == NULL || !isTopCommit);
507
508 /*
509 * Remove portal from hash table. Because we do this here, we will not
510 * come back to try to remove the portal again if there's any error in the
511 * subsequent steps. Better to leak a little memory than to get into an
512 * infinite error-recovery loop.
513 */
514 PortalHashTableDelete(portal);
515
516 /* drop cached plan reference, if any */
518
519 /*
520 * If portal has a snapshot protecting its data, release that. This needs
521 * a little care since the registration will be attached to the portal's
522 * resowner; if the portal failed, we will already have released the
523 * resowner (and the snapshot) during transaction abort.
524 */
525 if (portal->holdSnapshot)
526 {
527 if (portal->resowner)
529 portal->resowner);
530 portal->holdSnapshot = NULL;
531 }
532
533 /*
534 * Release any resources still attached to the portal. There are several
535 * cases being covered here:
536 *
537 * Top transaction commit (indicated by isTopCommit): normally we should
538 * do nothing here and let the regular end-of-transaction resource
539 * releasing mechanism handle these resources too. However, if we have a
540 * FAILED portal (eg, a cursor that got an error), we'd better clean up
541 * its resources to avoid resource-leakage warning messages.
542 *
543 * Sub transaction commit: never comes here at all, since we don't kill
544 * any portals in AtSubCommit_Portals().
545 *
546 * Main or sub transaction abort: we will do nothing here because
547 * portal->resowner was already set NULL; the resources were already
548 * cleaned up in transaction abort.
549 *
550 * Ordinary portal drop: must release resources. However, if the portal
551 * is not FAILED then we do not release its locks. The locks become the
552 * responsibility of the transaction's ResourceOwner (since it is the
553 * parent of the portal's owner) and will be released when the transaction
554 * eventually ends.
555 */
556 if (portal->resowner &&
557 (!isTopCommit || portal->status == PORTAL_FAILED))
558 {
559 bool isCommit = (portal->status != PORTAL_FAILED);
560
563 isCommit, false);
566 isCommit, false);
569 isCommit, false);
571 }
572 portal->resowner = NULL;
573
574 /*
575 * Delete tuplestore if present. We should do this even under error
576 * conditions; since the tuplestore would have been using cross-
577 * transaction storage, its temp files need to be explicitly deleted.
578 */
579 if (portal->holdStore)
580 {
581 MemoryContext oldcontext;
582
583 oldcontext = MemoryContextSwitchTo(portal->holdContext);
584 tuplestore_end(portal->holdStore);
585 MemoryContextSwitchTo(oldcontext);
586 portal->holdStore = NULL;
587 }
588
589 /* delete tuplestore storage, if any */
590 if (portal->holdContext)
592
593 /* release subsidiary storage */
595
596 /* release portal struct (it's in TopPortalContext) */
597 pfree(portal);
598}
599
600/*
601 * Delete all declared cursors.
602 *
603 * Used by commands: CLOSE ALL, DISCARD ALL
604 */
605void
607{
608 HASH_SEQ_STATUS status;
610
611 if (PortalHashTable == NULL)
612 return;
613
615 while ((hentry = hash_seq_search(&status)) != NULL)
616 {
617 Portal portal = hentry->portal;
618
619 /* Can't close the active portal (the one running the command) */
620 if (portal->status == PORTAL_ACTIVE)
621 continue;
622
623 PortalDrop(portal, false);
624
625 /* Restart the iteration in case that led to other drops */
626 hash_seq_term(&status);
628 }
629}
630
631/*
632 * "Hold" a portal. Prepare it for access by later transactions.
633 */
634static void
636{
637 /*
638 * Note that PersistHoldablePortal() must release all resources used by
639 * the portal that are local to the creating transaction.
640 */
641 PortalCreateHoldStore(portal);
642 PersistHoldablePortal(portal);
643
644 /* drop cached plan reference, if any */
646
647 /*
648 * Any resources belonging to the portal will be released in the upcoming
649 * transaction-wide cleanup; the portal will no longer have its own
650 * resources.
651 */
652 portal->resowner = NULL;
653
654 /*
655 * Having successfully exported the holdable cursor, mark it as not
656 * belonging to this transaction.
657 */
660 portal->createLevel = 0;
661}
662
663/*
664 * Pre-commit processing for portals.
665 *
666 * Holdable cursors created in this transaction need to be converted to
667 * materialized form, since we are going to close down the executor and
668 * release locks. Non-holdable portals created in this transaction are
669 * simply removed. Portals remaining from prior transactions should be
670 * left untouched.
671 *
672 * Returns true if any portals changed state (possibly causing user-defined
673 * code to be run), false if not.
674 */
675bool
677{
678 bool result = false;
679 HASH_SEQ_STATUS status;
681
683
684 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
685 {
686 Portal portal = hentry->portal;
687
688 /*
689 * There should be no pinned portals anymore. Complain if someone
690 * leaked one. Auto-held portals are allowed; we assume that whoever
691 * pinned them is managing them.
692 */
693 if (portal->portalPinned && !portal->autoHeld)
694 elog(ERROR, "cannot commit while a portal is pinned");
695
696 /*
697 * Do not touch active portals --- this can only happen in the case of
698 * a multi-transaction utility command, such as VACUUM, or a commit in
699 * a procedure.
700 *
701 * Note however that any resource owner attached to such a portal is
702 * still going to go away, so don't leave a dangling pointer. Also
703 * unregister any snapshots held by the portal, mainly to avoid
704 * snapshot leak warnings from ResourceOwnerRelease().
705 */
706 if (portal->status == PORTAL_ACTIVE)
707 {
708 if (portal->holdSnapshot)
709 {
710 if (portal->resowner)
712 portal->resowner);
713 portal->holdSnapshot = NULL;
714 }
715 portal->resowner = NULL;
716 /* Clear portalSnapshot too, for cleanliness */
717 portal->portalSnapshot = NULL;
718 continue;
719 }
720
721 /* Is it a holdable portal created in the current xact? */
722 if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
724 portal->status == PORTAL_READY)
725 {
726 /*
727 * We are exiting the transaction that created a holdable cursor.
728 * Instead of dropping the portal, prepare it for access by later
729 * transactions.
730 *
731 * However, if this is PREPARE TRANSACTION rather than COMMIT,
732 * refuse PREPARE, because the semantics seem pretty unclear.
733 */
734 if (isPrepare)
737 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
738
739 HoldPortal(portal);
740
741 /* Report we changed state */
742 result = true;
743 }
744 else if (portal->createSubid == InvalidSubTransactionId)
745 {
746 /*
747 * Do nothing to cursors held over from a previous transaction
748 * (including ones we just froze in a previous cycle of this loop)
749 */
750 continue;
751 }
752 else
753 {
754 /* Zap all non-holdable portals */
755 PortalDrop(portal, true);
756
757 /* Report we changed state */
758 result = true;
759 }
760
761 /*
762 * After either freezing or dropping a portal, we have to restart the
763 * iteration, because we could have invoked user-defined code that
764 * caused a drop of the next portal in the hash chain.
765 */
766 hash_seq_term(&status);
768 }
769
770 return result;
771}
772
773/*
774 * Abort processing for portals.
775 *
776 * At this point we run the cleanup hook if present, but we can't release the
777 * portal's memory until the cleanup call.
778 */
779void
781{
782 HASH_SEQ_STATUS status;
784
786
787 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
788 {
789 Portal portal = hentry->portal;
790
791 /*
 792 * When elog(FATAL) is in progress, we need to set the active portal to
793 * failed, so that PortalCleanup() doesn't run the executor shutdown.
794 */
796 MarkPortalFailed(portal);
797
798 /*
799 * Do nothing else to cursors held over from a previous transaction.
800 */
802 continue;
803
804 /*
805 * Do nothing to auto-held cursors. This is similar to the case of a
806 * cursor from a previous transaction, but it could also be that the
807 * cursor was auto-held in this transaction, so it wants to live on.
808 */
809 if (portal->autoHeld)
810 continue;
811
812 /*
813 * If it was created in the current transaction, we can't do normal
814 * shutdown on a READY portal either; it might refer to objects
815 * created in the failed transaction. See comments in
816 * AtSubAbort_Portals.
817 */
818 if (portal->status == PORTAL_READY)
819 MarkPortalFailed(portal);
820
821 /*
822 * Allow portalcmds.c to clean up the state it knows about, if we
823 * haven't already.
824 */
825 if (portal->cleanup)
826 {
827 portal->cleanup(portal);
828 portal->cleanup = NULL;
829 }
830
831 /* drop cached plan reference, if any */
833
834 /*
835 * Any resources belonging to the portal will be released in the
836 * upcoming transaction-wide cleanup; they will be gone before we run
837 * PortalDrop.
838 */
839 portal->resowner = NULL;
840
841 /*
842 * Although we can't delete the portal data structure proper, we can
843 * release any memory in subsidiary contexts, such as executor state.
844 * The cleanup hook was the last thing that might have needed data
845 * there. But leave active portals alone.
846 */
847 if (portal->status != PORTAL_ACTIVE)
849 }
850}
851
852/*
853 * Post-abort cleanup for portals.
854 *
855 * Delete all portals not held over from prior transactions.
856 */
857void
859{
860 HASH_SEQ_STATUS status;
862
864
865 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
866 {
867 Portal portal = hentry->portal;
868
869 /*
870 * Do not touch active portals --- this can only happen in the case of
871 * a multi-transaction command.
872 */
873 if (portal->status == PORTAL_ACTIVE)
874 continue;
875
876 /*
877 * Do nothing to cursors held over from a previous transaction or
878 * auto-held ones.
879 */
880 if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
881 {
882 Assert(portal->status != PORTAL_ACTIVE);
883 Assert(portal->resowner == NULL);
884 continue;
885 }
886
887 /*
888 * If a portal is still pinned, forcibly unpin it. PortalDrop will not
889 * let us drop the portal otherwise. Whoever pinned the portal was
890 * interrupted by the abort too and won't try to use it anymore.
891 */
892 if (portal->portalPinned)
893 portal->portalPinned = false;
894
895 /*
896 * We had better not call any user-defined code during cleanup, so if
897 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
898 */
899 if (portal->cleanup)
900 {
901 elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
902 portal->cleanup = NULL;
903 }
904
905 /* Zap it. */
906 PortalDrop(portal, false);
907 }
908}
909
910/*
911 * Portal-related cleanup when we return to the main loop on error.
912 *
913 * This is different from the cleanup at transaction abort. Auto-held portals
914 * are cleaned up on error but not on transaction abort.
915 */
916void
918{
919 HASH_SEQ_STATUS status;
921
923
924 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
925 {
926 Portal portal = hentry->portal;
927
928 if (portal->autoHeld)
929 {
930 portal->portalPinned = false;
931 PortalDrop(portal, false);
932 }
933 }
934}
935
936/*
937 * Pre-subcommit processing for portals.
938 *
939 * Reassign portals created or used in the current subtransaction to the
940 * parent subtransaction.
941 */
942void
945 int parentLevel,
947{
948 HASH_SEQ_STATUS status;
950
952
953 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
954 {
955 Portal portal = hentry->portal;
956
957 if (portal->createSubid == mySubid)
958 {
959 portal->createSubid = parentSubid;
960 portal->createLevel = parentLevel;
961 if (portal->resowner)
963 }
964 if (portal->activeSubid == mySubid)
965 portal->activeSubid = parentSubid;
966 }
967}
968
969/*
970 * Subtransaction abort handling for portals.
971 *
972 * Deactivate portals created or used during the failed subtransaction.
973 * Note that per AtSubCommit_Portals, this will catch portals created/used
974 * in descendants of the subtransaction too.
975 *
976 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
977 */
978void
983{
984 HASH_SEQ_STATUS status;
986
988
989 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
990 {
991 Portal portal = hentry->portal;
992
993 /* Was it created in this subtransaction? */
994 if (portal->createSubid != mySubid)
995 {
996 /* No, but maybe it was used in this subtransaction? */
997 if (portal->activeSubid == mySubid)
998 {
999 /* Maintain activeSubid until the portal is removed */
1000 portal->activeSubid = parentSubid;
1001
1002 /*
1003 * A MarkPortalActive() caller ran an upper-level portal in
1004 * this subtransaction and left the portal ACTIVE. This can't
1005 * happen, but force the portal into FAILED state for the same
1006 * reasons discussed below.
1007 *
1008 * We assume we can get away without forcing upper-level READY
1009 * portals to fail, even if they were run and then suspended.
1010 * In theory a suspended upper-level portal could have
1011 * acquired some references to objects that are about to be
1012 * destroyed, but there should be sufficient defenses against
1013 * such cases: the portal's original query cannot contain such
1014 * references, and any references within, say, cached plans of
1015 * PL/pgSQL functions are not from active queries and should
1016 * be protected by revalidation logic.
1017 */
1018 if (portal->status == PORTAL_ACTIVE)
1019 MarkPortalFailed(portal);
1020
1021 /*
1022 * Also, if we failed it during the current subtransaction
1023 * (either just above, or earlier), reattach its resource
1024 * owner to the current subtransaction's resource owner, so
1025 * that any resources it still holds will be released while
1026 * cleaning up this subtransaction. This prevents some corner
1027 * cases wherein we might get Asserts or worse while cleaning
1028 * up objects created during the current subtransaction
1029 * (because they're still referenced within this portal).
1030 */
1031 if (portal->status == PORTAL_FAILED && portal->resowner)
1032 {
1034 portal->resowner = NULL;
1035 }
1036 }
1037 /* Done if it wasn't created in this subtransaction */
1038 continue;
1039 }
1040
1041 /*
1042 * Force any live portals of my own subtransaction into FAILED state.
1043 * We have to do this because they might refer to objects created or
1044 * changed in the failed subtransaction, leading to crashes within
1045 * ExecutorEnd when portalcmds.c tries to close down the portal.
1046 * Currently, every MarkPortalActive() caller ensures it updates the
1047 * portal status again before relinquishing control, so ACTIVE can't
1048 * happen here. If it does happen, dispose the portal like existing
1049 * MarkPortalActive() callers would.
1050 */
1051 if (portal->status == PORTAL_READY ||
1052 portal->status == PORTAL_ACTIVE)
1053 MarkPortalFailed(portal);
1054
1055 /*
1056 * Allow portalcmds.c to clean up the state it knows about, if we
1057 * haven't already.
1058 */
1059 if (portal->cleanup)
1060 {
1061 portal->cleanup(portal);
1062 portal->cleanup = NULL;
1063 }
1064
1065 /* drop cached plan reference, if any */
1067
1068 /*
1069 * Any resources belonging to the portal will be released in the
1070 * upcoming transaction-wide cleanup; they will be gone before we run
1071 * PortalDrop.
1072 */
1073 portal->resowner = NULL;
1074
1075 /*
1076 * Although we can't delete the portal data structure proper, we can
1077 * release any memory in subsidiary contexts, such as executor state.
1078 * The cleanup hook was the last thing that might have needed data
1079 * there.
1080 */
1082 }
1083}
1084
1085/*
1086 * Post-subabort cleanup for portals.
1087 *
1088 * Drop all portals created in the failed subtransaction (but note that
1089 * we will not drop any that were reassigned to the parent above).
1090 */
1091void
1093{
1094 HASH_SEQ_STATUS status;
1096
1098
1099 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1100 {
1101 Portal portal = hentry->portal;
1102
1103 if (portal->createSubid != mySubid)
1104 continue;
1105
1106 /*
1107 * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1108 * let us drop the portal otherwise. Whoever pinned the portal was
1109 * interrupted by the abort too and won't try to use it anymore.
1110 */
1111 if (portal->portalPinned)
1112 portal->portalPinned = false;
1113
1114 /*
1115 * We had better not call any user-defined code during cleanup, so if
1116 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1117 */
1118 if (portal->cleanup)
1119 {
1120 elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1121 portal->cleanup = NULL;
1122 }
1123
1124 /* Zap it. */
1125 PortalDrop(portal, false);
1126 }
1127}
1128
1129/* Find all available cursors */
1130Datum
1132{
1133 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1136
1137 /*
1138 * We put all the tuples into a tuplestore in one scan of the hashtable.
1139 * This avoids any issue of the hashtable possibly changing between calls.
1140 */
1141 InitMaterializedSRF(fcinfo, 0);
1142
1144 while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1145 {
1146 Portal portal = hentry->portal;
1147 Datum values[6];
1148 bool nulls[6] = {0};
1149
1150 /* report only "visible" entries */
1151 if (!portal->visible)
1152 continue;
1153 /* also ignore it if PortalDefineQuery hasn't been called yet */
1154 if (!portal->sourceText)
1155 continue;
1156
1157 values[0] = CStringGetTextDatum(portal->name);
1163
1164 tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1165 }
1166
1167 return (Datum) 0;
1168}
1169
1170bool
1172{
1173 HASH_SEQ_STATUS status;
1175
1177
1178 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1179 {
1180 Portal portal = hentry->portal;
1181
1182 if (portal->status == PORTAL_READY)
1183 return false;
1184 }
1185
1186 return true;
1187}
1188
1189/*
1190 * Hold all pinned portals.
1191 *
1192 * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1193 * called to protect internally-generated cursors from being dropped during
1194 * the transaction shutdown. Currently, SPI calls this automatically; PLs
1195 * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1196 * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1197 * because we need to run user-defined code while persisting a portal.
1198 * It's too late to do that once transaction abort has started.)
1199 *
1200 * We protect such portals by converting them to held cursors. We mark them
1201 * as "auto-held" so that exception exit knows to clean them up. (In normal,
1202 * non-exception code paths, the PL needs to clean such portals itself, since
1203 * transaction end won't do it anymore; but that should be normal practice
1204 * anyway.)
1205 */
1206void
1208{
1209 HASH_SEQ_STATUS status;
1211
1213
1214 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1215 {
1216 Portal portal = hentry->portal;
1217
1218 if (portal->portalPinned && !portal->autoHeld)
1219 {
1220 /*
1221 * Doing transaction control, especially abort, inside a cursor
1222 * loop that is not read-only, for example using UPDATE ...
1223 * RETURNING, has weird semantics issues. Also, this
1224 * implementation wouldn't work, because such portals cannot be
1225 * held. (The core grammar enforces that only SELECT statements
1226 * can drive a cursor, but for example PL/pgSQL does not restrict
1227 * it.)
1228 */
1229 if (portal->strategy != PORTAL_ONE_SELECT)
1230 ereport(ERROR,
1232 errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1233
1234 /* Verify it's in a suitable state to be held */
1235 if (portal->status != PORTAL_READY)
1236 elog(ERROR, "pinned portal is not ready to be auto-held");
1237
1238 HoldPortal(portal);
1239 portal->autoHeld = true;
1240 }
1241 }
1242}
1243
1244/*
1245 * Drop the outer active snapshots for all portals, so that no snapshots
1246 * remain active.
1247 *
1248 * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1249 * ROLLBACK inside a procedure. This has to be separate from that since it
1250 * should not be run until we're done with steps that are likely to fail.
1251 *
1252 * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1253 * need to clean up snapshot management in VACUUM and perhaps other places.
1254 */
1255void
1257{
1258 HASH_SEQ_STATUS status;
1260 int numPortalSnaps = 0;
1261 int numActiveSnaps = 0;
1262
1263 /* First, scan PortalHashTable and clear portalSnapshot fields */
1265
1266 while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1267 {
1268 Portal portal = hentry->portal;
1269
1270 if (portal->portalSnapshot != NULL)
1271 {
1272 portal->portalSnapshot = NULL;
1274 }
1275 /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1276 }
1277
1278 /*
1279 * Now, pop all the active snapshots, which should be just those that were
1280 * portal snapshots. Ideally we'd drive this directly off the portal
1281 * scan, but there's no good way to visit the portals in the correct
1282 * order. So just cross-check after the fact.
1283 */
1284 while (ActiveSnapshotSet())
1285 {
1288 }
1289
1291 elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1293}
static Datum values[MAXATTR]
Definition bootstrap.c:155
#define CStringGetTextDatum(s)
Definition builtins.h:97
uint32 SubTransactionId
Definition c.h:670
#define InvalidSubTransactionId
Definition c.h:672
#define Assert(condition)
Definition c.h:873
static void SetQueryCompletion(QueryCompletion *qc, CommandTag commandTag, uint64 nprocessed)
Definition cmdtag.h:37
CommandTag
Definition cmdtag.h:23
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:358
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1415
void hash_seq_term(HASH_SEQ_STATUS *status)
Definition dynahash.c:1509
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1380
int errcode(int sqlerrcode)
Definition elog.c:863
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define WARNING
Definition elog.h:36
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
#define PG_FUNCTION_ARGS
Definition fmgr.h:193
void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags)
Definition funcapi.c:76
int work_mem
Definition globals.c:131
#define HASH_STRINGS
Definition hsearch.h:96
#define HASH_ELEM
Definition hsearch.h:95
#define stmt
bool shmem_exit_inprogress
Definition ipc.c:46
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
void pfree(void *pointer)
Definition mcxt.c:1616
void MemoryContextDeleteChildren(MemoryContext context)
Definition mcxt.c:555
MemoryContext TopMemoryContext
Definition mcxt.c:166
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
void MemoryContextSetIdentifier(MemoryContext context, const char *id)
Definition mcxt.c:661
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define ALLOCSET_SMALL_SIZES
Definition memutils.h:170
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define CURSOR_OPT_SCROLL
#define CURSOR_OPT_HOLD
#define CURSOR_OPT_BINARY
#define CURSOR_OPT_NO_SCROLL
#define lfirst_node(type, lc)
Definition pg_list.h:176
#define NIL
Definition pg_list.h:68
void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner)
Definition plancache.c:1426
#define sprintf
Definition port.h:262
@ PORTAL_FAILED
Definition portal.h:110
@ PORTAL_NEW
Definition portal.h:105
@ PORTAL_ACTIVE
Definition portal.h:108
@ PORTAL_DONE
Definition portal.h:109
@ PORTAL_READY
Definition portal.h:107
@ PORTAL_DEFINED
Definition portal.h:106
struct PortalData * Portal
Definition portal.h:113
@ PORTAL_MULTI_QUERY
Definition portal.h:95
@ PORTAL_ONE_SELECT
Definition portal.h:91
#define PortalIsValid(p)
Definition portal.h:211
void PortalCleanup(Portal portal)
Definition portalcmds.c:274
void PersistHoldablePortal(Portal portal)
Definition portalcmds.c:327
void AtAbort_Portals(void)
Definition portalmem.c:780
void AtSubAbort_Portals(SubTransactionId mySubid, SubTransactionId parentSubid, ResourceOwner myXactOwner, ResourceOwner parentXactOwner)
Definition portalmem.c:979
void EnablePortalManager(void)
Definition portalmem.c:104
void MarkPortalDone(Portal portal)
Definition portalmem.c:413
#define MAX_PORTALNAME_LEN
Definition portalmem.c:46
void PinPortal(Portal portal)
Definition portalmem.c:370
Datum pg_cursor(PG_FUNCTION_ARGS)
Definition portalmem.c:1131
static HTAB * PortalHashTable
Definition portalmem.c:54
#define PortalHashTableInsert(PORTAL, NAME)
Definition portalmem.c:68
Portal CreateNewPortal(void)
Definition portalmem.c:235
bool PreCommit_Portals(bool isPrepare)
Definition portalmem.c:676
static MemoryContext TopPortalContext
Definition portalmem.c:91
void MarkPortalFailed(Portal portal)
Definition portalmem.c:441
static void PortalReleaseCachedPlan(Portal portal)
Definition portalmem.c:309
void UnpinPortal(Portal portal)
Definition portalmem.c:379
void HoldPinnedPortals(void)
Definition portalmem.c:1207
PlannedStmt * PortalGetPrimaryStmt(Portal portal)
Definition portalmem.c:151
void MarkPortalActive(Portal portal)
Definition portalmem.c:394
void PortalDrop(Portal portal, bool isTopCommit)
Definition portalmem.c:467
#define PortalHashTableLookup(NAME, PORTAL)
Definition portalmem.c:56
bool ThereAreNoReadyPortals(void)
Definition portalmem.c:1171
Portal GetPortalByName(const char *name)
Definition portalmem.c:130
void AtSubCommit_Portals(SubTransactionId mySubid, SubTransactionId parentSubid, int parentLevel, ResourceOwner parentXactOwner)
Definition portalmem.c:943
#define PortalHashTableDelete(PORTAL)
Definition portalmem.c:81
void AtCleanup_Portals(void)
Definition portalmem.c:858
void PortalDefineQuery(Portal portal, const char *prepStmtName, const char *sourceText, CommandTag commandTag, List *stmts, CachedPlan *cplan)
Definition portalmem.c:282
void PortalHashTableDeleteAll(void)
Definition portalmem.c:606
static void HoldPortal(Portal portal)
Definition portalmem.c:635
Portal CreatePortal(const char *name, bool allowDup, bool dupSilent)
Definition portalmem.c:175
void AtSubCleanup_Portals(SubTransactionId mySubid)
Definition portalmem.c:1092
void PortalErrorCleanup(void)
Definition portalmem.c:917
void ForgetPortalSnapshots(void)
Definition portalmem.c:1256
struct portalhashent PortalHashEnt
void PortalCreateHoldStore(Portal portal)
Definition portalmem.c:330
#define PORTALS_PER_USER
Definition portalmem.c:38
static Datum BoolGetDatum(bool X)
Definition postgres.h:112
uint64_t Datum
Definition postgres.h:70
static int fb(int x)
tree ctl
Definition radixtree.h:1838
void ResourceOwnerNewParent(ResourceOwner owner, ResourceOwner newparent)
Definition resowner.c:911
ResourceOwner ResourceOwnerCreate(ResourceOwner parent, const char *name)
Definition resowner.c:418
void ResourceOwnerRelease(ResourceOwner owner, ResourceReleasePhase phase, bool isCommit, bool isTopLevel)
Definition resowner.c:655
void ResourceOwnerDelete(ResourceOwner owner)
Definition resowner.c:868
ResourceOwner CurTransactionResourceOwner
Definition resowner.c:174
@ RESOURCE_RELEASE_LOCKS
Definition resowner.h:55
@ RESOURCE_RELEASE_BEFORE_LOCKS
Definition resowner.h:54
@ RESOURCE_RELEASE_AFTER_LOCKS
Definition resowner.h:56
void UnregisterSnapshotFromOwner(Snapshot snapshot, ResourceOwner owner)
Definition snapmgr.c:879
bool ActiveSnapshotSet(void)
Definition snapmgr.c:812
void PopActiveSnapshot(void)
Definition snapmgr.c:775
Definition pg_list.h:54
SubTransactionId createSubid
Definition portal.h:131
Snapshot portalSnapshot
Definition portal.h:169
SubTransactionId activeSubid
Definition portal.h:132
CommandTag commandTag
Definition portal.h:137
const char * sourceText
Definition portal.h:136
bool atEnd
Definition portal.h:199
bool atStart
Definition portal.h:198
List * stmts
Definition portal.h:139
ResourceOwner resowner
Definition portal.h:121
TimestampTz creation_time
Definition portal.h:203
bool autoHeld
Definition portal.h:152
bool portalPinned
Definition portal.h:151
int createLevel
Definition portal.h:133
MemoryContext holdContext
Definition portal.h:177
QueryCompletion qc
Definition portal.h:138
MemoryContext portalContext
Definition portal.h:120
bool visible
Definition portal.h:204
Snapshot holdSnapshot
Definition portal.h:187
const char * name
Definition portal.h:118
const char * prepStmtName
Definition portal.h:119
CachedPlan * cplan
Definition portal.h:140
Tuplestorestate * holdStore
Definition portal.h:176
int cursorOptions
Definition portal.h:147
void(* cleanup)(Portal portal)
Definition portal.h:122
PortalStrategy strategy
Definition portal.h:146
PortalStatus status
Definition portal.h:150
Portal portal
Definition portalmem.c:51
char portalname[MAX_PORTALNAME_LEN]
Definition portalmem.c:50
Tuplestorestate * tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
Definition tuplestore.c:330
void tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc, const Datum *values, const bool *isnull)
Definition tuplestore.c:784
void tuplestore_end(Tuplestorestate *state)
Definition tuplestore.c:492
static Datum TimestampTzGetDatum(TimestampTz X)
Definition timestamp.h:52
const char * name
SubTransactionId GetCurrentSubTransactionId(void)
Definition xact.c:792
int GetCurrentTransactionNestLevel(void)
Definition xact.c:930
TimestampTz GetCurrentStatementStartTimestamp(void)
Definition xact.c:880