- /*-------------------------------------------------------------------------
- *
- * tableam.h
- * POSTGRES table access method definitions.
- *
- *
- * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * src/include/access/tableam.h
- *
- * NOTES
- * See tableam.sgml for higher level documentation.
- *
- *-------------------------------------------------------------------------
- */
- #ifndef TABLEAM_H
- #define TABLEAM_H
- #include "access/relscan.h"
- #include "access/sdir.h"
- #include "access/xact.h"
- #include "utils/guc.h"
- #include "utils/rel.h"
- #include "utils/snapshot.h"
- #define DEFAULT_TABLE_ACCESS_METHOD "heap"
- /* GUCs */
- extern PGDLLIMPORT char *default_table_access_method;
- extern PGDLLIMPORT bool synchronize_seqscans;
- struct BulkInsertStateData;
- struct IndexInfo;
- struct SampleScanState;
- struct TBMIterateResult;
- struct VacuumParams;
- struct ValidateIndexState;
- /*
- * Bitmask values for the flags argument to the scan_begin callback.
- */
- typedef enum ScanOptions
- {
- /* one of SO_TYPE_* may be specified */
- SO_TYPE_SEQSCAN = 1 << 0,
- SO_TYPE_BITMAPSCAN = 1 << 1,
- SO_TYPE_SAMPLESCAN = 1 << 2,
- SO_TYPE_TIDSCAN = 1 << 3,
- SO_TYPE_TIDRANGESCAN = 1 << 4,
- SO_TYPE_ANALYZE = 1 << 5,
- /* several of SO_ALLOW_* may be specified */
- /* allow or disallow use of access strategy */
- SO_ALLOW_STRAT = 1 << 6,
- /* report location to syncscan logic? */
- SO_ALLOW_SYNC = 1 << 7,
- /* verify visibility page-at-a-time? */
- SO_ALLOW_PAGEMODE = 1 << 8,
- /* unregister snapshot at scan end? */
- SO_TEMP_SNAPSHOT = 1 << 9
- } ScanOptions;
- /*
- * Result codes for table_{update,delete,lock_tuple}, and for visibility
- * routines inside table AMs.
- */
- typedef enum TM_Result
- {
- /*
- * Signals that the action succeeded (i.e. update/delete performed, lock
- * was acquired)
- */
- TM_Ok,
- /* The affected tuple wasn't visible to the relevant snapshot */
- TM_Invisible,
- /* The affected tuple was already modified by the calling backend */
- TM_SelfModified,
- /*
- * The affected tuple was updated by another transaction. This includes
- * the case where tuple was moved to another partition.
- */
- TM_Updated,
- /* The affected tuple was deleted by another transaction */
- TM_Deleted,
- /*
- * The affected tuple is currently being modified by another session. This
- * will only be returned if table_(update/delete/lock_tuple) are
- * instructed not to wait.
- */
- TM_BeingModified,
- /* lock couldn't be acquired, action skipped. Only used by lock_tuple */
- TM_WouldBlock
- } TM_Result;
- /*
- * When table_tuple_update, table_tuple_delete, or table_tuple_lock fail
- * because the target tuple is already outdated, they fill in this struct to
- * provide information to the caller about what happened.
- *
- * ctid is the target's ctid link: it is the same as the target's TID if the
- * target was deleted, or the location of the replacement tuple if the target
- * was updated.
- *
- * xmax is the outdating transaction's XID. If the caller wants to visit the
- * replacement tuple, it must check that this matches before believing the
- * replacement is really a match.
- *
- * cmax is the outdating command's CID, but only when the failure code is
- * TM_SelfModified (i.e., something in the current transaction outdated the
- * tuple); otherwise cmax is zero. (We make this restriction because
- * HeapTupleHeaderGetCmax doesn't work for tuples outdated in other
- * transactions.)
- */
- typedef struct TM_FailureData
- {
- ItemPointerData ctid;
- TransactionId xmax;
- CommandId cmax;
- bool traversed;
- } TM_FailureData;
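- /*
- * Illustrative sketch (not part of this header's API): how a caller might
- * react to the failure data filled in by a failed delete.  Variable names
- * are hypothetical; real callers such as the executor also perform
- * EvalPlanQual-style rechecks using tmfd.ctid and tmfd.xmax.  See
- * table_tuple_delete() further down for the wrapper used here.
- *
- *		TM_FailureData tmfd;
- *
- *		switch (table_tuple_delete(rel, tid, cid, snapshot, InvalidSnapshot,
- *								   true, &tmfd, false))
- *		{
- *			case TM_Ok:
- *				break;
- *			case TM_SelfModified:
- *				ereport(ERROR,
- *						(errmsg("tuple already modified by this command")));
- *				break;
- *			case TM_Updated:
- *			case TM_Deleted:
- *				break;
- *			default:
- *				elog(ERROR, "unexpected table_tuple_delete result");
- *		}
- *
- * On TM_Updated, tmfd.ctid points at the replacement tuple and tmfd.xmax
- * identifies the updating transaction; a caller that wants the latest row
- * version can chase that link, or use table_tuple_lock() with
- * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
- */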
- /*
- * State used when calling table_index_delete_tuples().
- *
- * Represents the status of table tuples, referenced by table TID and taken by
- * index AM from index tuples. State consists of high level parameters of the
- * deletion operation, plus two mutable palloc()'d arrays for information
- * about the status of individual table tuples. These are conceptually one
- * single array. Using two arrays keeps the TM_IndexDelete struct small,
- * which makes sorting the first array (the deltids array) fast.
- *
- * Some index AM callers perform simple index tuple deletion (by specifying
- * bottomup = false), and include only known-dead deltids. These known-dead
- * entries are all marked knowndeletable = true directly (typically these are
- * TIDs from LP_DEAD-marked index tuples), but that isn't strictly required.
- *
- * Callers that specify bottomup = true are "bottom-up index deletion"
- * callers. The considerations for the tableam are more subtle with these
- * callers because they ask the tableam to perform highly speculative work,
- * and might only expect the tableam to check a small fraction of all entries.
- * Caller is not allowed to specify knowndeletable = true for any entry
- * because everything is highly speculative. Bottom-up caller provides
- * context and hints to tableam -- see comments below for details on how index
- * AMs and tableams should coordinate during bottom-up index deletion.
- *
- * Simple index deletion callers may ask the tableam to perform speculative
- * work, too. This is a little like bottom-up deletion, but not too much.
- * The tableam will only perform speculative work when it's practically free
- * to do so in passing for simple deletion caller (while always performing
- * whatever work is needed to enable knowndeletable/LP_DEAD index tuples to
- * be deleted within index AM). This is the real reason why it's possible for
- * simple index deletion caller to specify knowndeletable = false up front
- * (this means "check if it's possible for me to delete corresponding index
- * tuple when it's cheap to do so in passing"). The index AM should only
- * include "extra" entries for index tuples whose TIDs point to a table block
- * that tableam is expected to have to visit anyway (in the event of a block
- * orientated tableam). The tableam isn't strictly obligated to check these
- * "extra" TIDs, but a block-based AM should always manage to do so in
- * practice.
- *
- * The final contents of the deltids/status arrays are interesting to callers
- * that ask tableam to perform speculative work (i.e. when _any_ items have
- * knowndeletable set to false up front). These index AM callers will
- * naturally need to consult final state to determine which index tuples are
- * in fact deletable.
- *
- * The index AM can keep track of which index tuple relates to which deltid by
- * setting idxoffnum (and/or relying on each entry being uniquely identifiable
- * using tid), which is important when the final contents of the array will
- * need to be interpreted -- the array can shrink from initial size after
- * tableam processing and/or have entries in a new order (tableam may sort
- * deltids array for its own reasons). Bottom-up callers may find that final
- * ndeltids is 0 on return from call to tableam, in which case no index tuple
- * deletions are possible. Simple deletion callers can rely on any entries
- * they know to be deletable appearing in the final array as deletable.
- */
- typedef struct TM_IndexDelete
- {
- ItemPointerData tid; /* table TID from index tuple */
- int16 id; /* Offset into TM_IndexStatus array */
- } TM_IndexDelete;
- typedef struct TM_IndexStatus
- {
- OffsetNumber idxoffnum; /* Index am page offset number */
- bool knowndeletable; /* Currently known to be deletable? */
- /* Bottom-up index deletion specific fields follow */
- bool promising; /* Promising (duplicate) index tuple? */
- int16 freespace; /* Space freed in index if deleted */
- } TM_IndexStatus;
- /*
- * Index AM/tableam coordination is central to the design of bottom-up index
- * deletion. The index AM provides hints about where to look to the tableam
- * by marking some entries as "promising". Index AM does this with duplicate
- * index tuples that are strongly suspected to be old versions left behind by
- * UPDATEs that did not logically modify indexed values. Index AM may find it
- * helpful to only mark entries as promising when they're thought to have been
- * affected by such an UPDATE in the recent past.
- *
- * Bottom-up index deletion casts a wide net at first, usually by including
- * all TIDs on a target index page. It is up to the tableam to worry about
- * the cost of checking transaction status information. The tableam is in
- * control, but needs careful guidance from the index AM. Index AM requests
- * that bottomupfreespace target be met, while tableam measures progress
- * towards that goal by tallying the per-entry freespace value for known
- * deletable entries. (All !bottomup callers can just set these space related
- * fields to zero.)
- */
- typedef struct TM_IndexDeleteOp
- {
- Relation irel; /* Target index relation */
- BlockNumber iblknum; /* Index block number (for error reports) */
- bool bottomup; /* Bottom-up (not simple) deletion? */
- int bottomupfreespace; /* Bottom-up space target */
- /* Mutable per-TID information follows (index AM initializes entries) */
- int ndeltids; /* Current # of deltids/status elements */
- TM_IndexDelete *deltids;
- TM_IndexStatus *status;
- } TM_IndexDeleteOp;
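- /*
- * Illustrative sketch (not part of this header's API): a simple (bottomup =
- * false) deletion request built by an index AM from LP_DEAD-marked items on
- * one of its pages, roughly as nbtree does it.  The variables buf, indexRel,
- * heapRel and latestRemovedXid are hypothetical; see
- * table_index_delete_tuples() further down for the wrapper being called.
- *
- *		TM_IndexDeleteOp delstate;
- *		Page		page = BufferGetPage(buf);
- *		OffsetNumber offnum,
- *					maxoff = PageGetMaxOffsetNumber(page);
- *		int			n = 0;
- *
- *		delstate.irel = indexRel;
- *		delstate.iblknum = BufferGetBlockNumber(buf);
- *		delstate.bottomup = false;
- *		delstate.bottomupfreespace = 0;
- *		delstate.deltids = palloc(MaxIndexTuplesPerPage * sizeof(TM_IndexDelete));
- *		delstate.status = palloc(MaxIndexTuplesPerPage * sizeof(TM_IndexStatus));
- *
- *		for (offnum = FirstOffsetNumber; offnum <= maxoff;
- *			 offnum = OffsetNumberNext(offnum))
- *		{
- *			ItemId		itemid = PageGetItemId(page, offnum);
- *			IndexTuple	itup = (IndexTuple) PageGetItem(page, itemid);
- *
- *			if (!ItemIdIsDead(itemid))
- *				continue;
- *
- *			delstate.deltids[n].tid = itup->t_tid;
- *			delstate.deltids[n].id = n;
- *			delstate.status[n].idxoffnum = offnum;
- *			delstate.status[n].knowndeletable = true;
- *			delstate.status[n].promising = false;
- *			delstate.status[n].freespace = 0;
- *			n++;
- *		}
- *		delstate.ndeltids = n;
- *
- *		latestRemovedXid = table_index_delete_tuples(heapRel, &delstate);
- */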
- /* "options" flag bits for table_tuple_insert */
- /* TABLE_INSERT_SKIP_WAL was 0x0001; RelationNeedsWAL() now governs */
- #define TABLE_INSERT_SKIP_FSM 0x0002
- #define TABLE_INSERT_FROZEN 0x0004
- #define TABLE_INSERT_NO_LOGICAL 0x0008
- /* flag bits for table_tuple_lock */
- /* Follow tuples whose update is in progress if lock modes don't conflict */
- #define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS (1 << 0)
- /* Follow update chain and lock latest version of tuple */
- #define TUPLE_LOCK_FLAG_FIND_LAST_VERSION (1 << 1)
- /* Typedef for callback function for table_index_build_scan */
- typedef void (*IndexBuildCallback) (Relation index,
- ItemPointer tid,
- Datum *values,
- bool *isnull,
- bool tupleIsAlive,
- void *state);
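- /*
- * Illustrative sketch (not part of this header's API): an IndexBuildCallback
- * as an index AM might hand it to table_index_build_scan().  MyBuildState
- * and myindex_spool_tuple() are hypothetical stand-ins for whatever spooling
- * the AM really does.  Note that tupleIsAlive only reports whether the row
- * is known live (relevant e.g. for unique checking); the tuple must still be
- * indexed either way.
- *
- *		static void
- *		myindex_build_callback(Relation index, ItemPointer tid, Datum *values,
- *							   bool *isnull, bool tupleIsAlive, void *state)
- *		{
- *			MyBuildState *buildstate = (MyBuildState *) state;
- *
- *			myindex_spool_tuple(buildstate, tid, values, isnull, tupleIsAlive);
- *			buildstate->indtuples += 1;
- *		}
- */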
- /*
- * API struct for a table AM. Note this must be allocated in a
- * server-lifetime manner, typically as a static const struct, which then gets
- * returned by FormData_pg_am.amhandler.
- *
- * In most cases it's not appropriate to call the callbacks directly, use the
- * table_* wrapper functions instead.
- *
- * GetTableAmRoutine() asserts that required callbacks are filled in, remember
- * to update when adding a callback.
- */
- typedef struct TableAmRoutine
- {
- /* this must be set to T_TableAmRoutine */
- NodeTag type;
- /* ------------------------------------------------------------------------
- * Slot related callbacks.
- * ------------------------------------------------------------------------
- */
- /*
- * Return slot implementation suitable for storing a tuple of this AM.
- */
- const TupleTableSlotOps *(*slot_callbacks) (Relation rel);
- /* ------------------------------------------------------------------------
- * Table scan callbacks.
- * ------------------------------------------------------------------------
- */
- /*
- * Start a scan of `rel`. The callback has to return a TableScanDesc,
- * which will typically be embedded in a larger, AM specific, struct.
- *
- * If nkeys != 0, the results need to be filtered by those scan keys.
- *
- * pscan, if not NULL, will have already been initialized with
- * parallelscan_initialize(), and has to be for the same relation. Will
- * only be set coming from table_beginscan_parallel().
- *
- * `flags` is a bitmask indicating the type of scan (ScanOptions's
- * SO_TYPE_*, currently only one may be specified), options controlling
- * the scan's behaviour (ScanOptions's SO_ALLOW_*, several may be
- * specified, an AM may ignore unsupported ones) and whether the snapshot
- * needs to be deallocated at scan_end (ScanOptions's SO_TEMP_SNAPSHOT).
- */
- TableScanDesc (*scan_begin) (Relation rel,
- Snapshot snapshot,
- int nkeys, struct ScanKeyData *key,
- ParallelTableScanDesc pscan,
- uint32 flags);
- /*
- * Release resources and deallocate scan. If TableScanDesc.temp_snap,
- * TableScanDesc.rs_snapshot needs to be unregistered.
- */
- void (*scan_end) (TableScanDesc scan);
- /*
- * Restart relation scan. If set_params is set to true, allow_{strat,
- * sync, pagemode} (see scan_begin) changes should be taken into account.
- */
- void (*scan_rescan) (TableScanDesc scan, struct ScanKeyData *key,
- bool set_params, bool allow_strat,
- bool allow_sync, bool allow_pagemode);
- /*
- * Return next tuple from `scan`, store in slot.
- */
- bool (*scan_getnextslot) (TableScanDesc scan,
- ScanDirection direction,
- TupleTableSlot *slot);
- /*-----------
- * Optional functions to provide scanning for ranges of ItemPointers.
- * Implementations must either provide both of these functions, or neither
- * of them.
- *
- * Implementations of scan_set_tidrange must themselves handle
- * ItemPointers of any value, i.e. they must handle each of the following:
- *
- * 1) mintid or maxtid is beyond the end of the table; and
- * 2) mintid is above maxtid; and
- * 3) item offset for mintid or maxtid is beyond the maximum offset
- * allowed by the AM.
- *
- * Implementations can assume that scan_set_tidrange is always called
- * before scan_getnextslot_tidrange or after scan_rescan and before any
- * further calls to scan_getnextslot_tidrange.
- */
- void (*scan_set_tidrange) (TableScanDesc scan,
- ItemPointer mintid,
- ItemPointer maxtid);
- /*
- * Return next tuple from `scan` that's in the range of TIDs defined by
- * scan_set_tidrange.
- */
- bool (*scan_getnextslot_tidrange) (TableScanDesc scan,
- ScanDirection direction,
- TupleTableSlot *slot);
- /* ------------------------------------------------------------------------
- * Parallel table scan related functions.
- * ------------------------------------------------------------------------
- */
- /*
- * Estimate the size of shared memory needed for a parallel scan of this
- * relation. The snapshot does not need to be accounted for.
- */
- Size (*parallelscan_estimate) (Relation rel);
- /*
- * Initialize ParallelTableScanDesc for a parallel scan of this relation.
- * `pscan` will be sized according to parallelscan_estimate() for the same
- * relation.
- */
- Size (*parallelscan_initialize) (Relation rel,
- ParallelTableScanDesc pscan);
- /*
- * Reinitialize `pscan` for a new scan. `rel` will be the same relation as
- * when `pscan` was initialized by parallelscan_initialize.
- */
- void (*parallelscan_reinitialize) (Relation rel,
- ParallelTableScanDesc pscan);
- /* ------------------------------------------------------------------------
- * Index Scan Callbacks
- * ------------------------------------------------------------------------
- */
- /*
- * Prepare to fetch tuples from the relation, as needed when fetching
- * tuples for an index scan. The callback has to return an
- * IndexFetchTableData, which the AM will typically embed in a larger
- * structure with additional information.
- *
- * Tuples for an index scan can then be fetched via index_fetch_tuple.
- */
- struct IndexFetchTableData *(*index_fetch_begin) (Relation rel);
- /*
- * Reset index fetch. Typically this will release cross index fetch
- * resources held in IndexFetchTableData.
- */
- void (*index_fetch_reset) (struct IndexFetchTableData *data);
- /*
- * Release resources and deallocate index fetch.
- */
- void (*index_fetch_end) (struct IndexFetchTableData *data);
- /*
- * Fetch tuple at `tid` into `slot`, after doing a visibility test
- * according to `snapshot`. If a tuple was found and passed the visibility
- * test, return true, false otherwise.
- *
- * Note that AMs that do not necessarily update indexes when indexed
- * columns do not change need to return the current/correct version of
- * the tuple that is visible to the snapshot, even if the tid points to an
- * older version of the tuple.
- *
- * *call_again is false on the first call to index_fetch_tuple for a tid.
- * If there potentially is another tuple matching the tid, *call_again
- * needs to be set to true by index_fetch_tuple, signaling to the caller
- * that index_fetch_tuple should be called again for the same tid.
- *
- * *all_dead, if all_dead is not NULL, should be set to true by
- * index_fetch_tuple iff it is guaranteed that no backend needs to see
- * that tuple. Index AMs can use that to avoid returning that tid in
- * future searches.
- */
- bool (*index_fetch_tuple) (struct IndexFetchTableData *scan,
- ItemPointer tid,
- Snapshot snapshot,
- TupleTableSlot *slot,
- bool *call_again, bool *all_dead);
- /* ------------------------------------------------------------------------
- * Callbacks for non-modifying operations on individual tuples
- * ------------------------------------------------------------------------
- */
- /*
- * Fetch tuple at `tid` into `slot`, after doing a visibility test
- * according to `snapshot`. If a tuple was found and passed the visibility
- * test, returns true, false otherwise.
- */
- bool (*tuple_fetch_row_version) (Relation rel,
- ItemPointer tid,
- Snapshot snapshot,
- TupleTableSlot *slot);
- /*
- * Is tid valid for a scan of this relation.
- */
- bool (*tuple_tid_valid) (TableScanDesc scan,
- ItemPointer tid);
- /*
- * Return the latest version of the tuple at `tid`, by updating `tid` to
- * point at the newest version.
- */
- void (*tuple_get_latest_tid) (TableScanDesc scan,
- ItemPointer tid);
- /*
- * Does the tuple in `slot` satisfy `snapshot`? The slot needs to be of
- * the appropriate type for the AM.
- */
- bool (*tuple_satisfies_snapshot) (Relation rel,
- TupleTableSlot *slot,
- Snapshot snapshot);
- /* see table_index_delete_tuples() */
- TransactionId (*index_delete_tuples) (Relation rel,
- TM_IndexDeleteOp *delstate);
- /* ------------------------------------------------------------------------
- * Manipulations of physical tuples.
- * ------------------------------------------------------------------------
- */
- /* see table_tuple_insert() for reference about parameters */
- void (*tuple_insert) (Relation rel, TupleTableSlot *slot,
- CommandId cid, int options,
- struct BulkInsertStateData *bistate);
- /* see table_tuple_insert_speculative() for reference about parameters */
- void (*tuple_insert_speculative) (Relation rel,
- TupleTableSlot *slot,
- CommandId cid,
- int options,
- struct BulkInsertStateData *bistate,
- uint32 specToken);
- /* see table_tuple_complete_speculative() for reference about parameters */
- void (*tuple_complete_speculative) (Relation rel,
- TupleTableSlot *slot,
- uint32 specToken,
- bool succeeded);
- /* see table_multi_insert() for reference about parameters */
- void (*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots,
- CommandId cid, int options, struct BulkInsertStateData *bistate);
- /* see table_tuple_delete() for reference about parameters */
- TM_Result (*tuple_delete) (Relation rel,
- ItemPointer tid,
- CommandId cid,
- Snapshot snapshot,
- Snapshot crosscheck,
- bool wait,
- TM_FailureData *tmfd,
- bool changingPart);
- /* see table_tuple_update() for reference about parameters */
- TM_Result (*tuple_update) (Relation rel,
- ItemPointer otid,
- TupleTableSlot *slot,
- CommandId cid,
- Snapshot snapshot,
- Snapshot crosscheck,
- bool wait,
- TM_FailureData *tmfd,
- LockTupleMode *lockmode,
- bool *update_indexes);
- /* see table_tuple_lock() for reference about parameters */
- TM_Result (*tuple_lock) (Relation rel,
- ItemPointer tid,
- Snapshot snapshot,
- TupleTableSlot *slot,
- CommandId cid,
- LockTupleMode mode,
- LockWaitPolicy wait_policy,
- uint8 flags,
- TM_FailureData *tmfd);
- /*
- * Perform operations necessary to complete insertions made via
- * tuple_insert and multi_insert with a BulkInsertState specified. In-tree
- * access methods ceased to use this.
- *
- * Typically callers of tuple_insert and multi_insert will just pass all
- * the flags that apply to them, and each AM has to decide which of them
- * make sense for it, and then only take actions in finish_bulk_insert for
- * those flags, and ignore others.
- *
- * Optional callback.
- */
- void (*finish_bulk_insert) (Relation rel, int options);
- /* ------------------------------------------------------------------------
- * DDL related functionality.
- * ------------------------------------------------------------------------
- */
- /*
- * This callback needs to create a new relation filenode for `rel`, with
- * appropriate durability behaviour for `persistence`.
- *
- * Note that only the subset of the relcache filled by
- * RelationBuildLocalRelation() can be relied upon and that the relation's
- * catalog entries will either not yet exist (new relation), or will still
- * reference the old relfilenode.
- *
- * As output *freezeXid, *minmulti must be set to the values appropriate
- * for pg_class.{relfrozenxid, relminmxid}. For AMs that don't need those
- * fields to be filled they can be set to InvalidTransactionId and
- * InvalidMultiXactId, respectively.
- *
- * See also table_relation_set_new_filenode().
- */
- void (*relation_set_new_filenode) (Relation rel,
- const RelFileNode *newrnode,
- char persistence,
- TransactionId *freezeXid,
- MultiXactId *minmulti);
- /*
- * This callback needs to remove all contents from `rel`'s current
- * relfilenode. No provisions for transactional behaviour need to be made.
- * Often this can be implemented by truncating the underlying storage to
- * its minimal size.
- *
- * See also table_relation_nontransactional_truncate().
- */
- void (*relation_nontransactional_truncate) (Relation rel);
- /*
- * See table_relation_copy_data().
- *
- * This can typically be implemented by directly copying the underlying
- * storage, unless it contains references to the tablespace internally.
- */
- void (*relation_copy_data) (Relation rel,
- const RelFileNode *newrnode);
- /* See table_relation_copy_for_cluster() */
- void (*relation_copy_for_cluster) (Relation NewTable,
- Relation OldTable,
- Relation OldIndex,
- bool use_sort,
- TransactionId OldestXmin,
- TransactionId *xid_cutoff,
- MultiXactId *multi_cutoff,
- double *num_tuples,
- double *tups_vacuumed,
- double *tups_recently_dead);
- /*
- * React to VACUUM command on the relation. The VACUUM can be triggered by
- * a user or by autovacuum. The specific actions performed by the AM will
- * depend heavily on the individual AM.
- *
- * On entry a transaction is already established, and the relation is
- * locked with a ShareUpdateExclusive lock.
- *
- * Note that VACUUM FULL (and CLUSTER) and ANALYZE do not go through
- * this routine, even if (for ANALYZE) it is part of the same VACUUM
- * command.
- *
- * There probably, in the future, needs to be a separate callback to
- * integrate with autovacuum's scheduling.
- */
- void (*relation_vacuum) (Relation rel,
- struct VacuumParams *params,
- BufferAccessStrategy bstrategy);
- /*
- * Prepare to analyze block `blockno` of `scan`. The scan has been started
- * with table_beginscan_analyze(). See also
- * table_scan_analyze_next_block().
- *
- * The callback may acquire resources like locks that are held until
- * table_scan_analyze_next_tuple() returns false. For example, it can make
- * sense to hold a lock until all tuples on a block have been analyzed by
- * scan_analyze_next_tuple.
- *
- * The callback can return false if the block is not suitable for
- * sampling, e.g. because it's a metapage that could never contain tuples.
- *
- * XXX: This obviously is primarily suited for block-based AMs. It's not
- * clear what a good interface for non block based AMs would be, so there
- * isn't one yet.
- */
- bool (*scan_analyze_next_block) (TableScanDesc scan,
- BlockNumber blockno,
- BufferAccessStrategy bstrategy);
- /*
- * See table_scan_analyze_next_tuple().
- *
- * Not every AM might have a meaningful concept of dead rows, in which
- * case it's OK to not increment *deadrows - but note that that may
- * influence autovacuum scheduling (see comment for relation_vacuum
- * callback).
- */
- bool (*scan_analyze_next_tuple) (TableScanDesc scan,
- TransactionId OldestXmin,
- double *liverows,
- double *deadrows,
- TupleTableSlot *slot);
- /* see table_index_build_range_scan for reference about parameters */
- double (*index_build_range_scan) (Relation table_rel,
- Relation index_rel,
- struct IndexInfo *index_info,
- bool allow_sync,
- bool anyvisible,
- bool progress,
- BlockNumber start_blockno,
- BlockNumber numblocks,
- IndexBuildCallback callback,
- void *callback_state,
- TableScanDesc scan);
- /* see table_index_validate_scan for reference about parameters */
- void (*index_validate_scan) (Relation table_rel,
- Relation index_rel,
- struct IndexInfo *index_info,
- Snapshot snapshot,
- struct ValidateIndexState *state);
- /* ------------------------------------------------------------------------
- * Miscellaneous functions.
- * ------------------------------------------------------------------------
- */
- /*
- * See table_relation_size().
- *
- * Note that currently a few callers use the MAIN_FORKNUM size to figure
- * out the range of potentially interesting blocks (brin, analyze). It's
- * probable that we'll need to revise the interface for those at some
- * point.
- */
- uint64 (*relation_size) (Relation rel, ForkNumber forkNumber);
- /*
- * This callback should return true if the relation requires a TOAST table
- * and false if it does not. It may wish to examine the relation's tuple
- * descriptor before making a decision, but if it uses some other method
- * of storing large values (or if it does not support them) it can simply
- * return false.
- */
- bool (*relation_needs_toast_table) (Relation rel);
- /*
- * This callback should return the OID of the table AM that implements
- * TOAST tables for this AM. If the relation_needs_toast_table callback
- * always returns false, this callback is not required.
- */
- Oid (*relation_toast_am) (Relation rel);
- /*
- * This callback is invoked when detoasting a value stored in a toast
- * table implemented by this AM. See table_relation_fetch_toast_slice()
- * for more details.
- */
- void (*relation_fetch_toast_slice) (Relation toastrel, Oid valueid,
- int32 attrsize,
- int32 sliceoffset,
- int32 slicelength,
- struct varlena *result);
- /* ------------------------------------------------------------------------
- * Planner related functions.
- * ------------------------------------------------------------------------
- */
- /*
- * See table_relation_estimate_size().
- *
- * While block oriented, it shouldn't be too hard for an AM that doesn't
- * internally use blocks to convert into a usable representation.
- *
- * This differs from the relation_size callback by returning size
- * estimates (both relation size and tuple count) for planning purposes,
- * rather than returning a currently correct estimate.
- */
- void (*relation_estimate_size) (Relation rel, int32 *attr_widths,
- BlockNumber *pages, double *tuples,
- double *allvisfrac);
- /* ------------------------------------------------------------------------
- * Executor related functions.
- * ------------------------------------------------------------------------
- */
- /*
- * Prepare to fetch / check / return tuples from `tbmres->blockno` as part
- * of a bitmap table scan. `scan` was started via table_beginscan_bm().
- * Return false if there are no tuples to be found on the page, true
- * otherwise.
- *
- * This will typically read and pin the target block, and do the necessary
- * work to allow scan_bitmap_next_tuple() to return tuples (e.g. it might
- * make sense to perform tuple visibility checks at this time). For some
- * AMs it will make more sense to do all the work referencing `tbmres`
- * contents here, for others it might be better to defer more work to
- * scan_bitmap_next_tuple.
- *
- * If `tbmres->ntuples` is -1, the bitmap is lossy for this page and all
- * visible tuples on the page have to be returned, otherwise only the tuples
- * at the offsets in `tbmres->offsets` need to be returned.
- *
- * XXX: Currently this may only be implemented if the AM uses md.c as its
- * storage manager, and uses ItemPointer->ip_blkid in a manner that maps
- * blockids directly to the underlying storage. nodeBitmapHeapscan.c
- * performs prefetching directly using that interface. This probably
- * needs to be rectified at a later point.
- *
- * XXX: Currently this may only be implemented if the AM uses the
- * visibilitymap, as nodeBitmapHeapscan.c unconditionally accesses it to
- * perform prefetching. This probably needs to be rectified at a later
- * point.
- *
- * Optional callback, but either both scan_bitmap_next_block and
- * scan_bitmap_next_tuple need to exist, or neither.
- */
- bool (*scan_bitmap_next_block) (TableScanDesc scan,
- struct TBMIterateResult *tbmres);
- /*
- * Fetch the next tuple of a bitmap table scan into `slot` and return true
- * if a visible tuple was found, false otherwise.
- *
- * For some AMs it will make more sense to do all the work referencing
- * `tbmres` contents in scan_bitmap_next_block, for others it might be
- * better to defer more work to this callback.
- *
- * Optional callback, but either both scan_bitmap_next_block and
- * scan_bitmap_next_tuple need to exist, or neither.
- */
- bool (*scan_bitmap_next_tuple) (TableScanDesc scan,
- struct TBMIterateResult *tbmres,
- TupleTableSlot *slot);
- /*
- * Prepare to fetch tuples from the next block in a sample scan. Return
- * false if the sample scan is finished, true otherwise. `scan` was
- * started via table_beginscan_sampling().
- *
- * Typically this will first determine the target block by calling the
- * TsmRoutine's NextSampleBlock() callback if not NULL, or alternatively
- * perform a sequential scan over all blocks. The determined block is
- * then typically read and pinned.
- *
- * As the TsmRoutine interface is block based, a block needs to be passed
- * to NextSampleBlock(). If that's not appropriate for an AM, it
- * internally needs to perform mapping between the internal and a block
- * based representation.
- *
- * Note that it's not acceptable to hold deadlock prone resources such as
- * lwlocks until scan_sample_next_tuple() has exhausted the tuples on the
- * block - the tuple is likely to be returned to an upper query node, and
- * the next call could be off a long while. Holding buffer pins and such
- * is obviously OK.
- *
- * Currently it is required to implement this interface, as there's no
- * alternative way (contrary e.g. to bitmap scans) to implement sample
- * scans. If infeasible to implement, the AM may raise an error.
- */
- bool (*scan_sample_next_block) (TableScanDesc scan,
- struct SampleScanState *scanstate);
- /*
- * This callback, only called after scan_sample_next_block has returned
- * true, should determine the next tuple to be returned from the selected
- * block using the TsmRoutine's NextSampleTuple() callback.
- *
- * The callback needs to perform visibility checks, and only return
- * visible tuples. That obviously can mean calling NextSampleTuple()
- * multiple times.
- *
- * The TsmRoutine interface assumes that there's a maximum offset on a
- * given page, so if that doesn't apply to an AM, it needs to emulate that
- * assumption somehow.
- */
- bool (*scan_sample_next_tuple) (TableScanDesc scan,
- struct SampleScanState *scanstate,
- TupleTableSlot *slot);
- } TableAmRoutine;
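- /*
- * Illustrative sketch (not part of this header's API): how an AM typically
- * exposes its TableAmRoutine, per the comment at the top of the struct.
- * "mytable" is a hypothetical AM; heapam's heap_tableam_handler() follows
- * the same pattern, and the AM's pg_am row points at this handler via
- * amhandler.
- *
- *		static const TableAmRoutine mytable_methods = {
- *			.type = T_TableAmRoutine,
- *			.slot_callbacks = mytable_slot_callbacks,
- *			.scan_begin = mytable_beginscan,
- *			... remaining required callbacks ...
- *		};
- *
- *		Datum
- *		mytable_tableam_handler(PG_FUNCTION_ARGS)
- *		{
- *			PG_RETURN_POINTER(&mytable_methods);
- *		}
- */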
- /* ----------------------------------------------------------------------------
- * Slot functions.
- * ----------------------------------------------------------------------------
- */
- /*
- * Returns slot callbacks suitable for holding tuples of the appropriate type
- * for the relation. Works for tables, views, foreign tables and partitioned
- * tables.
- */
- extern const TupleTableSlotOps *table_slot_callbacks(Relation rel);
- /*
- * Returns slot using the callbacks returned by table_slot_callbacks(), and
- * registers it on *reglist.
- */
- extern TupleTableSlot *table_slot_create(Relation rel, List **reglist);
- /* ----------------------------------------------------------------------------
- * Table scan functions.
- * ----------------------------------------------------------------------------
- */
- /*
- * Start a scan of `rel`. Returned tuples pass a visibility test of
- * `snapshot`, and if nkeys != 0, the results are filtered by those scan keys.
- */
- static inline TableScanDesc
- table_beginscan(Relation rel, Snapshot snapshot,
- int nkeys, struct ScanKeyData *key)
- {
- uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
- return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
- }
- /*
- * Like table_beginscan(), but for scanning a catalog. It'll automatically use a
- * snapshot appropriate for scanning catalog relations.
- */
- extern TableScanDesc table_beginscan_catalog(Relation rel, int nkeys,
- struct ScanKeyData *key);
- /*
- * Like table_beginscan(), but table_beginscan_strat() offers an extended API
- * that lets the caller control whether a nondefault buffer access strategy
- * can be used, and whether syncscan can be chosen (possibly resulting in the
- * scan not starting from block zero). Both of these default to true with
- * plain table_beginscan.
- */
- static inline TableScanDesc
- table_beginscan_strat(Relation rel, Snapshot snapshot,
- int nkeys, struct ScanKeyData *key,
- bool allow_strat, bool allow_sync)
- {
- uint32 flags = SO_TYPE_SEQSCAN | SO_ALLOW_PAGEMODE;
- if (allow_strat)
- flags |= SO_ALLOW_STRAT;
- if (allow_sync)
- flags |= SO_ALLOW_SYNC;
- return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
- }
- /*
- * table_beginscan_bm is an alternative entry point for setting up a
- * TableScanDesc for a bitmap heap scan. Although that scan technology is
- * really quite unlike a standard seqscan, there is just enough commonality to
- * make it worth using the same data structure.
- */
- static inline TableScanDesc
- table_beginscan_bm(Relation rel, Snapshot snapshot,
- int nkeys, struct ScanKeyData *key)
- {
- uint32 flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE;
- return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
- }
- /*
- * table_beginscan_sampling is an alternative entry point for setting up a
- * TableScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth
- * using the same data structure although the behavior is rather different.
- * In addition to the options offered by table_beginscan_strat, this call
- * also allows control of whether page-mode visibility checking is used.
- */
- static inline TableScanDesc
- table_beginscan_sampling(Relation rel, Snapshot snapshot,
- int nkeys, struct ScanKeyData *key,
- bool allow_strat, bool allow_sync,
- bool allow_pagemode)
- {
- uint32 flags = SO_TYPE_SAMPLESCAN;
- if (allow_strat)
- flags |= SO_ALLOW_STRAT;
- if (allow_sync)
- flags |= SO_ALLOW_SYNC;
- if (allow_pagemode)
- flags |= SO_ALLOW_PAGEMODE;
- return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
- }
- /*
- * table_beginscan_tid is an alternative entry point for setting up a
- * TableScanDesc for a Tid scan. As with bitmap scans, it's worth using
- * the same data structure although the behavior is rather different.
- */
- static inline TableScanDesc
- table_beginscan_tid(Relation rel, Snapshot snapshot)
- {
- uint32 flags = SO_TYPE_TIDSCAN;
- return rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags);
- }
- /*
- * table_beginscan_analyze is an alternative entry point for setting up a
- * TableScanDesc for an ANALYZE scan. As with bitmap scans, it's worth using
- * the same data structure although the behavior is rather different.
- */
- static inline TableScanDesc
- table_beginscan_analyze(Relation rel)
- {
- uint32 flags = SO_TYPE_ANALYZE;
- return rel->rd_tableam->scan_begin(rel, NULL, 0, NULL, NULL, flags);
- }
- /*
- * End relation scan.
- */
- static inline void
- table_endscan(TableScanDesc scan)
- {
- scan->rs_rd->rd_tableam->scan_end(scan);
- }
- /*
- * Restart a relation scan.
- */
- static inline void
- table_rescan(TableScanDesc scan,
- struct ScanKeyData *key)
- {
- scan->rs_rd->rd_tableam->scan_rescan(scan, key, false, false, false, false);
- }
- /*
- * Restart a relation scan after changing params.
- *
- * This call allows changing the buffer strategy, syncscan, and pagemode
- * options before starting a fresh scan. Note that although the actual use of
- * syncscan might change (effectively, enabling or disabling reporting), the
- * previously selected startblock will be kept.
- */
- static inline void
- table_rescan_set_params(TableScanDesc scan, struct ScanKeyData *key,
- bool allow_strat, bool allow_sync, bool allow_pagemode)
- {
- scan->rs_rd->rd_tableam->scan_rescan(scan, key, true,
- allow_strat, allow_sync,
- allow_pagemode);
- }
- /*
- * Update snapshot used by the scan.
- */
- extern void table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot);
- /*
- * Return next tuple from `scan`, store in slot.
- */
- static inline bool
- table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
- {
- slot->tts_tableOid = RelationGetRelid(sscan->rs_rd);
- /*
- * We don't expect direct calls to table_scan_getnextslot with valid
- * CheckXidAlive for catalog or regular tables. See detailed comments in
- * xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_scan_getnextslot call during logical decoding");
- return sscan->rs_rd->rd_tableam->scan_getnextslot(sscan, direction, slot);
- }
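- /*
- * Illustrative sketch (not part of this header's API): a plain sequential
- * scan using the wrappers above.  Assumes the caller already holds a
- * suitable lock on `rel` and that `snapshot` is registered; variable names
- * are hypothetical and ExecDropSingleTupleTableSlot() comes from the
- * executor's slot machinery.
- *
- *		TableScanDesc scan = table_beginscan(rel, snapshot, 0, NULL);
- *		TupleTableSlot *slot = table_slot_create(rel, NULL);
- *
- *		while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
- *		{
- *			... process the tuple stored in `slot` ...
- *		}
- *
- *		ExecDropSingleTupleTableSlot(slot);
- *		table_endscan(scan);
- */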
- /* ----------------------------------------------------------------------------
- * TID Range scanning related functions.
- * ----------------------------------------------------------------------------
- */
- /*
- * table_beginscan_tidrange is the entry point for setting up a TableScanDesc
- * for a TID range scan.
- */
- static inline TableScanDesc
- table_beginscan_tidrange(Relation rel, Snapshot snapshot,
- ItemPointer mintid,
- ItemPointer maxtid)
- {
- TableScanDesc sscan;
- uint32 flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;
- sscan = rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags);
- /* Set the range of TIDs to scan */
- sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);
- return sscan;
- }
- /*
- * table_rescan_tidrange resets the scan position and sets the minimum and
- * maximum TID range to scan for a TableScanDesc created by
- * table_beginscan_tidrange.
- */
- static inline void
- table_rescan_tidrange(TableScanDesc sscan, ItemPointer mintid,
- ItemPointer maxtid)
- {
- /* Ensure table_beginscan_tidrange() was used. */
- Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0);
- sscan->rs_rd->rd_tableam->scan_rescan(sscan, NULL, false, false, false, false);
- sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);
- }
- /*
- * Fetch the next tuple from `sscan` for a TID range scan created by
- * table_beginscan_tidrange(). Stores the tuple in `slot` and returns true,
- * or returns false if no more tuples exist in the range.
- */
- static inline bool
- table_scan_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
- TupleTableSlot *slot)
- {
- /* Ensure table_beginscan_tidrange() was used. */
- Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0);
- return sscan->rs_rd->rd_tableam->scan_getnextslot_tidrange(sscan,
- direction,
- slot);
- }
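- /*
- * Illustrative sketch (not part of this header's API): scanning a bounded
- * range of TIDs with the helpers above.  The block and offset bounds are
- * arbitrary example values; `slot` is assumed to have been created with
- * table_slot_create().
- *
- *		ItemPointerData mintid,
- *					maxtid;
- *		TableScanDesc scan;
- *
- *		ItemPointerSet(&mintid, 0, 1);
- *		ItemPointerSet(&maxtid, 9, MaxOffsetNumber);
- *
- *		scan = table_beginscan_tidrange(rel, snapshot, &mintid, &maxtid);
- *		while (table_scan_getnextslot_tidrange(scan, ForwardScanDirection, slot))
- *		{
- *			... only tuples with TIDs in [mintid, maxtid] show up here ...
- *		}
- *		table_endscan(scan);
- */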
- /* ----------------------------------------------------------------------------
- * Parallel table scan related functions.
- * ----------------------------------------------------------------------------
- */
- /*
- * Estimate the size of shared memory needed for a parallel scan of this
- * relation.
- */
- extern Size table_parallelscan_estimate(Relation rel, Snapshot snapshot);
- /*
- * Initialize ParallelTableScanDesc for a parallel scan of this
- * relation. `pscan` needs to be sized according to parallelscan_estimate()
- * for the same relation. Call this just once in the leader process; then,
- * individual workers attach via table_beginscan_parallel.
- */
- extern void table_parallelscan_initialize(Relation rel,
- ParallelTableScanDesc pscan,
- Snapshot snapshot);
- /*
- * Begin a parallel scan. `pscan` needs to have been initialized with
- * table_parallelscan_initialize(), for the same relation. The initialization
- * does not need to have happened in this backend.
- *
- * Caller must hold a suitable lock on the relation.
- */
- extern TableScanDesc table_beginscan_parallel(Relation rel,
- ParallelTableScanDesc pscan);
- /*
- * Restart a parallel scan. Call this in the leader process. Caller is
- * responsible for making sure that all workers have finished the scan
- * beforehand.
- */
- static inline void
- table_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
- {
- rel->rd_tableam->parallelscan_reinitialize(rel, pscan);
- }
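- /*
- * Illustrative sketch (not part of this header's API): the division of
- * labour for a parallel scan.  `toc` stands in for however the caller
- * carves up its dynamic shared memory (shm_toc_allocate() is one way);
- * everything else uses the wrappers above.
- *
- * In the leader:
- *		Size		sz = table_parallelscan_estimate(rel, snapshot);
- *		ParallelTableScanDesc pscan = shm_toc_allocate(toc, sz);
- *
- *		table_parallelscan_initialize(rel, pscan, snapshot);
- *
- * In the leader and in every worker:
- *		TableScanDesc scan = table_beginscan_parallel(rel, pscan);
- *
- *		while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
- *			... each tuple is returned to exactly one participant ...
- *		table_endscan(scan);
- *
- * To rescan, the leader waits until all workers are done and then calls
- * table_parallelscan_reinitialize(rel, pscan).
- */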
- /* ----------------------------------------------------------------------------
- * Index scan related functions.
- * ----------------------------------------------------------------------------
- */
- /*
- * Prepare to fetch tuples from the relation, as needed when fetching tuples
- * for an index scan.
- *
- * Tuples for an index scan can then be fetched via table_index_fetch_tuple().
- */
- static inline IndexFetchTableData *
- table_index_fetch_begin(Relation rel)
- {
- return rel->rd_tableam->index_fetch_begin(rel);
- }
- /*
- * Reset index fetch. Typically this will release cross index fetch resources
- * held in IndexFetchTableData.
- */
- static inline void
- table_index_fetch_reset(struct IndexFetchTableData *scan)
- {
- scan->rel->rd_tableam->index_fetch_reset(scan);
- }
- /*
- * Release resources and deallocate index fetch.
- */
- static inline void
- table_index_fetch_end(struct IndexFetchTableData *scan)
- {
- scan->rel->rd_tableam->index_fetch_end(scan);
- }
- /*
- * Fetches, as part of an index scan, tuple at `tid` into `slot`, after doing
- * a visibility test according to `snapshot`. If a tuple was found and passed
- * the visibility test, returns true, false otherwise. Note that *tid may be
- * modified when we return true (see later remarks on multiple row versions
- * reachable via a single index entry).
- *
- * *call_again needs to be false on the first call to table_index_fetch_tuple() for
- * a tid. If there potentially is another tuple matching the tid, *call_again
- * will be set to true, signaling that table_index_fetch_tuple() should be called
- * again for the same tid.
- *
- * *all_dead, if all_dead is not NULL, will be set to true by
- * table_index_fetch_tuple() iff it is guaranteed that no backend needs to see
- * that tuple. Index AMs can use that to avoid returning that tid in future
- * searches.
- *
- * The difference between this function and table_tuple_fetch_row_version()
- * is that this function returns the currently visible version of a row if
- * the AM supports storing multiple row versions reachable via a single index
- * entry (like heap's HOT). Whereas table_tuple_fetch_row_version() only
- * evaluates the tuple exactly at `tid`. Outside of index entry->table tuple
- * lookups, table_tuple_fetch_row_version() is what's usually needed.
- */
- static inline bool
- table_index_fetch_tuple(struct IndexFetchTableData *scan,
- ItemPointer tid,
- Snapshot snapshot,
- TupleTableSlot *slot,
- bool *call_again, bool *all_dead)
- {
- /*
- * We don't expect direct calls to table_index_fetch_tuple with valid
- * CheckXidAlive for catalog or regular tables. See detailed comments in
- * xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_index_fetch_tuple call during logical decoding");
- return scan->rel->rd_tableam->index_fetch_tuple(scan, tid, snapshot,
- slot, call_again,
- all_dead);
- }
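- /*
- * Illustrative sketch (not part of this header's API): the call_again loop
- * an index scan uses to walk all row versions reachable from one index
- * entry (e.g. a HOT chain).  `fetch` is the IndexFetchTableData from
- * table_index_fetch_begin(); the other variables are hypothetical.
- *
- *		bool		call_again = false;
- *		bool		all_dead = false;
- *		bool		found;
- *
- *		do
- *		{
- *			found = table_index_fetch_tuple(fetch, &tid, snapshot, slot,
- *											&call_again, &all_dead);
- *		} while (!found && call_again);
- *
- * If the loop ends with all_dead set, the index AM may mark the index entry
- * LP_DEAD so that future scans can skip it.
- */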
- /*
- * This is a convenience wrapper around table_index_fetch_tuple() which
- * returns whether there are table tuple items corresponding to an index
- * entry. This likely is only useful to verify if there's a conflict in a
- * unique index.
- */
- extern bool table_index_fetch_tuple_check(Relation rel,
- ItemPointer tid,
- Snapshot snapshot,
- bool *all_dead);
- /* ------------------------------------------------------------------------
- * Functions for non-modifying operations on individual tuples
- * ------------------------------------------------------------------------
- */
- /*
- * Fetch tuple at `tid` into `slot`, after doing a visibility test according to
- * `snapshot`. If a tuple was found and passed the visibility test, returns
- * true, false otherwise.
- *
- * See table_index_fetch_tuple's comment about what the difference between
- * these functions is. It is correct to use this function outside of index
- * entry->table tuple lookups.
- */
- static inline bool
- table_tuple_fetch_row_version(Relation rel,
- ItemPointer tid,
- Snapshot snapshot,
- TupleTableSlot *slot)
- {
- /*
- * We don't expect direct calls to table_tuple_fetch_row_version with
- * valid CheckXidAlive for catalog or regular tables. See detailed
- * comments in xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_tuple_fetch_row_version call during logical decoding");
- return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
- }
- /*
- * Verify that `tid` is a potentially valid tuple identifier. That doesn't
- * mean that the pointed to row needs to exist or be visible, but that
- * attempting to fetch the row (e.g. with table_tuple_get_latest_tid() or
- * table_tuple_fetch_row_version()) should not error out if called with that
- * tid.
- *
- * `scan` needs to have been started via table_beginscan().
- */
- static inline bool
- table_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
- {
- return scan->rs_rd->rd_tableam->tuple_tid_valid(scan, tid);
- }
- /*
- * Return the latest version of the tuple at `tid`, by updating `tid` to
- * point at the newest version.
- */
- extern void table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid);
- /*
- * Return true iff tuple in slot satisfies the snapshot.
- *
- * This assumes the slot's tuple is valid, and of the appropriate type for the
- * AM.
- *
- * Some AMs might modify the data underlying the tuple as a side-effect. If so
- * they ought to mark the relevant buffer dirty.
- */
- static inline bool
- table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
- Snapshot snapshot)
- {
- return rel->rd_tableam->tuple_satisfies_snapshot(rel, slot, snapshot);
- }
- /*
- * Determine which index tuples are safe to delete based on their table TID.
- *
- * Determines which entries from index AM caller's TM_IndexDeleteOp state
- * point to vacuumable table tuples. Entries that are found by tableam to be
- * vacuumable are naturally safe for index AM to delete, and so get directly
- * marked as deletable. See comments above TM_IndexDelete and comments above
- * TM_IndexDeleteOp for full details.
- *
- * Returns a latestRemovedXid transaction ID that caller generally places in
- * its index deletion WAL record. This might be used during subsequent REDO
- * of the WAL record when in Hot Standby mode -- a recovery conflict for the
- * index deletion operation might be required on the standby.
- */
- static inline TransactionId
- table_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
- {
- return rel->rd_tableam->index_delete_tuples(rel, delstate);
- }
- /* ----------------------------------------------------------------------------
- * Functions for manipulations of physical tuples.
- * ----------------------------------------------------------------------------
- */
- /*
- * Insert a tuple from a slot into a table, via the relation's table AM.
- *
- * The options bitmask allows the caller to specify options that may change the
- * behaviour of the AM. The AM will ignore options that it does not support.
- *
- * If the TABLE_INSERT_SKIP_FSM option is specified, AMs are free to not reuse
- * free space in the relation. This can save some cycles when we know the
- * relation is new and doesn't contain useful amounts of free space.
- * TABLE_INSERT_SKIP_FSM is commonly passed directly to
- * RelationGetBufferForTuple. See that method for more information.
- *
- * TABLE_INSERT_FROZEN should only be specified for inserts into
- * relfilenodes created during the current subtransaction and when
- * there are no prior snapshots or pre-existing portals open.
- * This causes rows to be frozen, which is an MVCC violation and
- * requires explicit options chosen by user.
- *
- * TABLE_INSERT_NO_LOGICAL force-disables the emitting of logical decoding
- * information for the tuple. This should solely be used during table rewrites
- * where RelationIsLogicallyLogged(relation) is not yet accurate for the new
- * relation.
- *
- * Note that most of these options will be applied when inserting into the
- * heap's TOAST table, too, if the tuple requires any out-of-line data.
- *
- * The BulkInsertState object (if any; bistate can be NULL for default
- * behavior) is also just passed through to RelationGetBufferForTuple. If
- * `bistate` is provided, table_finish_bulk_insert() needs to be called.
- *
- * On return the slot's tts_tid and tts_tableOid are updated to reflect the
- * insertion. But note that any toasting of fields within the slot is NOT
- * reflected in the slots contents.
- */
- static inline void
- table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
- int options, struct BulkInsertStateData *bistate)
- {
- rel->rd_tableam->tuple_insert(rel, slot, cid, options,
- bistate);
- }
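/*
 * Illustrative sketch, not part of the API: how backend or extension code
 * might insert a single row through the slot interface. The function name,
 * the single-column table and the use of RowExclusiveLock are assumptions
 * made for the example; the tableam, table_open and executor calls are real.
 */
#include "postgres.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "executor/tuptable.h"

static void
example_insert_one_row(Oid relid, Datum val0)
{
	Relation	rel = table_open(relid, RowExclusiveLock);
	TupleTableSlot *slot = table_slot_create(rel, NULL);

	/* Build a virtual tuple in the slot (assumes a single-column table). */
	ExecClearTuple(slot);
	slot->tts_values[0] = val0;
	slot->tts_isnull[0] = false;
	ExecStoreVirtualTuple(slot);

	/* No options, no bulk-insert state: a plain single-row insertion. */
	table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);

	ExecDropSingleTupleTableSlot(slot);
	table_close(rel, RowExclusiveLock);
}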
- /*
- * Perform a "speculative insertion". These can be backed out afterwards
- * without aborting the whole transaction. Other sessions can wait for the
- * speculative insertion to be confirmed, turning it into a regular tuple, or
- * aborted, as if it never existed. Speculatively inserted tuples behave as
- * "value locks" of short duration, used to implement INSERT .. ON CONFLICT.
- *
- * A transaction having performed a speculative insertion has to either abort,
- * or finish the speculative insertion with
- * table_tuple_complete_speculative(succeeded = ...).
- */
- static inline void
- table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot,
- CommandId cid, int options,
- struct BulkInsertStateData *bistate,
- uint32 specToken)
- {
- rel->rd_tableam->tuple_insert_speculative(rel, slot, cid, options,
- bistate, specToken);
- }
- /*
- * Complete "speculative insertion" started in the same transaction. If
- * succeeded is true, the tuple is fully inserted, if false, it's removed.
- */
- static inline void
- table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot,
- uint32 specToken, bool succeeded)
- {
- rel->rd_tableam->tuple_complete_speculative(rel, slot, specToken,
- succeeded);
- }
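/*
 * Illustrative sketch of the speculative-insertion protocol, loosely based on
 * how the executor implements INSERT ... ON CONFLICT; index insertion and
 * conflict detection are elided, and the surrounding control flow is an
 * assumption for the example. The lock and tableam calls are real APIs.
 */
#include "postgres.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "storage/lmgr.h"

static void
example_speculative_insert(Relation rel, TupleTableSlot *slot,
						   bool no_conflict_found)
{
	uint32		specToken;

	/* Take the speculative-insertion lock that other sessions can wait on. */
	specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

	table_tuple_insert_speculative(rel, slot, GetCurrentCommandId(true),
								   0, NULL, specToken);

	/* ... insert index entries here, checking for conflicting rows ... */

	/* Confirm the tuple if no conflict was found, otherwise back it out. */
	table_tuple_complete_speculative(rel, slot, specToken, no_conflict_found);

	SpeculativeInsertionLockRelease(GetCurrentTransactionId());
}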
- /*
- * Insert multiple tuples into a table.
- *
- * This is like table_tuple_insert(), but inserts multiple tuples in one
- * operation. That's often faster than calling table_tuple_insert() in a loop,
- * because e.g. the AM can reduce WAL logging and page locking overhead.
- *
- * Except for taking `nslots` tuples as input, and an array of TupleTableSlots
- * in `slots`, the parameters for table_multi_insert() are the same as for
- * table_tuple_insert().
- *
- * Note: this leaks memory into the current memory context. You can create a
- * temporary context before calling this, if that's a problem.
- */
- static inline void
- table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
- CommandId cid, int options, struct BulkInsertStateData *bistate)
- {
- rel->rd_tableam->multi_insert(rel, slots, nslots,
- cid, options, bistate);
- }
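/*
 * Illustrative sketch, assuming the caller has already filled an array of
 * slots for the target relation: a short-lived memory context absorbs the
 * memory table_multi_insert() may leak, as the comment above suggests. The
 * context name and function name are invented for the example.
 */
#include "postgres.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "utils/memutils.h"

static void
example_multi_insert(Relation rel, TupleTableSlot **slots, int nslots)
{
	MemoryContext insert_ctx;
	MemoryContext oldctx;

	insert_ctx = AllocSetContextCreate(CurrentMemoryContext,
									   "example multi-insert",
									   ALLOCSET_DEFAULT_SIZES);
	oldctx = MemoryContextSwitchTo(insert_ctx);

	table_multi_insert(rel, slots, nslots, GetCurrentCommandId(true),
					   0 /* options */ , NULL /* bistate */ );

	MemoryContextSwitchTo(oldctx);
	MemoryContextDelete(insert_ctx);
}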
- /*
- * Delete a tuple.
- *
- * NB: do not call this directly unless prepared to deal with
- * concurrent-update conditions. Use simple_table_tuple_delete instead.
- *
- * Input parameters:
- * relation - table to be modified (caller must hold suitable lock)
- * tid - TID of tuple to be deleted
- * cid - delete command ID (used for visibility test, and stored into
- * cmax if successful)
- * crosscheck - if not InvalidSnapshot, also check tuple against this
- * wait - true if should wait for any conflicting update to commit/abort
- * Output parameters:
- * tmfd - filled in failure cases (see below)
- * changingPart - true iff the tuple is being moved to another partition
- * table due to an update of the partition key. Otherwise, false.
- *
- * Normal, successful return value is TM_Ok, which means we did actually
- * delete it. Failure return codes are TM_SelfModified, TM_Updated, and
- * TM_BeingModified (the last only possible if wait == false).
- *
- * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
- * t_xmax, and, if possible, t_cmax. See comments for
- * struct TM_FailureData for additional info.
- */
- static inline TM_Result
- table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
- Snapshot snapshot, Snapshot crosscheck, bool wait,
- TM_FailureData *tmfd, bool changingPart)
- {
- return rel->rd_tableam->tuple_delete(rel, tid, cid,
- snapshot, crosscheck,
- wait, tmfd, changingPart);
- }
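/*
 * Illustrative sketch of a low-level delete that handles the concurrency
 * outcomes itself; the error messages and control flow are assumptions for
 * the example. Real callers are ExecDelete() and simple_table_tuple_delete().
 */
#include "postgres.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "utils/snapmgr.h"

static void
example_delete_tid(Relation rel, ItemPointer tid)
{
	TM_FailureData tmfd;
	TM_Result	result;

	result = table_tuple_delete(rel, tid,
								GetCurrentCommandId(true),
								GetActiveSnapshot(),
								InvalidSnapshot,
								true,	/* wait for concurrent updaters */
								&tmfd,
								false); /* not a partition-key move */

	switch (result)
	{
		case TM_Ok:
			break;
		case TM_SelfModified:
			elog(ERROR, "tuple already modified by this command");
			break;
		case TM_Updated:
		case TM_Deleted:
			elog(ERROR, "tuple concurrently modified; see tmfd for details");
			break;
		default:
			elog(ERROR, "unexpected table_tuple_delete result: %d", (int) result);
			break;
	}
}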
- /*
- * Update a tuple.
- *
- * NB: do not call this directly unless you are prepared to deal with
- * concurrent-update conditions. Use simple_table_tuple_update instead.
- *
- * Input parameters:
- * relation - table to be modified (caller must hold suitable lock)
- * otid - TID of old tuple to be replaced
- * slot - newly constructed tuple data to store
- * cid - update command ID (used for visibility test, and stored into
- * cmax/cmin if successful)
- * crosscheck - if not InvalidSnapshot, also check old tuple against this
- * wait - true if should wait for any conflicting update to commit/abort
- * Output parameters:
- * tmfd - filled in failure cases (see below)
- * lockmode - filled with lock mode acquired on tuple
- * update_indexes - in success cases this is set to true if new index entries
- * are required for this tuple
- *
- * Normal, successful return value is TM_Ok, which means we did actually
- * update it. Failure return codes are TM_SelfModified, TM_Updated, and
- * TM_BeingModified (the last only possible if wait == false).
- *
- * On success, the slot's tts_tid and tts_tableOid are updated to match the new
- * stored tuple; in particular, slot->tts_tid is set to the TID where the
- * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
- * update was done. However, any TOAST changes in the new tuple's
- * data are not reflected in the slot's contents.
- *
- * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
- * t_xmax, and, if possible, t_cmax. See comments for struct TM_FailureData
- * for additional info.
- */
- static inline TM_Result
- table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
- CommandId cid, Snapshot snapshot, Snapshot crosscheck,
- bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
- bool *update_indexes)
- {
- return rel->rd_tableam->tuple_update(rel, otid, slot,
- cid, snapshot, crosscheck,
- wait, tmfd,
- lockmode, update_indexes);
- }
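/*
 * Illustrative sketch of an update through this interface; the function name
 * and error handling are assumptions for the example. Real callers
 * (ExecUpdate(), simple_table_tuple_update()) additionally insert new index
 * entries when update_indexes comes back true; that step is only indicated by
 * a comment here.
 */
#include "postgres.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "utils/snapmgr.h"

static void
example_update_tid(Relation rel, ItemPointer otid, TupleTableSlot *newslot)
{
	TM_FailureData tmfd;
	LockTupleMode lockmode;
	bool		update_indexes;
	TM_Result	result;

	result = table_tuple_update(rel, otid, newslot,
								GetCurrentCommandId(true),
								GetActiveSnapshot(), InvalidSnapshot,
								true,	/* wait for concurrent updaters */
								&tmfd, &lockmode, &update_indexes);

	if (result != TM_Ok)
		elog(ERROR, "tuple concurrently modified during example update");

	if (update_indexes)
	{
		/*
		 * Not a HOT update: the caller would now create index entries
		 * pointing at newslot->tts_tid, e.g. via ExecInsertIndexTuples().
		 */
	}
}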
- /*
- * Lock a tuple in the specified mode.
- *
- * Input parameters:
- * relation: relation containing tuple (caller must hold suitable lock)
- * tid: TID of tuple to lock
- * snapshot: snapshot to use for visibility determinations
- * cid: current command ID (used for visibility test, and stored into
- * tuple's cmax if lock is successful)
- * mode: lock mode desired
- * wait_policy: what to do if tuple lock is not available
- * flags:
- * If TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, follow the update chain to
- * also lock descendant tuples if lock modes don't conflict.
- * If TUPLE_LOCK_FLAG_FIND_LAST_VERSION, follow the update chain and lock
- * latest version.
- *
- * Output parameters:
- * *slot: contains the target tuple
- * *tmfd: filled in failure cases (see below)
- *
- * Function result may be:
- * TM_Ok: lock was successfully acquired
- * TM_Invisible: lock failed because tuple was never visible to us
- * TM_SelfModified: lock failed because tuple updated by self
- * TM_Updated: lock failed because tuple updated by other xact
- * TM_Deleted: lock failed because tuple deleted by other xact
- * TM_WouldBlock: lock couldn't be acquired and wait_policy is skip
- *
- * In the failure cases other than TM_Invisible and TM_Deleted, the routine
- * fills *tmfd with the tuple's t_ctid, t_xmax, and, if possible, t_cmax. See
- * comments for struct TM_FailureData for additional info.
- */
- static inline TM_Result
- table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot,
- TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
- LockWaitPolicy wait_policy, uint8 flags,
- TM_FailureData *tmfd)
- {
- return rel->rd_tableam->tuple_lock(rel, tid, snapshot, slot,
- cid, mode, wait_policy,
- flags, tmfd);
- }
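/*
 * Illustrative sketch of locking the latest version of a row, roughly what
 * ExecLockRows() does for SELECT ... FOR UPDATE; the chosen lock mode, wait
 * policy and flags are assumptions for the example.
 */
#include "postgres.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "nodes/lockoptions.h"
#include "utils/snapmgr.h"

static bool
example_lock_latest_row_version(Relation rel, ItemPointer tid,
								TupleTableSlot *slot)
{
	TM_FailureData tmfd;
	TM_Result	result;

	result = table_tuple_lock(rel, tid, GetActiveSnapshot(), slot,
							  GetCurrentCommandId(false),
							  LockTupleExclusive, LockWaitBlock,
							  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
							  &tmfd);

	/* On success the locked tuple has been loaded into the slot. */
	return (result == TM_Ok);
}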
- /*
- * Perform operations necessary to complete insertions made via
- * tuple_insert and multi_insert with a BulkInsertState specified.
- */
- static inline void
- table_finish_bulk_insert(Relation rel, int options)
- {
- /* optional callback */
- if (rel->rd_tableam && rel->rd_tableam->finish_bulk_insert)
- rel->rd_tableam->finish_bulk_insert(rel, options);
- }
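/*
 * Illustrative sketch of the bulk-insert lifecycle: acquire a BulkInsertState,
 * stream rows in with hint options, then let the AM finish up.
 * GetBulkInsertState()/FreeBulkInsertState() are currently declared in
 * access/heapam.h (COPY uses them in the same way); the loop body and the
 * choice of TABLE_INSERT_SKIP_FSM are assumptions for the example.
 */
#include "postgres.h"
#include "access/heapam.h"
#include "access/tableam.h"
#include "access/xact.h"

static void
example_bulk_insert(Relation rel, TupleTableSlot **slots, int nslots)
{
	BulkInsertState bistate = GetBulkInsertState();
	int			options = TABLE_INSERT_SKIP_FSM;	/* assumes a fresh relfilenode */
	int			i;

	for (i = 0; i < nslots; i++)
		table_tuple_insert(rel, slots[i], GetCurrentCommandId(true),
						   options, bistate);

	/* Required because a BulkInsertState was passed to the inserts above. */
	table_finish_bulk_insert(rel, options);
	FreeBulkInsertState(bistate);
}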
- /* ------------------------------------------------------------------------
- * DDL related functionality.
- * ------------------------------------------------------------------------
- */
- /*
- * Create storage for `rel` in `newrnode`, with persistence set to
- * `persistence`.
- *
- * This is used both during relation creation and various DDL operations to
- * create a new relfilenode that can be filled from scratch. When creating
- * new storage for an existing relfilenode, this should be called before the
- * relcache entry has been updated.
- *
- * *freezeXid, *minmulti are set to the xid / multixact horizon for the table
- * that pg_class.{relfrozenxid, relminmxid} have to be set to.
- */
- static inline void
- table_relation_set_new_filenode(Relation rel,
- const RelFileNode *newrnode,
- char persistence,
- TransactionId *freezeXid,
- MultiXactId *minmulti)
- {
- rel->rd_tableam->relation_set_new_filenode(rel, newrnode, persistence,
- freezeXid, minmulti);
- }
- /*
- * Remove all table contents from `rel`, in a non-transactional manner.
- * Non-transactional means that there's no need to support rollbacks. This
- * is commonly only used to perform truncations for relfilenodes created in the
- * current transaction.
- */
- static inline void
- table_relation_nontransactional_truncate(Relation rel)
- {
- rel->rd_tableam->relation_nontransactional_truncate(rel);
- }
- /*
- * Copy data from `rel` into the new relfilenode `newrnode`. The new
- * relfilenode may not have storage associated before this function is
- * called. This is only supposed to be used for low level operations like
- * changing a relation's tablespace.
- */
- static inline void
- table_relation_copy_data(Relation rel, const RelFileNode *newrnode)
- {
- rel->rd_tableam->relation_copy_data(rel, newrnode);
- }
- /*
- * Copy data from `OldTable` into `NewTable`, as part of a CLUSTER or VACUUM
- * FULL.
- *
- * Additional Input parameters:
- * - use_sort - if true, the table contents are sorted appropriately for
- * `OldIndex`; if false and OldIndex is not InvalidOid, the data is copied
- * in that index's order; if false and OldIndex is InvalidOid, no sorting is
- * performed
- * - OldIndex - see use_sort
- * - OldestXmin - computed by vacuum_set_xid_limits(), even when
- * not needed for the relation's AM
- * - *xid_cutoff - ditto
- * - *multi_cutoff - ditto
- *
- * Output parameters:
- * - *xid_cutoff - rel's new relfrozenxid value, may be invalid
- * - *multi_cutoff - rel's new relminmxid value, may be invalid
- * - *tups_vacuumed - stats, for logging, if appropriate for AM
- * - *tups_recently_dead - stats, for logging, if appropriate for AM
- */
- static inline void
- table_relation_copy_for_cluster(Relation OldTable, Relation NewTable,
- Relation OldIndex,
- bool use_sort,
- TransactionId OldestXmin,
- TransactionId *xid_cutoff,
- MultiXactId *multi_cutoff,
- double *num_tuples,
- double *tups_vacuumed,
- double *tups_recently_dead)
- {
- OldTable->rd_tableam->relation_copy_for_cluster(OldTable, NewTable, OldIndex,
- use_sort, OldestXmin,
- xid_cutoff, multi_cutoff,
- num_tuples, tups_vacuumed,
- tups_recently_dead);
- }
- /*
- * Perform VACUUM on the relation. The VACUUM can be triggered by a user or by
- * autovacuum. The specific actions performed by the AM will depend heavily on
- * the individual AM.
- *
- * On entry a transaction needs to have already been established, and the
- * table is locked with a ShareUpdateExclusive lock.
- *
- * Note that neither VACUUM FULL (or CLUSTER) nor ANALYZE goes through this
- * routine, even if (for ANALYZE) it is part of the same VACUUM command.
- */
- static inline void
- table_relation_vacuum(Relation rel, struct VacuumParams *params,
- BufferAccessStrategy bstrategy)
- {
- rel->rd_tableam->relation_vacuum(rel, params, bstrategy);
- }
- /*
- * Prepare to analyze block `blockno` of `scan`. The scan needs to have been
- * started with table_beginscan_analyze(). Note that this routine might
- * acquire resources like locks that are held until
- * table_scan_analyze_next_tuple() returns false.
- *
- * Returns false if block is unsuitable for sampling, true otherwise.
- */
- static inline bool
- table_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
- BufferAccessStrategy bstrategy)
- {
- return scan->rs_rd->rd_tableam->scan_analyze_next_block(scan, blockno,
- bstrategy);
- }
- /*
- * Iterate over tuples in the block selected with
- * table_scan_analyze_next_block() (which needs to have returned true, and
- * this routine may not have returned false for the same block before). If a
- * tuple that's suitable for sampling is found, true is returned and a tuple
- * is stored in `slot`.
- *
- * *liverows and *deadrows are incremented according to the encountered
- * tuples.
- */
- static inline bool
- table_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
- double *liverows, double *deadrows,
- TupleTableSlot *slot)
- {
- return scan->rs_rd->rd_tableam->scan_analyze_next_tuple(scan, OldestXmin,
- liverows, deadrows,
- slot);
- }
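/*
 * Illustrative sketch of how acquire_sample_rows() in analyze.c drives these
 * two callbacks. Block selection and row accounting are heavily simplified
 * here: real ANALYZE samples blocks rather than visiting all of them, and it
 * keeps the rows it pulls from the slot.
 */
#include "postgres.h"
#include "access/tableam.h"
#include "storage/bufmgr.h"

static void
example_analyze_sample(Relation rel, TransactionId OldestXmin,
					   TupleTableSlot *slot)
{
	TableScanDesc scan = table_beginscan_analyze(rel);
	BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_VACUUM);
	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
	double		liverows = 0;
	double		deadrows = 0;
	BlockNumber blkno;

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		if (!table_scan_analyze_next_block(scan, blkno, bstrategy))
			continue;			/* block not suitable for sampling */

		while (table_scan_analyze_next_tuple(scan, OldestXmin,
											 &liverows, &deadrows, slot))
		{
			/* collect the sampled row from the slot */
		}
	}

	table_endscan(scan);
	FreeAccessStrategy(bstrategy);
}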
- /*
- * table_index_build_scan - scan the table to find tuples to be indexed
- *
- * This is called back from an access-method-specific index build procedure
- * after the AM has done whatever setup it needs. The parent table relation
- * is scanned to find tuples that should be entered into the index. Each
- * such tuple is passed to the AM's callback routine, which does the right
- * things to add it to the new index. After we return, the AM's index
- * build procedure does whatever cleanup it needs.
- *
- * The total count of live tuples is returned. This is for updating pg_class
- * statistics. (It's annoying not to be able to do that here, but we want to
- * merge that update with others; see index_update_stats.) Note that the
- * index AM itself must keep track of the number of index tuples; we don't do
- * so here because the AM might reject some of the tuples for its own reasons,
- * such as being unable to store NULLs.
- *
- * If 'progress', the PROGRESS_SCAN_BLOCKS_TOTAL counter is updated when
- * starting the scan, and PROGRESS_SCAN_BLOCKS_DONE is updated as we go along.
- *
- * A side effect is to set indexInfo->ii_BrokenHotChain to true if we detect
- * any potentially broken HOT chains. Currently, we set this if there are any
- * RECENTLY_DEAD or DELETE_IN_PROGRESS entries in a HOT chain, without trying
- * very hard to detect whether they're really incompatible with the chain tip.
- * This only really makes sense for the heap AM; it might need to be generalized
- * for other AMs later.
- */
- static inline double
- table_index_build_scan(Relation table_rel,
- Relation index_rel,
- struct IndexInfo *index_info,
- bool allow_sync,
- bool progress,
- IndexBuildCallback callback,
- void *callback_state,
- TableScanDesc scan)
- {
- return table_rel->rd_tableam->index_build_range_scan(table_rel,
- index_rel,
- index_info,
- allow_sync,
- false,
- progress,
- 0,
- InvalidBlockNumber,
- callback,
- callback_state,
- scan);
- }
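/*
 * Illustrative sketch of the shape of an IndexBuildCallback and a call to
 * table_index_build_scan(), loosely modeled on how index AM build functions
 * drive it. The state struct, callback body and function names are
 * assumptions for the example; a real AM would form and insert an index
 * tuple for each TID it is handed.
 */
#include "postgres.h"
#include "access/genam.h"
#include "access/tableam.h"

typedef struct ExampleBuildState
{
	double		indtuples;		/* rows actually handed to the callback */
} ExampleBuildState;

static void
example_build_callback(Relation index, ItemPointer tid, Datum *values,
					   bool *isnull, bool tupleIsAlive, void *state)
{
	ExampleBuildState *buildstate = (ExampleBuildState *) state;

	/* form an index tuple for `tid` here in a real access method */
	buildstate->indtuples += 1;
}

static double
example_build_index(Relation table_rel, Relation index_rel,
					struct IndexInfo *index_info)
{
	ExampleBuildState buildstate = {0};

	/* allow syncscan and progress reporting, as most index builds do */
	return table_index_build_scan(table_rel, index_rel, index_info,
								  true, true,
								  example_build_callback, &buildstate,
								  NULL);
}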
- /*
- * As table_index_build_scan(), except that instead of scanning the complete
- * table, only the given number of blocks are scanned. Scan to end-of-rel can
- * be signaled by passing InvalidBlockNumber as numblocks. Note that
- * restricting the range to scan cannot be done when requesting syncscan.
- *
- * When "anyvisible" mode is requested, all tuples visible to any transaction
- * are indexed and counted as live, including those inserted or deleted by
- * transactions that are still in progress.
- */
- static inline double
- table_index_build_range_scan(Relation table_rel,
- Relation index_rel,
- struct IndexInfo *index_info,
- bool allow_sync,
- bool anyvisible,
- bool progress,
- BlockNumber start_blockno,
- BlockNumber numblocks,
- IndexBuildCallback callback,
- void *callback_state,
- TableScanDesc scan)
- {
- return table_rel->rd_tableam->index_build_range_scan(table_rel,
- index_rel,
- index_info,
- allow_sync,
- anyvisible,
- progress,
- start_blockno,
- numblocks,
- callback,
- callback_state,
- scan);
- }
- /*
- * table_index_validate_scan - second table scan for concurrent index build
- *
- * See validate_index() for an explanation.
- */
- static inline void
- table_index_validate_scan(Relation table_rel,
- Relation index_rel,
- struct IndexInfo *index_info,
- Snapshot snapshot,
- struct ValidateIndexState *state)
- {
- table_rel->rd_tableam->index_validate_scan(table_rel,
- index_rel,
- index_info,
- snapshot,
- state);
- }
- /* ----------------------------------------------------------------------------
- * Miscellaneous functionality
- * ----------------------------------------------------------------------------
- */
- /*
- * Return the current size of `rel` in bytes. If `forkNumber` is
- * InvalidForkNumber, return the relation's overall size, otherwise the size
- * for the indicated fork.
- *
- * Note that the overall size might not be the equivalent of the sum of sizes
- * for the individual forks for some AMs, e.g. because the AM's storage does
- * not neatly map onto the builtin types of forks.
- */
- static inline uint64
- table_relation_size(Relation rel, ForkNumber forkNumber)
- {
- return rel->rd_tableam->relation_size(rel, forkNumber);
- }
- /*
- * table_relation_needs_toast_table - does this relation need a toast table?
- */
- static inline bool
- table_relation_needs_toast_table(Relation rel)
- {
- return rel->rd_tableam->relation_needs_toast_table(rel);
- }
- /*
- * Return the OID of the AM that should be used to implement the TOAST table
- * for this relation.
- */
- static inline Oid
- table_relation_toast_am(Relation rel)
- {
- return rel->rd_tableam->relation_toast_am(rel);
- }
- /*
- * Fetch all or part of a TOAST value from a TOAST table.
- *
- * If this AM is never used to implement a TOAST table, then this callback
- * is not needed. But, if toasted values are ever stored in a table of this
- * type, then you will need this callback.
- *
- * toastrel is the relation in which the toasted value is stored.
- *
- * valueid identifies which toast value is to be fetched. For the heap,
- * this corresponds to the values stored in the chunk_id column.
- *
- * attrsize is the total size of the toast value to be fetched.
- *
- * sliceoffset is the offset within the toast value of the first byte that
- * should be fetched.
- *
- * slicelength is the number of bytes from the toast value that should be
- * fetched.
- *
- * result is caller-allocated space into which the fetched bytes should be
- * stored.
- */
- static inline void
- table_relation_fetch_toast_slice(Relation toastrel, Oid valueid,
- int32 attrsize, int32 sliceoffset,
- int32 slicelength, struct varlena *result)
- {
- toastrel->rd_tableam->relation_fetch_toast_slice(toastrel, valueid,
- attrsize,
- sliceoffset, slicelength,
- result);
- }
- /* ----------------------------------------------------------------------------
- * Planner related functionality
- * ----------------------------------------------------------------------------
- */
- /*
- * Estimate the current size of the relation, as an AM specific workhorse for
- * estimate_rel_size(). Look there for an explanation of the parameters.
- */
- static inline void
- table_relation_estimate_size(Relation rel, int32 *attr_widths,
- BlockNumber *pages, double *tuples,
- double *allvisfrac)
- {
- rel->rd_tableam->relation_estimate_size(rel, attr_widths, pages, tuples,
- allvisfrac);
- }
- /* ----------------------------------------------------------------------------
- * Executor related functionality
- * ----------------------------------------------------------------------------
- */
- /*
- * Prepare to fetch / check / return tuples from `tbmres->blockno` as part of
- * a bitmap table scan. `scan` needs to have been started via
- * table_beginscan_bm(). Returns false if there are no tuples to be found on
- * the page, true otherwise.
- *
- * Note that this is an optionally implemented callback, so it should only be
- * used after verifying that the AM provides it (at plan time or similar).
- */
- static inline bool
- table_scan_bitmap_next_block(TableScanDesc scan,
- struct TBMIterateResult *tbmres)
- {
- /*
- * We don't expect direct calls to table_scan_bitmap_next_block with valid
- * CheckXidAlive for catalog or regular tables. See detailed comments in
- * xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_scan_bitmap_next_block call during logical decoding");
- return scan->rs_rd->rd_tableam->scan_bitmap_next_block(scan,
- tbmres);
- }
- /*
- * Fetch the next tuple of a bitmap table scan into `slot` and return true if
- * a visible tuple was found, false otherwise.
- * table_scan_bitmap_next_block() needs to previously have selected a
- * block (i.e. returned true), and no previous
- * table_scan_bitmap_next_tuple() for the same block may have
- * returned false.
- */
- static inline bool
- table_scan_bitmap_next_tuple(TableScanDesc scan,
- struct TBMIterateResult *tbmres,
- TupleTableSlot *slot)
- {
- /*
- * We don't expect direct calls to table_scan_bitmap_next_tuple with valid
- * CheckXidAlive for catalog or regular tables. See detailed comments in
- * xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_scan_bitmap_next_tuple call during logical decoding");
- return scan->rs_rd->rd_tableam->scan_bitmap_next_tuple(scan,
- tbmres,
- slot);
- }
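/*
 * Illustrative sketch of the driver loop in nodeBitmapHeapscan.c, heavily
 * simplified: iterate over the bitmap, position the scan on each block, and
 * pull the visible tuples. The scan is assumed to have been started with
 * table_beginscan_bm(); prefetching, the skip-fetch optimization and parallel
 * iteration are omitted, and the function name is invented.
 */
#include "postgres.h"
#include "access/tableam.h"
#include "nodes/tidbitmap.h"

static void
example_bitmap_scan(TableScanDesc scan, TIDBitmap *tbm, TupleTableSlot *slot)
{
	TBMIterator *iterator = tbm_begin_iterate(tbm);
	TBMIterateResult *tbmres;

	while ((tbmres = tbm_iterate(iterator)) != NULL)
	{
		if (!table_scan_bitmap_next_block(scan, tbmres))
			continue;			/* nothing to fetch on this page */

		while (table_scan_bitmap_next_tuple(scan, tbmres, slot))
		{
			/* process the visible tuple now stored in the slot */
		}
	}

	tbm_end_iterate(iterator);
}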
- /*
- * Prepare to fetch tuples from the next block in a sample scan. Returns false
- * if the sample scan is finished, true otherwise. `scan` needs to have been
- * started via table_beginscan_sampling().
- *
- * This will call the TsmRoutine's NextSampleBlock() callback if necessary
- * (i.e. NextSampleBlock is not NULL), or perform a sequential scan over the
- * underlying relation.
- */
- static inline bool
- table_scan_sample_next_block(TableScanDesc scan,
- struct SampleScanState *scanstate)
- {
- /*
- * We don't expect direct calls to table_scan_sample_next_block with valid
- * CheckXidAlive for catalog or regular tables. See detailed comments in
- * xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_scan_sample_next_block call during logical decoding");
- return scan->rs_rd->rd_tableam->scan_sample_next_block(scan, scanstate);
- }
- /*
- * Fetch the next sample tuple into `slot` and return true if a visible tuple
- * was found, false otherwise. table_scan_sample_next_block() needs to
- * previously have selected a block (i.e. returned true), and no previous
- * table_scan_sample_next_tuple() for the same block may have returned false.
- *
- * This will call the TsmRoutine's NextSampleTuple() callback.
- */
- static inline bool
- table_scan_sample_next_tuple(TableScanDesc scan,
- struct SampleScanState *scanstate,
- TupleTableSlot *slot)
- {
- /*
- * We don't expect direct calls to table_scan_sample_next_tuple with valid
- * CheckXidAlive for catalog or regular tables. See detailed comments in
- * xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_scan_sample_next_tuple call during logical decoding");
- return scan->rs_rd->rd_tableam->scan_sample_next_tuple(scan, scanstate,
- slot);
- }
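/*
 * Illustrative sketch of the two-level loop nodeSamplescan.c uses: fetch the
 * next sample block, then the tuples the sampling method selects on it. The
 * scan is assumed to have been started with table_beginscan_sampling(), and
 * the function name is invented for the example.
 */
#include "postgres.h"
#include "access/tableam.h"

static void
example_sample_scan(TableScanDesc scan, struct SampleScanState *scanstate,
					TupleTableSlot *slot)
{
	while (table_scan_sample_next_block(scan, scanstate))
	{
		while (table_scan_sample_next_tuple(scan, scanstate, slot))
		{
			/* process the sampled tuple now stored in the slot */
		}
	}
}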
- /* ----------------------------------------------------------------------------
- * Functions to make modifications a bit simpler.
- * ----------------------------------------------------------------------------
- */
- extern void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot);
- extern void simple_table_tuple_delete(Relation rel, ItemPointer tid,
- Snapshot snapshot);
- extern void simple_table_tuple_update(Relation rel, ItemPointer otid,
- TupleTableSlot *slot, Snapshot snapshot,
- bool *update_indexes);
- /* ----------------------------------------------------------------------------
- * Helper functions to implement parallel scans for block oriented AMs.
- * ----------------------------------------------------------------------------
- */
- extern Size table_block_parallelscan_estimate(Relation rel);
- extern Size table_block_parallelscan_initialize(Relation rel,
- ParallelTableScanDesc pscan);
- extern void table_block_parallelscan_reinitialize(Relation rel,
- ParallelTableScanDesc pscan);
- extern BlockNumber table_block_parallelscan_nextpage(Relation rel,
- ParallelBlockTableScanWorker pbscanwork,
- ParallelBlockTableScanDesc pbscan);
- extern void table_block_parallelscan_startblock_init(Relation rel,
- ParallelBlockTableScanWorker pbscanwork,
- ParallelBlockTableScanDesc pbscan);
- /* ----------------------------------------------------------------------------
- * Helper functions to implement relation sizing for block oriented AMs.
- * ----------------------------------------------------------------------------
- */
- extern uint64 table_block_relation_size(Relation rel, ForkNumber forkNumber);
- extern void table_block_relation_estimate_size(Relation rel,
- int32 *attr_widths,
- BlockNumber *pages,
- double *tuples,
- double *allvisfrac,
- Size overhead_bytes_per_tuple,
- Size usable_bytes_per_page);
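/*
 * Illustrative sketch of how a block-oriented AM's relation_estimate_size
 * callback might delegate to table_block_relation_estimate_size(). The
 * per-tuple overhead and usable-bytes-per-page values shown mirror what the
 * heap AM passes; for any other AM they are assumptions to be replaced with
 * that AM's own figures.
 */
#include "postgres.h"
#include "access/htup_details.h"
#include "access/tableam.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"

static void
example_relation_estimate_size(Relation rel, int32 *attr_widths,
							   BlockNumber *pages, double *tuples,
							   double *allvisfrac)
{
	table_block_relation_estimate_size(rel, attr_widths, pages, tuples,
									   allvisfrac,
									   MAXALIGN(SizeofHeapTupleHeader) +
									   sizeof(ItemIdData),
									   BLCKSZ - SizeOfPageHeaderData);
}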
- /* ----------------------------------------------------------------------------
- * Functions in tableamapi.c
- * ----------------------------------------------------------------------------
- */
- extern const TableAmRoutine *GetTableAmRoutine(Oid amhandler);
- extern const TableAmRoutine *GetHeapamTableAmRoutine(void);
- extern bool check_default_table_access_method(char **newval, void **extra,
- GucSource source);
- #endif /* TABLEAM_H */