/*-------------------------------------------------------------------------
 *
 * nbtree.h
 *    header file for postgres btree access method implementation.
 *
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/nbtree.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef NBTREE_H
#define NBTREE_H

#include "access/amapi.h"
#include "access/itup.h"
#include "access/sdir.h"
#include "access/tableam.h"
#include "access/xlogreader.h"
#include "catalog/pg_am_d.h"
#include "catalog/pg_index.h"
#include "lib/stringinfo.h"
#include "storage/bufmgr.h"
#include "storage/shm_toc.h"

/* There's room for a 16-bit vacuum cycle ID in BTPageOpaqueData */
typedef uint16 BTCycleId;

/*
 * BTPageOpaqueData -- At the end of every page, we store a pointer
 * to both siblings in the tree.  This is used to do forward/backward
 * index scans.  The next-page link is also critical for recovery when
 * a search has navigated to the wrong page due to concurrent page splits
 * or deletions; see src/backend/access/nbtree/README for more info.
 *
 * In addition, we store the page's btree level (counting upwards from
 * zero at a leaf page) as well as some flag bits indicating the page type
 * and status.  If the page is deleted, a BTDeletedPageData struct is stored
 * in the page's tuple area, while a standard BTPageOpaqueData struct is
 * stored in the page special area.
 *
 * We also store a "vacuum cycle ID".  When a page is split while VACUUM is
 * processing the index, a nonzero value associated with the VACUUM run is
 * stored into both halves of the split page.  (If VACUUM is not running,
 * both pages receive zero cycleids.)  This allows VACUUM to detect whether
 * a page was split since it started, with a small probability of false match
 * if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
 * ago.  Also, during a split, the BTP_SPLIT_END flag is cleared in the left
 * (original) page, and set in the right page, but only if the next page
 * to its right has a different cycleid.
 *
 * NOTE: the BTP_LEAF flag bit is redundant since level==0 could be tested
 * instead.
 *
 * NOTE: the btpo_level field used to be a union type in order to allow
 * deleted pages to store a 32-bit safexid in the same field.  We now store
 * 64-bit/full safexid values using BTDeletedPageData instead.
 */
typedef struct BTPageOpaqueData
{
    BlockNumber btpo_prev;      /* left sibling, or P_NONE if leftmost */
    BlockNumber btpo_next;      /* right sibling, or P_NONE if rightmost */
    uint32      btpo_level;     /* tree level --- zero for leaf pages */
    uint16      btpo_flags;     /* flag bits, see below */
    BTCycleId   btpo_cycleid;   /* vacuum cycle ID of latest split */
} BTPageOpaqueData;

typedef BTPageOpaqueData *BTPageOpaque;

#define BTPageGetOpaque(page) ((BTPageOpaque) PageGetSpecialPointer(page))

/* Bits defined in btpo_flags */
#define BTP_LEAF        (1 << 0)    /* leaf page, i.e. not internal page */
#define BTP_ROOT        (1 << 1)    /* root page (has no parent) */
#define BTP_DELETED     (1 << 2)    /* page has been deleted from tree */
#define BTP_META        (1 << 3)    /* meta-page */
#define BTP_HALF_DEAD   (1 << 4)    /* empty, but still in tree */
#define BTP_SPLIT_END   (1 << 5)    /* rightmost page of split group */
#define BTP_HAS_GARBAGE (1 << 6)    /* page has LP_DEAD tuples (deprecated) */
#define BTP_INCOMPLETE_SPLIT (1 << 7)   /* right sibling's downlink is missing */
#define BTP_HAS_FULLXID (1 << 8)    /* contains BTDeletedPageData */

/*
 * The max allowed value of a cycle ID is a bit less than 64K.  This is
 * for convenience of pg_filedump and similar utilities: we want to use
 * the last 2 bytes of special space as an index type indicator, and
 * restricting cycle ID lets btree use that space for vacuum cycle IDs
 * while still allowing index type to be identified.
 */
#define MAX_BT_CYCLE_ID 0xFF7F

/*
 * The Meta page is always the first page in the btree index.
 * Its primary purpose is to point to the location of the btree root page.
 * We also point to the "fast" root, which is the current effective root;
 * see README for discussion.
 */
typedef struct BTMetaPageData
{
    uint32      btm_magic;      /* should contain BTREE_MAGIC */
    uint32      btm_version;    /* nbtree version (always <= BTREE_VERSION) */
    BlockNumber btm_root;       /* current root location */
    uint32      btm_level;      /* tree level of the root page */
    BlockNumber btm_fastroot;   /* current "fast" root location */
    uint32      btm_fastlevel;  /* tree level of the "fast" root page */

    /* remaining fields only valid when btm_version >= BTREE_NOVAC_VERSION */

    /* number of deleted, non-recyclable pages during last cleanup */
    uint32      btm_last_cleanup_num_delpages;
    /* number of heap tuples during last cleanup (deprecated) */
    float8      btm_last_cleanup_num_heap_tuples;

    bool        btm_allequalimage;  /* are all columns "equalimage"? */
} BTMetaPageData;

#define BTPageGetMeta(p) \
    ((BTMetaPageData *) PageGetContents(p))

/*
 * The current Btree version is 4.  That's what you'll get when you create
 * a new index.
 *
 * Btree version 3 was used in PostgreSQL v11.  It is mostly the same as
 * version 4, but heap TIDs were not part of the keyspace.  Index tuples
 * with duplicate keys could be stored in any order.  We continue to
 * support reading and writing Btree versions 2 and 3, so that they don't
 * need to be immediately re-indexed at pg_upgrade.  In order to get the
 * new heapkeyspace semantics, however, a REINDEX is needed.
 *
 * Deduplication is safe to use when the btm_allequalimage field is set to
 * true.  It's safe to read the btm_allequalimage field on version 3, but
 * only version 4 indexes make use of deduplication.  Even version 4
 * indexes created on PostgreSQL v12 will need a REINDEX to make use of
 * deduplication, though, since there is no other way to set
 * btm_allequalimage to true (pg_upgrade hasn't been taught to set the
 * metapage field).
 *
 * Btree version 2 is mostly the same as version 3.  There are two new
 * fields in the metapage that were introduced in version 3.  A version 2
 * metapage will be automatically upgraded to version 3 on the first
 * insert to it.  INCLUDE indexes cannot use version 2.
 */
#define BTREE_METAPAGE      0       /* first page is meta */
#define BTREE_MAGIC         0x053162    /* magic number in metapage */
#define BTREE_VERSION       4       /* current version number */
#define BTREE_MIN_VERSION   2       /* minimum supported version */
#define BTREE_NOVAC_VERSION 3       /* version with all meta fields set */

/*
 * Maximum size of a btree index entry, including its tuple header.
 *
 * We actually need to be able to fit three items on every page,
 * so restrict any one item to 1/3 the per-page available space.
 *
 * There are rare cases where _bt_truncate() will need to enlarge
 * a heap index tuple to make space for a tiebreaker heap TID
 * attribute, which we account for here.
 */
#define BTMaxItemSize(page) \
    MAXALIGN_DOWN((PageGetPageSize(page) - \
                   MAXALIGN(SizeOfPageHeaderData + \
                            3*sizeof(ItemIdData) + \
                            3*sizeof(ItemPointerData)) - \
                   MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
#define BTMaxItemSizeNoHeapTid(page) \
    MAXALIGN_DOWN((PageGetPageSize(page) - \
                   MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
                   MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
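
/*
 * Worked example (illustration only, assuming the default BLCKSZ of 8192,
 * a 24-byte page header, and a 16-byte BTPageOpaqueData): BTMaxItemSize()
 * evaluates to MAXALIGN_DOWN((8192 - MAXALIGN(24 + 3*4 + 3*6) - 16) / 3) =
 * MAXALIGN_DOWN(8120 / 3) = 2704 bytes, the "btree version 4 maximum"
 * reported when an oversized index row is rejected.
 */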
/*
 * MaxTIDsPerBTreePage is an upper bound on the number of heap TIDs that
 * may be stored on a btree leaf page (across all of its tuples).  It is
 * used to size the per-page temporary buffers.
 *
 * Note: we don't bother considering per-tuple overheads here to keep
 * things simple (value is based on how many elements a single array of
 * heap TIDs must have to fill the space between the page header and
 * special area).  The value is slightly higher (i.e. more conservative)
 * than necessary as a result, which is considered acceptable.
 */
#define MaxTIDsPerBTreePage \
    (int) ((BLCKSZ - SizeOfPageHeaderData - sizeof(BTPageOpaqueData)) / \
           sizeof(ItemPointerData))
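
/*
 * For illustration only: with the default BLCKSZ of 8192, a 24-byte page
 * header, a 16-byte BTPageOpaqueData, and 6-byte ItemPointerData, this
 * works out to (8192 - 24 - 16) / 6 = 1358 TIDs per leaf page.
 */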
/*
 * The leaf-page fillfactor defaults to 90% but is user-adjustable.
 * For pages above the leaf level, we use a fixed 70% fillfactor.
 * The fillfactor is applied during index build and when splitting
 * a rightmost page; when splitting non-rightmost pages we try to
 * divide the data equally.  When splitting a page that's entirely
 * filled with a single value (duplicates), the effective leaf-page
 * fillfactor is 96%, regardless of whether the page is a rightmost
 * page.
 */
#define BTREE_MIN_FILLFACTOR        10
#define BTREE_DEFAULT_FILLFACTOR    90
#define BTREE_NONLEAF_FILLFACTOR    70
#define BTREE_SINGLEVAL_FILLFACTOR  96

/*
 * In general, the btree code tries to localize its knowledge about
 * page layout to a couple of routines.  However, we need a special
 * value to indicate "no page number" in those places where we expect
 * page numbers.  We can use zero for this because we never need to
 * make a pointer to the metadata page.
 */
#define P_NONE 0

/*
 * Macros to test whether a page is leftmost or rightmost on its tree level,
 * as well as other state info kept in the opaque data.
 */
#define P_LEFTMOST(opaque)      ((opaque)->btpo_prev == P_NONE)
#define P_RIGHTMOST(opaque)     ((opaque)->btpo_next == P_NONE)
#define P_ISLEAF(opaque)        (((opaque)->btpo_flags & BTP_LEAF) != 0)
#define P_ISROOT(opaque)        (((opaque)->btpo_flags & BTP_ROOT) != 0)
#define P_ISDELETED(opaque)     (((opaque)->btpo_flags & BTP_DELETED) != 0)
#define P_ISMETA(opaque)        (((opaque)->btpo_flags & BTP_META) != 0)
#define P_ISHALFDEAD(opaque)    (((opaque)->btpo_flags & BTP_HALF_DEAD) != 0)
#define P_IGNORE(opaque)        (((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0)
#define P_HAS_GARBAGE(opaque)   (((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0)
#define P_INCOMPLETE_SPLIT(opaque)  (((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0)
#define P_HAS_FULLXID(opaque)   (((opaque)->btpo_flags & BTP_HAS_FULLXID) != 0)
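
/*
 * Illustrative sketch, not part of the original header: a minimal example
 * of how the status macros above are typically combined.  A page whose
 * contents a scan may trust matches none of the "ignore" conditions;
 * callers that encounter a half-dead or deleted page step right via
 * btpo_next instead.  BTPageIsUsableForScan() is a hypothetical name used
 * only for this example.
 */
static inline bool
BTPageIsUsableForScan(Page page)
{
    BTPageOpaque opaque = BTPageGetOpaque(page);

    /* Half-dead and deleted pages persist only as tombstones */
    return !P_IGNORE(opaque);
}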
/*
 * BTDeletedPageData is the page contents of a deleted page
 */
typedef struct BTDeletedPageData
{
    FullTransactionId safexid;  /* See BTPageIsRecyclable() */
} BTDeletedPageData;

static inline void
BTPageSetDeleted(Page page, FullTransactionId safexid)
{
    BTPageOpaque opaque;
    PageHeader  header;
    BTDeletedPageData *contents;

    opaque = BTPageGetOpaque(page);
    header = ((PageHeader) page);

    opaque->btpo_flags &= ~BTP_HALF_DEAD;
    opaque->btpo_flags |= BTP_DELETED | BTP_HAS_FULLXID;
    header->pd_lower = MAXALIGN(SizeOfPageHeaderData) +
        sizeof(BTDeletedPageData);
    header->pd_upper = header->pd_special;

    /* Set safexid in deleted page */
    contents = ((BTDeletedPageData *) PageGetContents(page));
    contents->safexid = safexid;
}

static inline FullTransactionId
BTPageGetDeleteXid(Page page)
{
    BTPageOpaque opaque;
    BTDeletedPageData *contents;

    /* We only expect to be called with a deleted page */
    Assert(!PageIsNew(page));
    opaque = BTPageGetOpaque(page);
    Assert(P_ISDELETED(opaque));

    /* pg_upgrade'd deleted page -- must be safe to delete now */
    if (!P_HAS_FULLXID(opaque))
        return FirstNormalFullTransactionId;

    /* Get safexid from deleted page */
    contents = ((BTDeletedPageData *) PageGetContents(page));
    return contents->safexid;
}

/*
 * Is an existing page recyclable?
 *
 * This exists to centralize the policy on which deleted pages are now safe to
 * re-use.  However, _bt_pendingfsm_finalize() duplicates some of the same
 * logic because it doesn't work directly with pages -- keep the two in sync.
 *
 * Note: PageIsNew() pages are always safe to recycle, but we can't deal with
 * them here (caller is responsible for that case themselves).  Caller might
 * well need special handling for new pages anyway.
 */
static inline bool
BTPageIsRecyclable(Page page)
{
    BTPageOpaque opaque;

    Assert(!PageIsNew(page));

    /* Recycling okay iff page is deleted and safexid is old enough */
    opaque = BTPageGetOpaque(page);
    if (P_ISDELETED(opaque))
    {
        /*
         * The page was deleted, but when?  If it was just deleted, a scan
         * might have seen the downlink to it, and will read the page later.
         * As long as that can happen, we must keep the deleted page around as
         * a tombstone.
         *
         * For that, check if the deletion XID could still be visible to
         * anyone.  If not, then no scan that's still in progress could have
         * seen its downlink, and we can recycle it.
         *
         * XXX: If we had the heap relation we could be more aggressive about
         * recycling deleted pages in non-catalog relations.  For now we just
         * pass NULL.  That is at least simple and consistent.
         */
        return GlobalVisCheckRemovableFullXid(NULL, BTPageGetDeleteXid(page));
    }

    return false;
}
/*
 * BTVacState and BTPendingFSM are private nbtree.c state used during VACUUM.
 * They are exported for use by page deletion related code in nbtpage.c.
 */
typedef struct BTPendingFSM
{
    BlockNumber target;         /* Page deleted by current VACUUM */
    FullTransactionId safexid;  /* Page's BTDeletedPageData.safexid */
} BTPendingFSM;

typedef struct BTVacState
{
    IndexVacuumInfo *info;
    IndexBulkDeleteResult *stats;
    IndexBulkDeleteCallback callback;
    void       *callback_state;
    BTCycleId   cycleid;
    MemoryContext pagedelcontext;

    /*
     * _bt_pendingfsm_finalize() state
     */
    int         bufsize;        /* pendingpages space (in # elements) */
    int         maxbufsize;     /* max bufsize that respects work_mem */
    BTPendingFSM *pendingpages; /* One entry per newly deleted page */
    int         npendingpages;  /* current # valid pendingpages */
} BTVacState;

/*
 * Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost
 * page.  The high key is not a tuple that is used to visit the heap.  It is
 * a pivot tuple (see "Notes on B-Tree tuple format" below for definition).
 * The high key on a page is required to be greater than or equal to any
 * other key that appears on the page.  If we find ourselves trying to
 * insert a key that is strictly > high key, we know we need to move right
 * (this should only happen if the page was split since we examined the
 * parent page).
 *
 * Our insertion algorithm guarantees that we can use the initial least key
 * on our right sibling as the high key.  Once a page is created, its high
 * key changes only if the page is split.
 *
 * On a non-rightmost page, the high key lives in item 1 and data items
 * start in item 2.  Rightmost pages have no high key, so we store data
 * items beginning in item 1.
 */
#define P_HIKEY             ((OffsetNumber) 1)
#define P_FIRSTKEY          ((OffsetNumber) 2)
#define P_FIRSTDATAKEY(opaque)  (P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY)
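
/*
 * Illustrative sketch, not part of the original header: the conventional
 * way to visit every data item on a page, which works on rightmost and
 * non-rightmost pages alike because P_FIRSTDATAKEY() skips the high key
 * when one is present.  Assumes the caller holds at least a read lock on
 * the page; BTPageVisitDataItems() is a hypothetical name used only for
 * this example.
 */
static inline void
BTPageVisitDataItems(Page page)
{
    BTPageOpaque opaque = BTPageGetOpaque(page);
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);

    for (OffsetNumber off = P_FIRSTDATAKEY(opaque);
         off <= maxoff;
         off = OffsetNumberNext(off))
    {
        IndexTuple  itup;

        itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));
        (void) itup;            /* caller-specific processing goes here */
    }
}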
/*
 * Notes on B-Tree tuple format, and key and non-key attributes:
 *
 * INCLUDE B-Tree indexes have non-key attributes.  These are extra
 * attributes that may be returned by index-only scans, but do not influence
 * the order of items in the index (formally, non-key attributes are not
 * considered to be part of the key space).  Non-key attributes are only
 * present in leaf index tuples whose item pointers actually point to heap
 * tuples (non-pivot tuples).  _bt_check_natts() enforces the rules
 * described here.
 *
 * Non-pivot tuple format (plain/non-posting variant):
 *
 *  t_tid | t_info | key values | INCLUDE columns, if any
 *
 * t_tid points to the heap TID, which is a tiebreaker key column as of
 * BTREE_VERSION 4.
 *
 * Non-pivot tuples complement pivot tuples, which only have key columns.
 * The sole purpose of pivot tuples is to represent how the key space is
 * separated.  In general, any B-Tree index that has more than one level
 * (i.e. any index that does not just consist of a metapage and a single
 * leaf root page) must have some number of pivot tuples, since pivot
 * tuples are used for traversing the tree.  Suffix truncation can omit
 * trailing key columns when a new pivot is formed, which makes minus
 * infinity their logical value.  Since BTREE_VERSION 4 indexes treat heap
 * TID as a trailing key column that ensures that all index tuples are
 * physically unique, it is necessary to represent heap TID as a trailing
 * key column in pivot tuples, though very often this can be truncated
 * away, just like any other key column.  (Actually, the heap TID is
 * omitted rather than truncated, since its representation is different
 * from the non-pivot representation.)
 *
 * Pivot tuple format:
 *
 *  t_tid | t_info | key values | [heap TID]
 *
 * We store the number of columns present inside pivot tuples by abusing
 * their t_tid offset field, since pivot tuples never need to store a real
 * offset (pivot tuples generally store a downlink in t_tid, though).  The
 * offset field only stores the number of columns/attributes when the
 * INDEX_ALT_TID_MASK bit is set, which doesn't count the trailing heap
 * TID column sometimes stored in pivot tuples -- that's represented by
 * the presence of BT_PIVOT_HEAP_TID_ATTR.  The INDEX_ALT_TID_MASK bit in
 * t_info is always set on BTREE_VERSION 4 pivot tuples, since
 * BTreeTupleIsPivot() must work reliably on heapkeyspace versions.
 *
 * In version 2 or version 3 (!heapkeyspace) indexes, INDEX_ALT_TID_MASK
 * might not be set in pivot tuples.  BTreeTupleIsPivot() won't work
 * reliably as a result.  The number of columns stored is implicitly the
 * same as the number of columns in the index, just like any non-pivot
 * tuple.  (The number of columns stored should not vary, since suffix
 * truncation of key columns is unsafe within any !heapkeyspace index.)
 *
 * The 12 least significant bits from t_tid's offset number are used to
 * represent the number of key columns within a pivot tuple.  This leaves 4
 * status bits (BT_STATUS_OFFSET_MASK bits), which are shared by all tuples
 * that have the INDEX_ALT_TID_MASK bit set (set in t_info) to store basic
 * tuple metadata.  BTreeTupleIsPivot() and BTreeTupleIsPosting() use the
 * BT_STATUS_OFFSET_MASK bits.
 *
 * Sometimes non-pivot tuples also use a representation that repurposes
 * t_tid to store metadata rather than a TID.  PostgreSQL v13 introduced a
 * new non-pivot tuple format to support deduplication: posting list
 * tuples.  Deduplication merges together multiple equal non-pivot tuples
 * into a logically equivalent, space efficient representation.  A posting
 * list is an array of ItemPointerData elements.  Non-pivot tuples are
 * merged together to form posting list tuples lazily, at the point where
 * we'd otherwise have to split a leaf page.
 *
 * Posting tuple format (alternative non-pivot tuple representation):
 *
 *  t_tid | t_info | key values | posting list (TID array)
 *
 * Posting list tuples are recognized as such by having the
 * INDEX_ALT_TID_MASK status bit set in t_info and the BT_IS_POSTING status
 * bit set in t_tid's offset number.  These flags redefine the content of
 * the posting tuple's t_tid to store the location of the posting list
 * (instead of a block number), as well as the total number of heap TIDs
 * present in the tuple (instead of a real offset number).
 *
 * The 12 least significant bits from t_tid's offset number are used to
 * represent the number of heap TIDs present in the tuple, leaving 4 status
 * bits (the BT_STATUS_OFFSET_MASK bits).  Like any non-pivot tuple, the
 * number of columns stored is always implicitly the total number in the
 * index (in practice there can never be non-key columns stored, since
 * deduplication is not supported with INCLUDE indexes).
 */
#define INDEX_ALT_TID_MASK  INDEX_AM_RESERVED_BIT

/* Item pointer offset bit masks */
#define BT_OFFSET_MASK          0x0FFF
#define BT_STATUS_OFFSET_MASK   0xF000
/* BT_STATUS_OFFSET_MASK status bits */
#define BT_PIVOT_HEAP_TID_ATTR  0x1000
#define BT_IS_POSTING           0x2000

/*
 * Note: BTreeTupleIsPivot() can have false negatives (but not false
 * positives) when used with !heapkeyspace indexes
 */
static inline bool
BTreeTupleIsPivot(IndexTuple itup)
{
    if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
        return false;
    /* absence of BT_IS_POSTING in offset number indicates pivot tuple */
    if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) != 0)
        return false;

    return true;
}

static inline bool
BTreeTupleIsPosting(IndexTuple itup)
{
    if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
        return false;
    /* presence of BT_IS_POSTING in offset number indicates posting tuple */
    if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) == 0)
        return false;

    return true;
}

static inline void
BTreeTupleSetPosting(IndexTuple itup, uint16 nhtids, int postingoffset)
{
    Assert(nhtids > 1);
    Assert((nhtids & BT_STATUS_OFFSET_MASK) == 0);
    Assert((size_t) postingoffset == MAXALIGN(postingoffset));
    Assert(postingoffset < INDEX_SIZE_MASK);
    Assert(!BTreeTupleIsPivot(itup));

    itup->t_info |= INDEX_ALT_TID_MASK;
    ItemPointerSetOffsetNumber(&itup->t_tid, (nhtids | BT_IS_POSTING));
    ItemPointerSetBlockNumber(&itup->t_tid, postingoffset);
}

static inline uint16
BTreeTupleGetNPosting(IndexTuple posting)
{
    OffsetNumber existing;

    Assert(BTreeTupleIsPosting(posting));

    existing = ItemPointerGetOffsetNumberNoCheck(&posting->t_tid);
    return (existing & BT_OFFSET_MASK);
}

static inline uint32
BTreeTupleGetPostingOffset(IndexTuple posting)
{
    Assert(BTreeTupleIsPosting(posting));

    return ItemPointerGetBlockNumberNoCheck(&posting->t_tid);
}

static inline ItemPointer
BTreeTupleGetPosting(IndexTuple posting)
{
    return (ItemPointer) ((char *) posting +
                          BTreeTupleGetPostingOffset(posting));
}

static inline ItemPointer
BTreeTupleGetPostingN(IndexTuple posting, int n)
{
    return BTreeTupleGetPosting(posting) + n;
}
/*
 * Get/set downlink block number in pivot tuple.
 *
 * Note: Cannot assert that tuple is a pivot tuple.  If we did so then
 * !heapkeyspace indexes would exhibit false positive assertion failures.
 */
static inline BlockNumber
BTreeTupleGetDownLink(IndexTuple pivot)
{
    return ItemPointerGetBlockNumberNoCheck(&pivot->t_tid);
}

static inline void
BTreeTupleSetDownLink(IndexTuple pivot, BlockNumber blkno)
{
    ItemPointerSetBlockNumber(&pivot->t_tid, blkno);
}

/*
 * Get number of attributes within tuple.
 *
 * Note that this does not include an implicit tiebreaker heap TID
 * attribute, if any.  Note also that the number of key attributes must be
 * explicitly represented in all heapkeyspace pivot tuples.
 *
 * Note: This is defined as a macro rather than an inline function to
 * avoid including rel.h.
 */
#define BTreeTupleGetNAtts(itup, rel)   \
    ( \
        (BTreeTupleIsPivot(itup)) ? \
        ( \
            ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_OFFSET_MASK \
        ) \
        : \
        IndexRelationGetNumberOfAttributes(rel) \
    )

/*
 * Set number of key attributes in tuple.
 *
 * The heap TID tiebreaker attribute bit may also be set here, indicating that
 * a heap TID value will be stored at the end of the tuple (i.e. using the
 * special pivot tuple representation).
 */
static inline void
BTreeTupleSetNAtts(IndexTuple itup, uint16 nkeyatts, bool heaptid)
{
    Assert(nkeyatts <= INDEX_MAX_KEYS);
    Assert((nkeyatts & BT_STATUS_OFFSET_MASK) == 0);
    Assert(!heaptid || nkeyatts > 0);
    Assert(!BTreeTupleIsPivot(itup) || nkeyatts == 0);

    itup->t_info |= INDEX_ALT_TID_MASK;

    if (heaptid)
        nkeyatts |= BT_PIVOT_HEAP_TID_ATTR;

    /* BT_IS_POSTING bit is deliberately unset here */
    ItemPointerSetOffsetNumber(&itup->t_tid, nkeyatts);
    Assert(BTreeTupleIsPivot(itup));
}

/*
 * Get/set leaf page's "top parent" link from its high key.  Used during page
 * deletion.
 *
 * Note: Cannot assert that tuple is a pivot tuple.  If we did so then
 * !heapkeyspace indexes would exhibit false positive assertion failures.
 */
static inline BlockNumber
BTreeTupleGetTopParent(IndexTuple leafhikey)
{
    return ItemPointerGetBlockNumberNoCheck(&leafhikey->t_tid);
}

static inline void
BTreeTupleSetTopParent(IndexTuple leafhikey, BlockNumber blkno)
{
    ItemPointerSetBlockNumber(&leafhikey->t_tid, blkno);
    BTreeTupleSetNAtts(leafhikey, 0, false);
}

/*
 * Get tiebreaker heap TID attribute, if any.
 *
 * This returns the first/lowest heap TID in the case of a posting list tuple.
 */
static inline ItemPointer
BTreeTupleGetHeapTID(IndexTuple itup)
{
    if (BTreeTupleIsPivot(itup))
    {
        /* Pivot tuple heap TID representation? */
        if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
             BT_PIVOT_HEAP_TID_ATTR) != 0)
            return (ItemPointer) ((char *) itup + IndexTupleSize(itup) -
                                  sizeof(ItemPointerData));

        /* Heap TID attribute was truncated */
        return NULL;
    }
    else if (BTreeTupleIsPosting(itup))
        return BTreeTupleGetPosting(itup);

    return &itup->t_tid;
}
/*
 * Get maximum heap TID attribute, which could be the only TID in the case
 * of a non-pivot tuple that does not have a posting list.
 *
 * Works with non-pivot tuples only.
 */
static inline ItemPointer
BTreeTupleGetMaxHeapTID(IndexTuple itup)
{
    Assert(!BTreeTupleIsPivot(itup));

    if (BTreeTupleIsPosting(itup))
    {
        uint16      nposting = BTreeTupleGetNPosting(itup);

        return BTreeTupleGetPostingN(itup, nposting - 1);
    }

    return &itup->t_tid;
}
/*
 * Operator strategy numbers for B-tree have been moved to access/stratnum.h,
 * because many places need to use them in ScanKeyInit() calls.
 *
 * The strategy numbers are chosen so that we can commute them by
 * subtraction, thus:
 */
#define BTCommuteStrategyNumber(strat) (BTMaxStrategyNumber + 1 - (strat))
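
/*
 * For example (illustration only): since BTMaxStrategyNumber is 5,
 * BTCommuteStrategyNumber(BTLessStrategyNumber) == BTGreaterStrategyNumber,
 * and BTCommuteStrategyNumber(BTEqualStrategyNumber) is
 * BTEqualStrategyNumber itself.
 */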
/*
 * When a new operator class is declared, we require that the user
 * supply us with an amproc procedure (BTORDER_PROC) for determining
 * whether, for two keys a and b, a < b, a = b, or a > b.  This routine
 * must return < 0, 0, > 0, respectively, in these three cases.
 *
 * To facilitate accelerated sorting, an operator class may choose to
 * offer a second procedure (BTSORTSUPPORT_PROC).  For full details, see
 * src/include/utils/sortsupport.h.
 *
 * To support window frames defined by "RANGE offset PRECEDING/FOLLOWING",
 * an operator class may choose to offer a third amproc procedure
 * (BTINRANGE_PROC), independently of whether it offers sortsupport.
 * For full details, see doc/src/sgml/btree.sgml.
 *
 * To facilitate B-Tree deduplication, an operator class may choose to
 * offer a fourth amproc procedure (BTEQUALIMAGE_PROC).  For full details,
 * see doc/src/sgml/btree.sgml.
 */
#define BTORDER_PROC        1
#define BTSORTSUPPORT_PROC  2
#define BTINRANGE_PROC      3
#define BTEQUALIMAGE_PROC   4
#define BTOPTIONS_PROC      5
#define BTNProcs            5
/*
 * We need to be able to tell the difference between read and write
 * requests for pages, in order to do locking correctly.
 */
#define BT_READ     BUFFER_LOCK_SHARE
#define BT_WRITE    BUFFER_LOCK_EXCLUSIVE

/*
 * BTStackData -- As we descend a tree, we push the location of pivot
 * tuples whose downlink we are about to follow onto a private stack.  If
 * we split a leaf, we use this stack to walk back up the tree and insert
 * data into its parent page at the correct location.  We also have to
 * recursively insert into the grandparent page if and when the parent page
 * splits.  Our private stack can become stale due to concurrent page
 * splits and page deletions, but it should never give us an irredeemably
 * bad picture.
 */
typedef struct BTStackData
{
    BlockNumber bts_blkno;
    OffsetNumber bts_offset;
    struct BTStackData *bts_parent;
} BTStackData;

typedef BTStackData *BTStack;
/*
 * BTScanInsertData is the btree-private state needed to find an initial
 * position for an indexscan, or to insert new tuples -- an "insertion
 * scankey" (not to be confused with a search scankey).  It's used to descend
 * a B-Tree using _bt_search.
 *
 * heapkeyspace indicates if we expect all keys in the index to be physically
 * unique because heap TID is used as a tiebreaker attribute, and if the
 * index may have truncated key attributes in pivot tuples.  This is actually
 * a property of the index relation itself (not an indexscan).  heapkeyspace
 * indexes are indexes whose version is >= version 4.  It's convenient to
 * keep this close by, rather than accessing the metapage repeatedly.
 *
 * allequalimage is set to indicate that deduplication is safe for the index.
 * This is also a property of the index relation rather than an indexscan.
 *
 * anynullkeys indicates if any of the keys had NULL value when scankey was
 * built from index tuple (note that already-truncated tuple key attributes
 * set NULL as a placeholder key value, which also affects value of
 * anynullkeys).  This is a convenience for unique index non-pivot tuple
 * insertion, which usually temporarily unsets scantid, but shouldn't when
 * anynullkeys is true.  Value generally matches non-pivot tuple's HasNulls
 * bit, but may not when inserting into an INCLUDE index (tuple header value
 * is affected by the NULL-ness of both key and non-key attributes).
 *
 * When nextkey is false (the usual case), _bt_search and _bt_binsrch will
 * locate the first item >= scankey.  When nextkey is true, they will locate
 * the first item > scankey.
 *
 * pivotsearch is set to true by callers that want to re-find a leaf page
 * using a scankey built from a leaf page's high key.  Most callers set this
 * to false.
 *
 * scantid is the heap TID that is used as a final tiebreaker attribute.  It
 * is set to NULL when an index scan doesn't need to find a position for a
 * specific physical tuple.  Must be set when inserting new tuples into
 * heapkeyspace indexes, since every tuple in the tree unambiguously belongs
 * in one exact position (it's never set with !heapkeyspace indexes, though).
 * Despite the representational difference, nbtree search code considers
 * scantid to be just another insertion scankey attribute.
 *
 * scankeys is an array of scan key entries for attributes that are compared
 * before scantid (user-visible attributes).  keysz is the size of the array.
 * During insertion, there must be a scan key for every attribute, but when
 * starting a regular index scan some can be omitted.  The array is used as a
 * flexible array member, though it's sized in a way that makes it possible to
 * use stack allocations.  See nbtree/README for full details.
 */
typedef struct BTScanInsertData
{
    bool        heapkeyspace;
    bool        allequalimage;
    bool        anynullkeys;
    bool        nextkey;
    bool        pivotsearch;
    ItemPointer scantid;        /* tiebreaker for scankeys */
    int         keysz;          /* Size of scankeys array */
    ScanKeyData scankeys[INDEX_MAX_KEYS];   /* Must appear last */
} BTScanInsertData;

typedef BTScanInsertData *BTScanInsert;
/*
 * BTInsertStateData is a working area used during insertion.
 *
 * This is filled in after descending the tree to the first leaf page the new
 * tuple might belong on.  Tracks the current position while performing a
 * uniqueness check, before we have determined exactly which page to insert
 * into.
 *
 * (This should be private to nbtinsert.c, but it's also used by
 * _bt_binsrch_insert)
 */
typedef struct BTInsertStateData
{
    IndexTuple  itup;           /* Item we're inserting */
    Size        itemsz;         /* Size of itup -- should be MAXALIGN()'d */
    BTScanInsert itup_key;      /* Insertion scankey */

    /* Buffer containing leaf page we're likely to insert itup on */
    Buffer      buf;

    /*
     * Cache of bounds within the current buffer.  Only used for insertions
     * where _bt_check_unique is called.  See _bt_binsrch_insert and
     * _bt_findinsertloc for details.
     */
    bool        bounds_valid;
    OffsetNumber low;
    OffsetNumber stricthigh;

    /*
     * if _bt_binsrch_insert found the location inside existing posting list,
     * save the position inside the list.  -1 sentinel value indicates overlap
     * with an existing posting list tuple that has its LP_DEAD bit set.
     */
    int         postingoff;
} BTInsertStateData;

typedef BTInsertStateData *BTInsertState;
/*
 * State used to represent an individual pending tuple during
 * deduplication.
 */
typedef struct BTDedupInterval
{
    OffsetNumber baseoff;
    uint16      nitems;
} BTDedupInterval;

/*
 * BTDedupStateData is a working area used during deduplication.
 *
 * The status info fields track the state of a whole-page deduplication pass.
 * State about the current pending posting list is also tracked.
 *
 * A pending posting list is comprised of a contiguous group of equal items
 * from the page, starting from page offset number 'baseoff'.  This is the
 * offset number of the "base" tuple for the new posting list.  'nitems' is
 * the current total number of existing items from the page that will be
 * merged to make a new posting list tuple, including the base tuple item.
 * (Existing items may themselves be posting list tuples, or regular
 * non-pivot tuples.)
 *
 * The total size of the existing tuples to be freed when pending posting list
 * is processed gets tracked by 'phystupsize'.  This information allows
 * deduplication to calculate the space saving for each new posting list
 * tuple, and for the entire pass over the page as a whole.
 */
typedef struct BTDedupStateData
{
    /* Deduplication status info for entire pass over page */
    bool        deduplicate;    /* Still deduplicating page? */
    int         nmaxitems;      /* Number of max-sized tuples so far */
    Size        maxpostingsize; /* Limit on size of final tuple */

    /* Metadata about base tuple of current pending posting list */
    IndexTuple  base;           /* Use to form new posting list */
    OffsetNumber baseoff;       /* page offset of base */
    Size        basetupsize;    /* base size without original posting list */

    /* Other metadata about pending posting list */
    ItemPointer htids;          /* Heap TIDs in pending posting list */
    int         nhtids;         /* Number of heap TIDs in htids array */
    int         nitems;         /* Number of existing tuples/line pointers */
    Size        phystupsize;    /* Includes line pointer overhead */

    /*
     * Array of tuples to go on new version of the page.  Contains one entry
     * for each group of consecutive items.  Note that existing tuples that
     * will not become posting list tuples do not appear in the array (they
     * are implicitly unchanged by deduplication pass).
     */
    int         nintervals;     /* current number of intervals in array */
    BTDedupInterval intervals[MaxIndexTuplesPerPage];
} BTDedupStateData;

typedef BTDedupStateData *BTDedupState;
/*
 * BTVacuumPostingData is state that represents how to VACUUM (or delete) a
 * posting list tuple when some (though not all) of its TIDs are to be
 * deleted.
 *
 * Convention is that itup field is the original posting list tuple on input,
 * and palloc()'d final tuple used to overwrite existing tuple on output.
 */
typedef struct BTVacuumPostingData
{
    /* Tuple that will be/was updated */
    IndexTuple  itup;
    OffsetNumber updatedoffset;

    /* State needed to describe final itup in WAL */
    uint16      ndeletedtids;
    uint16      deletetids[FLEXIBLE_ARRAY_MEMBER];
} BTVacuumPostingData;

typedef BTVacuumPostingData *BTVacuumPosting;
/*
 * BTScanOpaqueData is the btree-private state needed for an indexscan.
 * This consists of preprocessed scan keys (see _bt_preprocess_keys() for
 * details of the preprocessing), information about the current location
 * of the scan, and information about the marked location, if any.  (We use
 * BTScanPosData to represent the data needed for each of current and marked
 * locations.)  In addition we can remember some known-killed index entries
 * that must be marked before we can move off the current page.
 *
 * Index scans work a page at a time: we pin and read-lock the page, identify
 * all the matching items on the page and save them in BTScanPosData, then
 * release the read-lock while returning the items to the caller for
 * processing.  This approach minimizes lock/unlock traffic.  Note that we
 * keep the pin on the index page until the caller is done with all the items
 * (this is needed for VACUUM synchronization, see nbtree/README).  When we
 * are ready to step to the next page, if the caller has told us any of the
 * items were killed, we re-lock the page to mark them killed, then unlock.
 * Finally we drop the pin and step to the next page in the appropriate
 * direction.
 *
 * If we are doing an index-only scan, we save the entire IndexTuple for each
 * matched item, otherwise only its heap TID and offset.  The IndexTuples go
 * into a separate workspace array; each BTScanPosItem stores its tuple's
 * offset within that array.  Posting list tuples store a "base" tuple once,
 * allowing the same key to be returned for each TID in the posting list
 * tuple.
 */
typedef struct BTScanPosItem    /* what we remember about each match */
{
    ItemPointerData heapTid;    /* TID of referenced heap item */
    OffsetNumber indexOffset;   /* index item's location within page */
    LocationIndex tupleOffset;  /* IndexTuple's offset in workspace, if any */
} BTScanPosItem;

typedef struct BTScanPosData
{
    Buffer      buf;            /* if valid, the buffer is pinned */

    XLogRecPtr  lsn;            /* pos in the WAL stream when page was read */
    BlockNumber currPage;       /* page referenced by items array */
    BlockNumber nextPage;       /* page's right link when we scanned it */

    /*
     * moreLeft and moreRight track whether we think there may be matching
     * index entries to the left and right of the current page, respectively.
     * We can clear the appropriate one of these flags when _bt_checkkeys()
     * returns continuescan = false.
     */
    bool        moreLeft;
    bool        moreRight;

    /*
     * If we are doing an index-only scan, nextTupleOffset is the first free
     * location in the associated tuple storage workspace.
     */
    int         nextTupleOffset;

    /*
     * The items array is always ordered in index order (ie, increasing
     * indexoffset).  When scanning backwards it is convenient to fill the
     * array back-to-front, so we start at the last slot and fill downwards.
     * Hence we need both a first-valid-entry and a last-valid-entry counter.
     * itemIndex is a cursor showing which entry was last returned to caller.
     */
    int         firstItem;      /* first valid index in items[] */
    int         lastItem;       /* last valid index in items[] */
    int         itemIndex;      /* current index in items[] */

    BTScanPosItem items[MaxTIDsPerBTreePage];   /* MUST BE LAST */
} BTScanPosData;

typedef BTScanPosData *BTScanPos;
#define BTScanPosIsPinned(scanpos) \
( \
    AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
                !BufferIsValid((scanpos).buf)), \
    BufferIsValid((scanpos).buf) \
)
#define BTScanPosUnpin(scanpos) \
    do { \
        ReleaseBuffer((scanpos).buf); \
        (scanpos).buf = InvalidBuffer; \
    } while (0)
#define BTScanPosUnpinIfPinned(scanpos) \
    do { \
        if (BTScanPosIsPinned(scanpos)) \
            BTScanPosUnpin(scanpos); \
    } while (0)

#define BTScanPosIsValid(scanpos) \
( \
    AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
                !BufferIsValid((scanpos).buf)), \
    BlockNumberIsValid((scanpos).currPage) \
)
#define BTScanPosInvalidate(scanpos) \
    do { \
        (scanpos).currPage = InvalidBlockNumber; \
        (scanpos).nextPage = InvalidBlockNumber; \
        (scanpos).buf = InvalidBuffer; \
        (scanpos).lsn = InvalidXLogRecPtr; \
        (scanpos).nextTupleOffset = 0; \
    } while (0)
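
/*
 * Illustrative sketch, not part of the original header: the usual teardown
 * sequence for a scan position -- drop the pin if one is still held, then
 * reset the position to its invalid state.  BTScanPosReset() is a
 * hypothetical name used only for this example.
 */
static inline void
BTScanPosReset(BTScanPosData *scanpos)
{
    BTScanPosUnpinIfPinned(*scanpos);
    BTScanPosInvalidate(*scanpos);
}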
/* We need one of these for each equality-type SK_SEARCHARRAY scan key */
typedef struct BTArrayKeyInfo
{
    int         scan_key;       /* index of associated key in arrayKeyData */
    int         cur_elem;       /* index of current element in elem_values */
    int         mark_elem;      /* index of marked element in elem_values */
    int         num_elems;      /* number of elems in current array value */
    Datum      *elem_values;    /* array of num_elems Datums */
} BTArrayKeyInfo;

typedef struct BTScanOpaqueData
{
    /* these fields are set by _bt_preprocess_keys(): */
    bool        qual_ok;        /* false if qual can never be satisfied */
    int         numberOfKeys;   /* number of preprocessed scan keys */
    ScanKey     keyData;        /* array of preprocessed scan keys */

    /* workspace for SK_SEARCHARRAY support */
    ScanKey     arrayKeyData;   /* modified copy of scan->keyData */
    int         numArrayKeys;   /* number of equality-type array keys (-1 if
                                 * there are any unsatisfiable array keys) */
    int         arrayKeyCount;  /* count indicating number of array scan keys
                                 * processed */
    BTArrayKeyInfo *arrayKeys;  /* info about each equality-type array key */
    MemoryContext arrayContext; /* scan-lifespan context for array data */

    /* info about killed items if any (killedItems is NULL if never used) */
    int        *killedItems;    /* currPos.items indexes of killed items */
    int         numKilled;      /* number of currently stored items */

    /*
     * If we are doing an index-only scan, these are the tuple storage
     * workspaces for the currPos and markPos respectively.  Each is of size
     * BLCKSZ, so it can hold as much as a full page's worth of tuples.
     */
    char       *currTuples;     /* tuple storage for currPos */
    char       *markTuples;     /* tuple storage for markPos */

    /*
     * If the marked position is on the same page as current position, we
     * don't use markPos, but just keep the marked itemIndex in markItemIndex
     * (all the rest of currPos is valid for the mark position).  Hence, to
     * determine if there is a mark, first look at markItemIndex, then at
     * markPos.
     */
    int         markItemIndex;  /* itemIndex, or -1 if not valid */

    /* keep these last in struct for efficiency */
    BTScanPosData currPos;      /* current position data */
    BTScanPosData markPos;      /* marked position, if any */
} BTScanOpaqueData;

typedef BTScanOpaqueData *BTScanOpaque;
/*
 * We use some private sk_flags bits in preprocessed scan keys.  We're allowed
 * to use bits 16-31 (see skey.h).  The uppermost bits are copied from the
 * index's indoption[] array entry for the index attribute.
 */
#define SK_BT_REQFWD    0x00010000  /* required to continue forward scan */
#define SK_BT_REQBKWD   0x00020000  /* required to continue backward scan */
#define SK_BT_INDOPTION_SHIFT   24  /* must clear the above bits */
#define SK_BT_DESC          (INDOPTION_DESC << SK_BT_INDOPTION_SHIFT)
#define SK_BT_NULLS_FIRST   (INDOPTION_NULLS_FIRST << SK_BT_INDOPTION_SHIFT)

typedef struct BTOptions
{
    int32       varlena_header_;    /* varlena header (do not touch directly!) */
    int         fillfactor;     /* page fill factor in percent (0..100) */
    float8      vacuum_cleanup_index_scale_factor;  /* deprecated */
    bool        deduplicate_items;  /* Try to deduplicate items? */
} BTOptions;

#define BTGetFillFactor(relation) \
    (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
                 relation->rd_rel->relam == BTREE_AM_OID), \
     (relation)->rd_options ? \
     ((BTOptions *) (relation)->rd_options)->fillfactor : \
     BTREE_DEFAULT_FILLFACTOR)
#define BTGetTargetPageFreeSpace(relation) \
    (BLCKSZ * (100 - BTGetFillFactor(relation)) / 100)
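
/*
 * Worked example (illustration only): with the default BLCKSZ of 8192 and
 * the default leaf fillfactor of 90, BTGetTargetPageFreeSpace() comes to
 * 8192 * 10 / 100 = 819 bytes left free on each leaf page during index
 * build and rightmost page splits.
 */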
#define BTGetDeduplicateItems(relation) \
    (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
                 relation->rd_rel->relam == BTREE_AM_OID), \
    ((relation)->rd_options ? \
     ((BTOptions *) (relation)->rd_options)->deduplicate_items : true))
/*
 * Constant definition for progress reporting.  Phase numbers must match
 * btbuildphasename.
 */
/* PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE is 1 (see progress.h) */
#define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN   2
#define PROGRESS_BTREE_PHASE_PERFORMSORT_1          3
#define PROGRESS_BTREE_PHASE_PERFORMSORT_2          4
#define PROGRESS_BTREE_PHASE_LEAF_LOAD              5
/*
 * external entry points for btree, in nbtree.c
 */
extern void btbuildempty(Relation index);
extern bool btinsert(Relation rel, Datum *values, bool *isnull,
                     ItemPointer ht_ctid, Relation heapRel,
                     IndexUniqueCheck checkUnique,
                     bool indexUnchanged,
                     struct IndexInfo *indexInfo);
extern IndexScanDesc btbeginscan(Relation rel, int nkeys, int norderbys);
extern Size btestimateparallelscan(void);
extern void btinitparallelscan(void *target);
extern bool btgettuple(IndexScanDesc scan, ScanDirection dir);
extern int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
extern void btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
                     ScanKey orderbys, int norderbys);
extern void btparallelrescan(IndexScanDesc scan);
extern void btendscan(IndexScanDesc scan);
extern void btmarkpos(IndexScanDesc scan);
extern void btrestrpos(IndexScanDesc scan);
extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info,
                                           IndexBulkDeleteResult *stats,
                                           IndexBulkDeleteCallback callback,
                                           void *callback_state);
extern IndexBulkDeleteResult *btvacuumcleanup(IndexVacuumInfo *info,
                                              IndexBulkDeleteResult *stats);
extern bool btcanreturn(Relation index, int attno);

/*
 * prototypes for internal functions in nbtree.c
 */
extern bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *pageno);
extern void _bt_parallel_release(IndexScanDesc scan, BlockNumber scan_page);
extern void _bt_parallel_done(IndexScanDesc scan);
extern void _bt_parallel_advance_array_keys(IndexScanDesc scan);

/*
 * prototypes for functions in nbtdedup.c
 */
extern void _bt_dedup_pass(Relation rel, Buffer buf, Relation heapRel,
                           IndexTuple newitem, Size newitemsz,
                           bool bottomupdedup);
extern bool _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel,
                                 Size newitemsz);
extern void _bt_dedup_start_pending(BTDedupState state, IndexTuple base,
                                    OffsetNumber baseoff);
extern bool _bt_dedup_save_htid(BTDedupState state, IndexTuple itup);
extern Size _bt_dedup_finish_pending(Page newpage, BTDedupState state);
extern IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids,
                                   int nhtids);
extern void _bt_update_posting(BTVacuumPosting vacposting);
extern IndexTuple _bt_swap_posting(IndexTuple newitem, IndexTuple oposting,
                                   int postingoff);

/*
 * prototypes for functions in nbtinsert.c
 */
extern bool _bt_doinsert(Relation rel, IndexTuple itup,
                         IndexUniqueCheck checkUnique, bool indexUnchanged,
                         Relation heapRel);
extern void _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack);
extern Buffer _bt_getstackbuf(Relation rel, BTStack stack, BlockNumber child);

/*
 * prototypes for functions in nbtsplitloc.c
 */
extern OffsetNumber _bt_findsplitloc(Relation rel, Page origpage,
                                     OffsetNumber newitemoff, Size newitemsz,
                                     IndexTuple newitem, bool *newitemonleft);

/*
 * prototypes for functions in nbtpage.c
 */
extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
                             bool allequalimage);
extern bool _bt_vacuum_needs_cleanup(Relation rel);
extern void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages);
extern void _bt_upgrademetapage(Page page);
extern Buffer _bt_getroot(Relation rel, int access);
extern Buffer _bt_gettrueroot(Relation rel);
extern int  _bt_getrootheight(Relation rel);
extern void _bt_metaversion(Relation rel, bool *heapkeyspace,
                            bool *allequalimage);
extern void _bt_checkpage(Relation rel, Buffer buf);
extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
                               BlockNumber blkno, int access);
extern void _bt_relbuf(Relation rel, Buffer buf);
extern void _bt_lockbuf(Relation rel, Buffer buf, int access);
extern void _bt_unlockbuf(Relation rel, Buffer buf);
extern bool _bt_conditionallockbuf(Relation rel, Buffer buf);
extern void _bt_upgradelockbufcleanup(Relation rel, Buffer buf);
extern void _bt_pageinit(Page page, Size size);
extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
                                OffsetNumber *deletable, int ndeletable,
                                BTVacuumPosting *updatable, int nupdatable);
extern void _bt_delitems_delete_check(Relation rel, Buffer buf,
                                      Relation heapRel,
                                      TM_IndexDeleteOp *delstate);
extern void _bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate);
extern void _bt_pendingfsm_init(Relation rel, BTVacState *vstate,
                                bool cleanuponly);
extern void _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate);

/*
 * prototypes for functions in nbtsearch.c
 */
extern BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP,
                          int access, Snapshot snapshot);
extern Buffer _bt_moveright(Relation rel, BTScanInsert key, Buffer buf,
                            bool forupdate, BTStack stack, int access,
                            Snapshot snapshot);
extern OffsetNumber _bt_binsrch_insert(Relation rel,
                                       BTInsertState insertstate);
extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page,
                         OffsetNumber offnum);
extern bool _bt_first(IndexScanDesc scan, ScanDirection dir);
extern bool _bt_next(IndexScanDesc scan, ScanDirection dir);
extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
                               Snapshot snapshot);

/*
 * prototypes for functions in nbtutils.c
 */
extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup);
extern void _bt_freestack(BTStack stack);
extern void _bt_preprocess_array_keys(IndexScanDesc scan);
extern void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir);
extern bool _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir);
extern void _bt_mark_array_keys(IndexScanDesc scan);
extern void _bt_restore_array_keys(IndexScanDesc scan);
extern void _bt_preprocess_keys(IndexScanDesc scan);
extern bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
                          int tupnatts, ScanDirection dir,
                          bool *continuescan);
extern void _bt_killitems(IndexScanDesc scan);
extern BTCycleId _bt_vacuum_cycleid(Relation rel);
extern BTCycleId _bt_start_vacuum(Relation rel);
extern void _bt_end_vacuum(Relation rel);
extern void _bt_end_vacuum_callback(int code, Datum arg);
extern Size BTreeShmemSize(void);
extern void BTreeShmemInit(void);
extern bytea *btoptions(Datum reloptions, bool validate);
extern bool btproperty(Oid index_oid, int attno,
                       IndexAMProperty prop, const char *propname,
                       bool *res, bool *isnull);
extern char *btbuildphasename(int64 phasenum);
extern IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft,
                               IndexTuple firstright, BTScanInsert itup_key);
extern int  _bt_keep_natts_fast(Relation rel, IndexTuple lastleft,
                                IndexTuple firstright);
extern bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page,
                            OffsetNumber offnum);
extern void _bt_check_third_page(Relation rel, Relation heap,
                                 bool needheaptidspace, Page page,
                                 IndexTuple newtup);
extern bool _bt_allequalimage(Relation rel, bool debugmessage);

/*
 * prototypes for functions in nbtvalidate.c
 */
extern bool btvalidate(Oid opclassoid);
extern void btadjustmembers(Oid opfamilyoid,
                            Oid opclassoid,
                            List *operators,
                            List *functions);

/*
 * prototypes for functions in nbtsort.c
 */
extern IndexBuildResult *btbuild(Relation heap, Relation index,
                                 struct IndexInfo *indexInfo);
extern void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc);

#endif                          /* NBTREE_H */