/*-------------------------------------------------------------------------
 *
 * hash.h
 *    header file for postgres hash access method implementation
 *
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/hash.h
 *
 * NOTES
 *    modeled after Margo Seltzer's hash implementation for unix.
 *
 *-------------------------------------------------------------------------
 */
#ifndef HASH_H
#define HASH_H

#include "access/amapi.h"
#include "access/itup.h"
#include "access/sdir.h"
#include "catalog/pg_am_d.h"
#include "common/hashfn.h"
#include "lib/stringinfo.h"
#include "storage/bufmgr.h"
#include "storage/lockdefs.h"
#include "utils/hsearch.h"
#include "utils/relcache.h"
/*
 * Mapping from hash bucket number to physical block number of bucket's
 * starting page.  Beware of multiple evaluations of argument!
 */
typedef uint32 Bucket;

#define InvalidBucket   ((Bucket) 0xFFFFFFFF)

#define BUCKET_TO_BLKNO(metap,B) \
    ((BlockNumber) ((B) + ((B) ? (metap)->hashm_spares[_hash_spareindex((B)+1)-1] : 0)) + 1)
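/*
 * Worked example (editor's illustration, not part of the original header):
 * bucket 0 always maps to block 1, immediately after the metapage at block 0,
 * since no overflow pages precede it.  A later bucket B maps to block
 * B + (overflow pages allocated before B's splitpoint) + 1, so bucket pages
 * are only "pushed right" by overflow pages belonging to earlier splitpoints.
 */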
/*
 * Special space for hash index pages.
 *
 * hasho_flag's LH_PAGE_TYPE bits tell us which type of page we're looking at.
 * Additional bits in the flag word are used for more transient purposes.
 *
 * To test a page's type, do (hasho_flag & LH_PAGE_TYPE) == LH_xxx_PAGE.
 * However, we ensure that each used page type has a distinct bit so that
 * we can OR together page types for uses such as the allowable-page-types
 * argument of _hash_checkpage().
 */
#define LH_UNUSED_PAGE                  (0)
#define LH_OVERFLOW_PAGE                (1 << 0)
#define LH_BUCKET_PAGE                  (1 << 1)
#define LH_BITMAP_PAGE                  (1 << 2)
#define LH_META_PAGE                    (1 << 3)
#define LH_BUCKET_BEING_POPULATED       (1 << 4)
#define LH_BUCKET_BEING_SPLIT           (1 << 5)
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP   (1 << 6)
#define LH_PAGE_HAS_DEAD_TUPLES         (1 << 7)

#define LH_PAGE_TYPE \
    (LH_OVERFLOW_PAGE | LH_BUCKET_PAGE | LH_BITMAP_PAGE | LH_META_PAGE)
/*
 * In an overflow page, hasho_prevblkno stores the block number of the previous
 * page in the bucket chain; in a bucket page, hasho_prevblkno stores the
 * hashm_maxbucket value as of the last time the bucket was split, or else as
 * of the time the bucket was created.  The latter convention is used to
 * determine whether a cached copy of the metapage is too stale to be used
 * without needing to lock or pin the metapage.
 *
 * hasho_nextblkno is always the block number of the next page in the
 * bucket chain, or InvalidBlockNumber if there are no more such pages.
 */
typedef struct HashPageOpaqueData
{
    BlockNumber hasho_prevblkno;    /* see above */
    BlockNumber hasho_nextblkno;    /* see above */
    Bucket      hasho_bucket;       /* bucket number this pg belongs to */
    uint16      hasho_flag;         /* page type code + flag bits, see above */
    uint16      hasho_page_id;      /* for identification of hash indexes */
} HashPageOpaqueData;

typedef HashPageOpaqueData *HashPageOpaque;

#define HashPageGetOpaque(page) ((HashPageOpaque) PageGetSpecialPointer(page))

#define H_NEEDS_SPLIT_CLEANUP(opaque)       (((opaque)->hasho_flag & LH_BUCKET_NEEDS_SPLIT_CLEANUP) != 0)
#define H_BUCKET_BEING_SPLIT(opaque)        (((opaque)->hasho_flag & LH_BUCKET_BEING_SPLIT) != 0)
#define H_BUCKET_BEING_POPULATED(opaque)    (((opaque)->hasho_flag & LH_BUCKET_BEING_POPULATED) != 0)
#define H_HAS_DEAD_TUPLES(opaque)           (((opaque)->hasho_flag & LH_PAGE_HAS_DEAD_TUPLES) != 0)
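/*
 * Example (editor's sketch, not part of the original header): testing a
 * page's type and flag bits through its special space, in the manner the
 * comment above LH_PAGE_TYPE describes.  The function name is illustrative
 * only; the caller is assumed to hold an appropriate lock on the page.
 */
static inline bool
hash_page_is_live_bucket_example(Page page)
{
    HashPageOpaque opaque = HashPageGetOpaque(page);

    /* page type bits are compared as a group... */
    if ((opaque->hasho_flag & LH_PAGE_TYPE) != LH_BUCKET_PAGE)
        return false;
    /* ...while transient flag bits are tested individually */
    return !H_NEEDS_SPLIT_CLEANUP(opaque);
}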
/*
 * The page ID is for the convenience of pg_filedump and similar utilities,
 * which otherwise would have a hard time telling pages of different index
 * types apart.  It should be the last 2 bytes on the page.  This is more or
 * less "free" due to alignment considerations.
 */
#define HASHO_PAGE_ID       0xFF80

typedef struct HashScanPosItem  /* what we remember about each match */
{
    ItemPointerData heapTid;    /* TID of referenced heap item */
    OffsetNumber indexOffset;   /* index item's location within page */
} HashScanPosItem;
typedef struct HashScanPosData
{
    Buffer      buf;            /* if valid, the buffer is pinned */
    BlockNumber currPage;       /* current hash index page */
    BlockNumber nextPage;       /* next overflow page */
    BlockNumber prevPage;       /* prev overflow or bucket page */

    /*
     * The items array is always ordered in index order (ie, increasing
     * indexoffset).  When scanning backwards it is convenient to fill the
     * array back-to-front, so we start at the last slot and fill downwards.
     * Hence we need both a first-valid-entry and a last-valid-entry counter.
     * itemIndex is a cursor showing which entry was last returned to caller.
     */
    int         firstItem;      /* first valid index in items[] */
    int         lastItem;       /* last valid index in items[] */
    int         itemIndex;      /* current index in items[] */

    HashScanPosItem items[MaxIndexTuplesPerPage];   /* MUST BE LAST */
} HashScanPosData;
#define HashScanPosIsPinned(scanpos) \
( \
    AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
                !BufferIsValid((scanpos).buf)), \
    BufferIsValid((scanpos).buf) \
)

#define HashScanPosIsValid(scanpos) \
( \
    AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
                !BufferIsValid((scanpos).buf)), \
    BlockNumberIsValid((scanpos).currPage) \
)

#define HashScanPosInvalidate(scanpos) \
    do { \
        (scanpos).buf = InvalidBuffer; \
        (scanpos).currPage = InvalidBlockNumber; \
        (scanpos).nextPage = InvalidBlockNumber; \
        (scanpos).prevPage = InvalidBlockNumber; \
        (scanpos).firstItem = 0; \
        (scanpos).lastItem = 0; \
        (scanpos).itemIndex = 0; \
    } while (0)
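/*
 * Example (editor's sketch, not part of the original header): stepping the
 * itemIndex cursor through the saved matches, along the lines of the comment
 * in HashScanPosData.  itemIndex is taken to be the last entry returned;
 * the function returns false once every entry in items[firstItem..lastItem]
 * has been consumed in the given direction.
 */
static inline bool
hash_scanpos_next_item_example(HashScanPosData *pos, ScanDirection dir,
                               HashScanPosItem **item)
{
    if (ScanDirectionIsForward(dir))
    {
        if (++pos->itemIndex > pos->lastItem)
            return false;
    }
    else
    {
        if (--pos->itemIndex < pos->firstItem)
            return false;
    }
    *item = &pos->items[pos->itemIndex];
    return true;
}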
/*
 * HashScanOpaqueData is private state for a hash index scan.
 */
typedef struct HashScanOpaqueData
{
    /* Hash value of the scan key, ie, the hash key we seek */
    uint32      hashso_sk_hash;

    /* remember the buffer associated with primary bucket */
    Buffer      hashso_bucket_buf;

    /*
     * Remember the buffer associated with the primary bucket page of the
     * bucket being split; it is needed while scanning the bucket that is
     * being populated by the split operation.
     */
    Buffer      hashso_split_bucket_buf;

    /* Whether scan starts on bucket being populated due to split */
    bool        hashso_buc_populated;

    /*
     * Whether we are scanning the bucket being split; this flag is consulted
     * only when hashso_buc_populated is true.
     */
    bool        hashso_buc_split;

    /* info about killed items if any (killedItems is NULL if never used) */
    int        *killedItems;    /* currPos.items indexes of killed items */
    int         numKilled;      /* number of currently stored items */

    /*
     * Identify all the matching items on a page and save them in
     * HashScanPosData
     */
    HashScanPosData currPos;    /* current position data */
} HashScanOpaqueData;
typedef HashScanOpaqueData *HashScanOpaque;

/*
 * Definitions for metapage.
 */

#define HASH_METAPAGE   0       /* metapage is always block 0 */

#define HASH_MAGIC      0x6440640
#define HASH_VERSION    4
/*
 * spares[] holds the number of overflow pages currently allocated at or
 * before a certain splitpoint.  For example, if spares[3] = 7 then there are
 * 7 ovflpages before splitpoint 3 (compare BUCKET_TO_BLKNO macro).  The
 * value in spares[ovflpoint] increases as overflow pages are added at the
 * end of the index.  Once ovflpoint increases (ie, we have actually allocated
 * the bucket pages belonging to that splitpoint) the number of spares at the
 * prior splitpoint cannot change anymore.
 *
 * ovflpages that have been recycled for reuse can be found by looking at
 * bitmaps that are stored within ovflpages dedicated for the purpose.
 * The blknos of these bitmap pages are kept in mapp[]; nmaps is the
 * number of currently existing bitmaps.
 *
 * The limitation on the size of spares[] comes from the fact that there's
 * no point in having more than 2^32 buckets with only uint32 hashcodes.
 * (Note: HASH_MAX_SPLITPOINTS, which is the size of spares[], is sized to
 * accommodate the multi-phase allocation of buckets used for splitpoint
 * groups beyond HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE.)
 *
 * There is no particular upper limit on the size of mapp[], other than
 * needing to fit into the metapage.  (With 8K block size, 1024 bitmaps
 * limit us to 256 GB of overflow space...).  For a smaller block size we
 * cannot use 1024 bitmaps, since the metapage data would then cross the
 * block size boundary; so BLCKSZ determines the maximum number of bitmaps.
 */
#define HASH_MAX_BITMAPS    Min(BLCKSZ / 8, 1024)

#define HASH_SPLITPOINT_PHASE_BITS      2
#define HASH_SPLITPOINT_PHASES_PER_GRP  (1 << HASH_SPLITPOINT_PHASE_BITS)
#define HASH_SPLITPOINT_PHASE_MASK      (HASH_SPLITPOINT_PHASES_PER_GRP - 1)
#define HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE   10

/* defines max number of splitpoint phases a hash index can have */
#define HASH_MAX_SPLITPOINT_GROUP   32
#define HASH_MAX_SPLITPOINTS \
    (((HASH_MAX_SPLITPOINT_GROUP - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) * \
      HASH_SPLITPOINT_PHASES_PER_GRP) + \
     HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
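/*
 * Worked example (editor's illustration, not part of the original header):
 * with the values above, HASH_MAX_SPLITPOINTS = ((32 - 10) * 4) + 10 = 98,
 * i.e. 10 single-phase splitpoint groups plus 4 phases for each of the
 * remaining 22 groups, so hashm_spares[] has 98 entries.
 */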
typedef struct HashMetaPageData
{
    uint32      hashm_magic;    /* magic no. for hash tables */
    uint32      hashm_version;  /* version ID */
    double      hashm_ntuples;  /* number of tuples stored in the table */
    uint16      hashm_ffactor;  /* target fill factor (tuples/bucket) */
    uint16      hashm_bsize;    /* index page size (bytes) */
    uint16      hashm_bmsize;   /* bitmap array size (bytes) - must be a power
                                 * of 2 */
    uint16      hashm_bmshift;  /* log2(bitmap array size in BITS) */
    uint32      hashm_maxbucket;    /* ID of maximum bucket in use */
    uint32      hashm_highmask; /* mask to modulo into entire table */
    uint32      hashm_lowmask;  /* mask to modulo into lower half of table */
    uint32      hashm_ovflpoint;    /* splitpoint from which ovflpage being
                                     * allocated */
    uint32      hashm_firstfree;    /* lowest-number free ovflpage (bit#) */
    uint32      hashm_nmaps;    /* number of bitmap pages */
    RegProcedure hashm_procid;  /* hash function id from pg_proc */
    uint32      hashm_spares[HASH_MAX_SPLITPOINTS]; /* spare pages before each
                                                     * splitpoint */
    BlockNumber hashm_mapp[HASH_MAX_BITMAPS];   /* blknos of ovfl bitmaps */
} HashMetaPageData;

typedef HashMetaPageData *HashMetaPage;
typedef struct HashOptions
{
    int32       varlena_header_;    /* varlena header (do not touch directly!) */
    int         fillfactor;     /* page fill factor in percent (0..100) */
} HashOptions;

#define HashGetFillFactor(relation) \
    (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
                 relation->rd_rel->relam == HASH_AM_OID), \
     (relation)->rd_options ? \
     ((HashOptions *) (relation)->rd_options)->fillfactor : \
     HASH_DEFAULT_FILLFACTOR)
#define HashGetTargetPageUsage(relation) \
    (BLCKSZ * HashGetFillFactor(relation) / 100)
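/*
 * Worked example (editor's illustration, not part of the original header):
 * with the default 8192-byte BLCKSZ and HASH_DEFAULT_FILLFACTOR of 75
 * (defined below), HashGetTargetPageUsage() yields 8192 * 75 / 100 = 6144
 * bytes of target tuple space per page.
 */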
/*
 * Maximum size of a hash index item (it's okay to have only one per page)
 */
#define HashMaxItemSize(page) \
    MAXALIGN_DOWN(PageGetPageSize(page) - \
                  SizeOfPageHeaderData - \
                  sizeof(ItemIdData) - \
                  MAXALIGN(sizeof(HashPageOpaqueData)))
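/*
 * Worked example (editor's illustration, not part of the original header):
 * on a typical 8-byte-aligned build with an 8192-byte page, a 24-byte page
 * header, a 4-byte line pointer, and a 16-byte (MAXALIGN'd) special area,
 * HashMaxItemSize() is MAXALIGN_DOWN(8192 - 24 - 4 - 16) = 8144 bytes.
 */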
#define INDEX_MOVED_BY_SPLIT_MASK   INDEX_AM_RESERVED_BIT

#define HASH_MIN_FILLFACTOR         10
#define HASH_DEFAULT_FILLFACTOR     75

/*
 * Constants
 */
#define BYTE_TO_BIT     3       /* 2^3 bits/byte */
#define ALL_SET         ((uint32) ~0)

/*
 * Bitmap pages do not contain tuples.  They do contain the standard
 * page headers and trailers; however, everything in between is a
 * giant bit array.  The number of bits that fit on a page obviously
 * depends on the page size and the header/trailer overhead.  We require
 * the number of bits per page to be a power of 2.
 */
#define BMPGSZ_BYTE(metap)      ((metap)->hashm_bmsize)
#define BMPGSZ_BIT(metap)       ((metap)->hashm_bmsize << BYTE_TO_BIT)
#define BMPG_SHIFT(metap)       ((metap)->hashm_bmshift)
#define BMPG_MASK(metap)        (BMPGSZ_BIT(metap) - 1)

#define HashPageGetBitmap(page) \
    ((uint32 *) PageGetContents(page))

#define HashGetMaxBitmapSize(page) \
    (PageGetPageSize((Page) page) - \
     (MAXALIGN(SizeOfPageHeaderData) + MAXALIGN(sizeof(HashPageOpaqueData))))

#define HashPageGetMeta(page) \
    ((HashMetaPage) PageGetContents(page))

/*
 * The number of bits in an ovflpage bitmap word.
 */
#define BITS_PER_MAP    32      /* Number of bits in uint32 */

/* Given the address of the beginning of a bit map, clear/set the nth bit */
#define CLRBIT(A, N)    ((A)[(N)/BITS_PER_MAP] &= ~(1<<((N)%BITS_PER_MAP)))
#define SETBIT(A, N)    ((A)[(N)/BITS_PER_MAP] |= (1<<((N)%BITS_PER_MAP)))
#define ISSET(A, N)     ((A)[(N)/BITS_PER_MAP] & (1<<((N)%BITS_PER_MAP)))
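/*
 * Example (editor's sketch, not part of the original header): marking an
 * overflow page in use in its bitmap page and verifying the result, using
 * the accessors above.  "bit" is the page's bit number within this bitmap
 * page, i.e. already reduced with BMPG_MASK().
 */
static inline void
hash_bitmap_mark_used_example(Page bitmappage, uint32 bit)
{
    uint32     *bitmap = HashPageGetBitmap(bitmappage);

    SETBIT(bitmap, bit);
    Assert(ISSET(bitmap, bit));
}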
/*
 * page-level and high-level locking modes (see README)
 */
#define HASH_READ       BUFFER_LOCK_SHARE
#define HASH_WRITE      BUFFER_LOCK_EXCLUSIVE
#define HASH_NOLOCK     (-1)

/*
 * When a new operator class is declared, we require that the user supply
 * us with an amproc function for hashing a key of the new type, returning
 * a 32-bit hash value.  We call this the "standard" hash function.  We
 * also allow an optional "extended" hash function which accepts a salt and
 * returns a 64-bit hash value.  This is highly recommended but, for reasons
 * of backward compatibility, optional.
 *
 * When the salt is 0, the low 32 bits of the value returned by the extended
 * hash function should match the value that would have been returned by the
 * standard hash function.
 */
#define HASHSTANDARD_PROC   1
#define HASHEXTENDED_PROC   2
#define HASHOPTIONS_PROC    3
#define HASHNProcs          3
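/*
 * Example (editor's sketch, not part of the original header): the
 * consistency rule above, expressed as a check.  "standard" is the result of
 * an opclass's HASHSTANDARD_PROC for some key, and "extended" is the result
 * of its HASHEXTENDED_PROC for the same key with a zero salt.
 */
static inline bool
hash_procs_consistent_example(uint32 standard, uint64 extended)
{
    return (uint32) extended == standard;
}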
/* public routines */

extern IndexBuildResult *hashbuild(Relation heap, Relation index,
                                   struct IndexInfo *indexInfo);
extern void hashbuildempty(Relation index);
extern bool hashinsert(Relation rel, Datum *values, bool *isnull,
                       ItemPointer ht_ctid, Relation heapRel,
                       IndexUniqueCheck checkUnique,
                       bool indexUnchanged,
                       struct IndexInfo *indexInfo);
extern bool hashgettuple(IndexScanDesc scan, ScanDirection dir);
extern int64 hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
extern IndexScanDesc hashbeginscan(Relation rel, int nkeys, int norderbys);
extern void hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
                       ScanKey orderbys, int norderbys);
extern void hashendscan(IndexScanDesc scan);
extern IndexBulkDeleteResult *hashbulkdelete(IndexVacuumInfo *info,
                                             IndexBulkDeleteResult *stats,
                                             IndexBulkDeleteCallback callback,
                                             void *callback_state);
extern IndexBulkDeleteResult *hashvacuumcleanup(IndexVacuumInfo *info,
                                                IndexBulkDeleteResult *stats);
extern bytea *hashoptions(Datum reloptions, bool validate);
extern bool hashvalidate(Oid opclassoid);
extern void hashadjustmembers(Oid opfamilyoid,
                              Oid opclassoid,
                              List *operators,
                              List *functions);

/* private routines */

/* hashinsert.c */
extern void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel);
extern OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
                                   Size itemsize, IndexTuple itup);
extern void _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups,
                                OffsetNumber *itup_offsets, uint16 nitups);

/* hashovfl.c */
extern Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin);
extern BlockNumber _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
                                      Buffer wbuf, IndexTuple *itups, OffsetNumber *itup_offsets,
                                      Size *tups_size, uint16 nitups, BufferAccessStrategy bstrategy);
extern void _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage);
extern void _hash_squeezebucket(Relation rel,
                                Bucket bucket, BlockNumber bucket_blkno,
                                Buffer bucket_buf,
                                BufferAccessStrategy bstrategy);
extern uint32 _hash_ovflblkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno);

/* hashpage.c */
extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno,
                           int access, int flags);
extern Buffer _hash_getbuf_with_condlock_cleanup(Relation rel,
                                                 BlockNumber blkno, int flags);
extern HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf,
                                         bool force_refresh);
extern Buffer _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey,
                                              int access,
                                              HashMetaPage *cachedmetap);
extern Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno);
extern void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket,
                          uint32 flag, bool initpage);
extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno,
                              ForkNumber forkNum);
extern Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
                                         int access, int flags,
                                         BufferAccessStrategy bstrategy);
extern void _hash_relbuf(Relation rel, Buffer buf);
extern void _hash_dropbuf(Relation rel, Buffer buf);
extern void _hash_dropscanbuf(Relation rel, HashScanOpaque so);
extern uint32 _hash_init(Relation rel, double num_tuples,
                         ForkNumber forkNum);
extern void _hash_init_metabuffer(Buffer buf, double num_tuples,
                                  RegProcedure procid, uint16 ffactor, bool initpage);
extern void _hash_pageinit(Page page, Size size);
extern void _hash_expandtable(Relation rel, Buffer metabuf);
extern void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf,
                               Bucket obucket, uint32 maxbucket, uint32 highmask,
                               uint32 lowmask);

/* hashsearch.c */
extern bool _hash_next(IndexScanDesc scan, ScanDirection dir);
extern bool _hash_first(IndexScanDesc scan, ScanDirection dir);

/* hashsort.c */
typedef struct HSpool HSpool;   /* opaque struct in hashsort.c */

extern HSpool *_h_spoolinit(Relation heap, Relation index, uint32 num_buckets);
extern void _h_spooldestroy(HSpool *hspool);
extern void _h_spool(HSpool *hspool, ItemPointer self,
                     Datum *values, bool *isnull);
extern void _h_indexbuild(HSpool *hspool, Relation heapRel);

/* hashutil.c */
extern bool _hash_checkqual(IndexScanDesc scan, IndexTuple itup);
extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
                                   uint32 highmask, uint32 lowmask);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
extern uint32 _hash_get_indextuple_hashkey(IndexTuple itup);
extern bool _hash_convert_tuple(Relation index,
                                Datum *user_values, bool *user_isnull,
                                Datum *index_values, bool *index_isnull);
extern OffsetNumber _hash_binsearch(Page page, uint32 hash_value);
extern OffsetNumber _hash_binsearch_last(Page page, uint32 hash_value);
extern BlockNumber _hash_get_oldblock_from_newbucket(Relation rel, Bucket new_bucket);
extern BlockNumber _hash_get_newblock_from_oldbucket(Relation rel, Bucket old_bucket);
extern Bucket _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket,
                                                 uint32 lowmask, uint32 maxbucket);
extern void _hash_kill_items(IndexScanDesc scan);

/* hash.c */
extern void hashbucketcleanup(Relation rel, Bucket cur_bucket,
                              Buffer bucket_buf, BlockNumber bucket_blkno,
                              BufferAccessStrategy bstrategy,
                              uint32 maxbucket, uint32 highmask, uint32 lowmask,
                              double *tuples_removed, double *num_index_tuples,
                              bool split_cleanup,
                              IndexBulkDeleteCallback callback, void *callback_state);

#endif                          /* HASH_H */