diff --git a/engine.c b/engine.c
index afaae1f..6022c17 100644
--- a/engine.c
+++ b/engine.c
@@ -672,9 +672,44 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid)
 	FreeDir(dir);				/* we ignore any error here */
 }
 
+/*
+ * Get a second position within the ptrack map that falls into the same
+ * CPU cache line as slot1, so probing both slots costs one cache miss.
+ */
+size_t
+get_slot2(size_t slot1, uint64 hash)
+{
+	size_t		cache_line_ep;	/* ending point of the cache line */
+	size_t		cache_line_sp;	/* starting point of the cache line */
+	size_t		cache_line_interval;
+	size_t		slot2;
+
+	/*
+	 * Get the ending point of the cache line within entries[].  Align the
+	 * end of slot1's entry so that a slot starting exactly on a cache
+	 * line boundary is attributed to its own line, not the previous one.
+	 */
+	cache_line_ep = (CACHE_LINE_ALIGN(offsetof(PtrackMapHdr, entries) + (slot1 + 1) * sizeof(XLogRecPtr))
+					 - offsetof(PtrackMapHdr, entries)) / sizeof(XLogRecPtr);
+	/* Handle overflow beyond the entries[] boundary. */
+	cache_line_ep = cache_line_ep > PtrackContentNblocks ? PtrackContentNblocks : cache_line_ep;
+
+	/* Get the starting point of the cache line within entries[]. */
+	cache_line_sp = cache_line_ep - ENTRIES_PER_LINE;
+
+	/* Handle underflow below zero (sp wraps around and exceeds ep). */
+	cache_line_sp = cache_line_sp > cache_line_ep ? 0 : cache_line_sp;
+
+	cache_line_interval = cache_line_ep - cache_line_sp;
+	slot2 = (size_t)(cache_line_sp + (((hash << 32) | (hash >> 32)) % cache_line_interval));
+	/* On collision pick the next slot, staying within the same cache line. */
+	slot2 = (slot1 == slot2) ? (cache_line_sp + ((slot1 - cache_line_sp + 1) % cache_line_interval)) : slot2;
+	return slot2;
+}
+
 /*
  * Mark modified block in ptrack_map.
  */
 void
 ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 				  ForkNumber forknum, BlockNumber blocknum)
@@ -703,7 +738,7 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 
 	hash = BID_HASH_FUNC(bid);
 	slot1 = (size_t)(hash % PtrackContentNblocks);
-	slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks);
+	slot2 = get_slot2(slot1, hash);
 
 	if (RecoveryInProgress())
 		new_lsn = GetXLogReplayRecPtr(NULL);
diff --git a/engine.h b/engine.h
index 56777fc..4bcac22 100644
--- a/engine.h
+++ b/engine.h
@@ -40,6 +40,14 @@
  */
 #define PTRACK_BUF_SIZE ((uint64) 8000)
 
+/*
+ * Assumed CPU cache line size.  64 bytes is a reasonable assumption for
+ * most systems; PostgreSQL core uses the same value for this purpose.
+ */
+#define CACHE_LINE_SIZE 64
+#define CACHE_LINE_ALIGN(LEN) TYPEALIGN(CACHE_LINE_SIZE, (LEN))
+#define ENTRIES_PER_LINE ((CACHE_LINE_SIZE) / sizeof(XLogRecPtr))
+
 /* Ptrack magic bytes */
 #define PTRACK_MAGIC "ptk"
 #define PTRACK_MAGIC_SIZE 4
@@ -117,6 +125,7 @@
 extern void ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 							  ForkNumber forkno, BlockNumber blkno);
 extern bool is_cfm_file_path(const char *path);
+extern size_t get_slot2(size_t slot1, uint64 hash);
 #ifdef PGPRO_EE
 extern off_t get_cfs_relation_file_decompressed_size(RelFileNodeBackend rnode,
 													 const char *fullpath, ForkNumber forknum);
diff --git a/ptrack.c b/ptrack.c
index 22a2acf..955b13a 100644
--- a/ptrack.c
+++ b/ptrack.c
@@ -651,7 +651,7 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS)
 		/* Only probe the second slot if the first one is marked */
 		if (update_lsn1 >= ctx->lsn)
 		{
-			slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks);
+			slot2 = get_slot2(slot1, hash);
 			update_lsn2 = pg_atomic_read_u64(&ptrack_map->entries[slot2]);
 
 			if (update_lsn2 != InvalidXLogRecPtr)