return (rblk->next_page == rrpc->dev->pgs_per_blk);
}
-static sector_t block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
struct nvm_block *blk = rblk->parent;
return blk->id * rrpc->dev->pgs_per_blk;
}
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev,
- sector_t addr)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
struct ppa_addr paddr;
struct page *page;
int slot;
int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
- sector_t phys_addr;
+ u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
}
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
- struct rrpc_block *rblk, sector_t paddr)
+ struct rrpc_block *rblk, u64 paddr)
{
struct rrpc_addr *gp;
struct rrpc_rev_addr *rev;
return gp;
}
-static sector_t rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- sector_t addr = ADDR_EMPTY;
+ u64 addr = ADDR_EMPTY;
spin_lock(&rblk->lock);
if (block_is_full(rrpc, rblk))
struct rrpc_lun *rlun;
struct rrpc_block *rblk;
struct nvm_lun *lun;
- sector_t paddr;
+ u64 paddr;
rlun = rrpc_get_lun_rr(rrpc, is_gc);
lun = rlun->parent;
struct nvm_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
- sector_t paddr, pladdr;
+ u64 paddr, pladdr;
for (offset = 0; offset < dev->pgs_per_blk; offset++) {
paddr = block_to_addr(rrpc, rblk) + offset;
struct nvm_dev *dev;
struct gendisk *disk;
- sector_t poffset; /* physical page offset */
+ u64 poffset; /* physical page offset */
int lun_offset;
int nr_luns;
/* Logical to physical mapping */
struct rrpc_addr {
- sector_t addr;
+ u64 addr;
struct rrpc_block *rblk;
};
/* Physical to logical mapping */
struct rrpc_rev_addr {
- sector_t addr;
+ u64 addr;
};
static inline sector_t rrpc_get_laddr(struct bio *bio)
union {
/* Channel-based PPA format in nand 4x2x2x2x8x10 */
struct {
- sector_t ch : 4;
- sector_t sec : 2; /* 4 sectors per page */
- sector_t pl : 2; /* 4 planes per LUN */
- sector_t lun : 2; /* 4 LUNs per channel */
- sector_t pg : 8; /* 256 pages per block */
- sector_t blk : 10;/* 1024 blocks per plane */
- sector_t resved : 36;
+ u64 ch : 4;
+ u64 sec : 2; /* 4 sectors per page */
+ u64 pl : 2; /* 4 planes per LUN */
+ u64 lun : 2; /* 4 LUNs per channel */
+ u64 pg : 8; /* 256 pages per block */
+ u64 blk : 10; /* 1024 blocks per plane */
+ u64 resved : 36;
} chnl;
/* Generic structure for all addresses */
struct {
- sector_t sec : NVM_SEC_BITS;
- sector_t pl : NVM_PL_BITS;
- sector_t pg : NVM_PG_BITS;
- sector_t blk : NVM_BLK_BITS;
- sector_t lun : NVM_LUN_BITS;
- sector_t ch : NVM_CH_BITS;
+ u64 sec : NVM_SEC_BITS;
+ u64 pl : NVM_PL_BITS;
+ u64 pg : NVM_PG_BITS;
+ u64 blk : NVM_BLK_BITS;
+ u64 lun : NVM_LUN_BITS;
+ u64 ch : NVM_CH_BITS;
} g;
- sector_t ppa;
+ u64 ppa;
};
} __packed;