/* include/linux/lightnvm.h — LightNVM (Open-Channel SSD) host interface, Linux v4.11 */
1#ifndef NVM_H 2#define NVM_H 3 4#include <linux/blkdev.h> 5#include <linux/types.h> 6#include <uapi/linux/lightnvm.h> 7 8enum { 9 NVM_IO_OK = 0, 10 NVM_IO_REQUEUE = 1, 11 NVM_IO_DONE = 2, 12 NVM_IO_ERR = 3, 13 14 NVM_IOTYPE_NONE = 0, 15 NVM_IOTYPE_GC = 1, 16}; 17 18#define NVM_BLK_BITS (16) 19#define NVM_PG_BITS (16) 20#define NVM_SEC_BITS (8) 21#define NVM_PL_BITS (8) 22#define NVM_LUN_BITS (8) 23#define NVM_CH_BITS (7) 24 25struct ppa_addr { 26 /* Generic structure for all addresses */ 27 union { 28 struct { 29 u64 blk : NVM_BLK_BITS; 30 u64 pg : NVM_PG_BITS; 31 u64 sec : NVM_SEC_BITS; 32 u64 pl : NVM_PL_BITS; 33 u64 lun : NVM_LUN_BITS; 34 u64 ch : NVM_CH_BITS; 35 u64 reserved : 1; 36 } g; 37 38 struct { 39 u64 line : 63; 40 u64 is_cached : 1; 41 } c; 42 43 u64 ppa; 44 }; 45}; 46 47struct nvm_rq; 48struct nvm_id; 49struct nvm_dev; 50struct nvm_tgt_dev; 51 52typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); 53typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); 54typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, 55 nvm_l2p_update_fn *, void *); 56typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); 57typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); 58typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); 59typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); 60typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); 61typedef void (nvm_destroy_dma_pool_fn)(void *); 62typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, 63 dma_addr_t *); 64typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); 65 66struct nvm_dev_ops { 67 nvm_id_fn *identity; 68 nvm_get_l2p_tbl_fn *get_l2p_tbl; 69 nvm_op_bb_tbl_fn *get_bb_tbl; 70 nvm_op_set_bb_fn *set_bb_tbl; 71 72 nvm_submit_io_fn *submit_io; 73 nvm_erase_blk_fn *erase_block; 74 75 nvm_create_dma_pool_fn *create_dma_pool; 76 nvm_destroy_dma_pool_fn *destroy_dma_pool; 77 nvm_dev_dma_alloc_fn 
*dev_dma_alloc; 78 nvm_dev_dma_free_fn *dev_dma_free; 79 80 unsigned int max_phys_sect; 81}; 82 83#ifdef CONFIG_NVM 84 85#include <linux/blkdev.h> 86#include <linux/file.h> 87#include <linux/dmapool.h> 88#include <uapi/linux/lightnvm.h> 89 90enum { 91 /* HW Responsibilities */ 92 NVM_RSP_L2P = 1 << 0, 93 NVM_RSP_ECC = 1 << 1, 94 95 /* Physical Adressing Mode */ 96 NVM_ADDRMODE_LINEAR = 0, 97 NVM_ADDRMODE_CHANNEL = 1, 98 99 /* Plane programming mode for LUN */ 100 NVM_PLANE_SINGLE = 1, 101 NVM_PLANE_DOUBLE = 2, 102 NVM_PLANE_QUAD = 4, 103 104 /* Status codes */ 105 NVM_RSP_SUCCESS = 0x0, 106 NVM_RSP_NOT_CHANGEABLE = 0x1, 107 NVM_RSP_ERR_FAILWRITE = 0x40ff, 108 NVM_RSP_ERR_EMPTYPAGE = 0x42ff, 109 NVM_RSP_ERR_FAILECC = 0x4281, 110 NVM_RSP_ERR_FAILCRC = 0x4004, 111 NVM_RSP_WARN_HIGHECC = 0x4700, 112 113 /* Device opcodes */ 114 NVM_OP_HBREAD = 0x02, 115 NVM_OP_HBWRITE = 0x81, 116 NVM_OP_PWRITE = 0x91, 117 NVM_OP_PREAD = 0x92, 118 NVM_OP_ERASE = 0x90, 119 120 /* PPA Command Flags */ 121 NVM_IO_SNGL_ACCESS = 0x0, 122 NVM_IO_DUAL_ACCESS = 0x1, 123 NVM_IO_QUAD_ACCESS = 0x2, 124 125 /* NAND Access Modes */ 126 NVM_IO_SUSPEND = 0x80, 127 NVM_IO_SLC_MODE = 0x100, 128 NVM_IO_SCRAMBLE_DISABLE = 0x200, 129 130 /* Block Types */ 131 NVM_BLK_T_FREE = 0x0, 132 NVM_BLK_T_BAD = 0x1, 133 NVM_BLK_T_GRWN_BAD = 0x2, 134 NVM_BLK_T_DEV = 0x4, 135 NVM_BLK_T_HOST = 0x8, 136 137 /* Memory capabilities */ 138 NVM_ID_CAP_SLC = 0x1, 139 NVM_ID_CAP_CMD_SUSPEND = 0x2, 140 NVM_ID_CAP_SCRAMBLE = 0x4, 141 NVM_ID_CAP_ENCRYPT = 0x8, 142 143 /* Memory types */ 144 NVM_ID_FMTYPE_SLC = 0, 145 NVM_ID_FMTYPE_MLC = 1, 146 147 /* Device capabilities */ 148 NVM_ID_DCAP_BBLKMGMT = 0x1, 149 NVM_UD_DCAP_ECC = 0x2, 150}; 151 152struct nvm_id_lp_mlc { 153 u16 num_pairs; 154 u8 pairs[886]; 155}; 156 157struct nvm_id_lp_tbl { 158 __u8 id[8]; 159 struct nvm_id_lp_mlc mlc; 160}; 161 162struct nvm_id_group { 163 u8 mtype; 164 u8 fmtype; 165 u8 num_ch; 166 u8 num_lun; 167 u8 num_pln; 168 u16 num_blk; 169 u16 num_pg; 170 
u16 fpg_sz; 171 u16 csecs; 172 u16 sos; 173 u32 trdt; 174 u32 trdm; 175 u32 tprt; 176 u32 tprm; 177 u32 tbet; 178 u32 tbem; 179 u32 mpos; 180 u32 mccap; 181 u16 cpar; 182 183 struct nvm_id_lp_tbl lptbl; 184}; 185 186struct nvm_addr_format { 187 u8 ch_offset; 188 u8 ch_len; 189 u8 lun_offset; 190 u8 lun_len; 191 u8 pln_offset; 192 u8 pln_len; 193 u8 blk_offset; 194 u8 blk_len; 195 u8 pg_offset; 196 u8 pg_len; 197 u8 sect_offset; 198 u8 sect_len; 199}; 200 201struct nvm_id { 202 u8 ver_id; 203 u8 vmnt; 204 u32 cap; 205 u32 dom; 206 struct nvm_addr_format ppaf; 207 struct nvm_id_group grp; 208} __packed; 209 210struct nvm_target { 211 struct list_head list; 212 struct nvm_tgt_dev *dev; 213 struct nvm_tgt_type *type; 214 struct gendisk *disk; 215}; 216 217#define ADDR_EMPTY (~0ULL) 218 219#define NVM_VERSION_MAJOR 1 220#define NVM_VERSION_MINOR 0 221#define NVM_VERSION_PATCH 0 222 223struct nvm_rq; 224typedef void (nvm_end_io_fn)(struct nvm_rq *); 225 226struct nvm_rq { 227 struct nvm_tgt_dev *dev; 228 229 struct bio *bio; 230 231 union { 232 struct ppa_addr ppa_addr; 233 dma_addr_t dma_ppa_list; 234 }; 235 236 struct ppa_addr *ppa_list; 237 238 void *meta_list; 239 dma_addr_t dma_meta_list; 240 241 struct completion *wait; 242 nvm_end_io_fn *end_io; 243 244 uint8_t opcode; 245 uint16_t nr_ppas; 246 uint16_t flags; 247 248 u64 ppa_status; /* ppa media status */ 249 int error; 250 251 void *private; 252}; 253 254static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) 255{ 256 return pdu - sizeof(struct nvm_rq); 257} 258 259static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) 260{ 261 return rqdata + 1; 262} 263 264enum { 265 NVM_BLK_ST_FREE = 0x1, /* Free block */ 266 NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ 267 NVM_BLK_ST_BAD = 0x8, /* Bad block */ 268}; 269 270/* Device generic information */ 271struct nvm_geo { 272 int nr_chnls; 273 int nr_luns; 274 int luns_per_chnl; /* -1 if channels are not symmetric */ 275 int nr_planes; 276 int sec_per_pg; /* only 
sectors for a single page */ 277 int pgs_per_blk; 278 int blks_per_lun; 279 int fpg_size; 280 int pfpg_size; /* size of buffer if all pages are to be read */ 281 int sec_size; 282 int oob_size; 283 int mccap; 284 struct nvm_addr_format ppaf; 285 286 /* Calculated/Cached values. These do not reflect the actual usable 287 * blocks at run-time. 288 */ 289 int max_rq_size; 290 int plane_mode; /* drive device in single, double or quad mode */ 291 292 int sec_per_pl; /* all sectors across planes */ 293 int sec_per_blk; 294 int sec_per_lun; 295}; 296 297/* sub-device structure */ 298struct nvm_tgt_dev { 299 /* Device information */ 300 struct nvm_geo geo; 301 302 /* Base ppas for target LUNs */ 303 struct ppa_addr *luns; 304 305 sector_t total_secs; 306 307 struct nvm_id identity; 308 struct request_queue *q; 309 310 struct nvm_dev *parent; 311 void *map; 312}; 313 314struct nvm_dev { 315 struct nvm_dev_ops *ops; 316 317 struct list_head devices; 318 319 /* Device information */ 320 struct nvm_geo geo; 321 322 /* lower page table */ 323 int lps_per_blk; 324 int *lptbl; 325 326 unsigned long total_secs; 327 328 unsigned long *lun_map; 329 void *dma_pool; 330 331 struct nvm_id identity; 332 333 /* Backend device */ 334 struct request_queue *q; 335 char name[DISK_NAME_LEN]; 336 void *private_data; 337 338 void *rmap; 339 340 struct mutex mlock; 341 spinlock_t lock; 342 343 /* target management */ 344 struct list_head area_list; 345 struct list_head targets; 346}; 347 348static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, 349 u64 pba) 350{ 351 struct ppa_addr l; 352 int secs, pgs, blks, luns; 353 sector_t ppa = pba; 354 355 l.ppa = 0; 356 357 div_u64_rem(ppa, geo->sec_per_pg, &secs); 358 l.g.sec = secs; 359 360 sector_div(ppa, geo->sec_per_pg); 361 div_u64_rem(ppa, geo->pgs_per_blk, &pgs); 362 l.g.pg = pgs; 363 364 sector_div(ppa, geo->pgs_per_blk); 365 div_u64_rem(ppa, geo->blks_per_lun, &blks); 366 l.g.blk = blks; 367 368 sector_div(ppa, 
geo->blks_per_lun); 369 div_u64_rem(ppa, geo->luns_per_chnl, &luns); 370 l.g.lun = luns; 371 372 sector_div(ppa, geo->luns_per_chnl); 373 l.g.ch = ppa; 374 375 return l; 376} 377 378static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, 379 struct ppa_addr r) 380{ 381 struct nvm_geo *geo = &tgt_dev->geo; 382 struct ppa_addr l; 383 384 l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; 385 l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset; 386 l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset; 387 l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset; 388 l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset; 389 l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset; 390 391 return l; 392} 393 394static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, 395 struct ppa_addr r) 396{ 397 struct nvm_geo *geo = &tgt_dev->geo; 398 struct ppa_addr l; 399 400 l.ppa = 0; 401 /* 402 * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. 403 */ 404 l.g.blk = (r.ppa >> geo->ppaf.blk_offset) & 405 (((1 << geo->ppaf.blk_len) - 1)); 406 l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) & 407 (((1 << geo->ppaf.pg_len) - 1)); 408 l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) & 409 (((1 << geo->ppaf.sect_len) - 1)); 410 l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) & 411 (((1 << geo->ppaf.pln_len) - 1)); 412 l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) & 413 (((1 << geo->ppaf.lun_len) - 1)); 414 l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) & 415 (((1 << geo->ppaf.ch_len) - 1)); 416 417 return l; 418} 419 420static inline int ppa_empty(struct ppa_addr ppa_addr) 421{ 422 return (ppa_addr.ppa == ADDR_EMPTY); 423} 424 425static inline void ppa_set_empty(struct ppa_addr *ppa_addr) 426{ 427 ppa_addr->ppa = ADDR_EMPTY; 428} 429 430static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2) 431{ 432 if (ppa_empty(ppa1) || ppa_empty(ppa2)) 433 return 0; 434 435 return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) && 436 (ppa1.g.blk == ppa2.g.blk)); 437} 438 439typedef 
blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); 440typedef sector_t (nvm_tgt_capacity_fn)(void *); 441typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *); 442typedef void (nvm_tgt_exit_fn)(void *); 443typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); 444typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); 445 446struct nvm_tgt_type { 447 const char *name; 448 unsigned int version[3]; 449 450 /* target entry points */ 451 nvm_tgt_make_rq_fn *make_rq; 452 nvm_tgt_capacity_fn *capacity; 453 454 /* module-specific init/teardown */ 455 nvm_tgt_init_fn *init; 456 nvm_tgt_exit_fn *exit; 457 458 /* sysfs */ 459 nvm_tgt_sysfs_init_fn *sysfs_init; 460 nvm_tgt_sysfs_exit_fn *sysfs_exit; 461 462 /* For internal use */ 463 struct list_head list; 464}; 465 466extern struct nvm_tgt_type *nvm_find_target_type(const char *, int); 467 468extern int nvm_register_tgt_type(struct nvm_tgt_type *); 469extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); 470 471extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); 472extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); 473 474extern struct nvm_dev *nvm_alloc_dev(int); 475extern int nvm_register(struct nvm_dev *); 476extern void nvm_unregister(struct nvm_dev *); 477 478extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, 479 int, int); 480extern int nvm_max_phys_sects(struct nvm_tgt_dev *); 481extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); 482extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, 483 const struct ppa_addr *, int, int); 484extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); 485extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int); 486extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, 487 void *); 488extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); 489extern void nvm_put_area(struct nvm_tgt_dev *, sector_t); 
490extern void nvm_end_io(struct nvm_rq *); 491extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); 492extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); 493 494extern int nvm_dev_factory(struct nvm_dev *, int flags); 495 496extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int); 497 498#else /* CONFIG_NVM */ 499struct nvm_dev_ops; 500 501static inline struct nvm_dev *nvm_alloc_dev(int node) 502{ 503 return ERR_PTR(-EINVAL); 504} 505static inline int nvm_register(struct nvm_dev *dev) 506{ 507 return -EINVAL; 508} 509static inline void nvm_unregister(struct nvm_dev *dev) {} 510#endif /* CONFIG_NVM */ 511#endif /* LIGHTNVM.H */