Merge tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:

- Remove the skd driver. It's been EOL for a long time (Damien)

- NVMe pull requests
    - fix multipath handling of ->queue_rq errors (Chao Leng)
    - nvmet cleanups (Chaitanya Kulkarni)
    - add a quirk for buggy Amazon controller (Filippo Sironi)
    - avoid devm allocations in nvme-hwmon that don't interact well
      with fabrics (Hannes Reinecke)
    - sysfs cleanups (Jiapeng Chong)
    - fix nr_zones for multipath (Keith Busch)
    - nvme-tcp crash fix for no-data commands (Sagi Grimberg)
    - nvmet-tcp fixes (Sagi Grimberg)
    - add a missing __rcu annotation (Christoph)
    - failed reconnect fixes (Chao Leng)
    - various tracing improvements (Michal Krakowiak, Johannes
      Thumshirn)
    - switch the nvmet-fc assoc_list to use RCU protection (Leonid
      Ravich)
    - resync the status codes with the latest spec (Max Gurtovoy)
    - minor nvme-tcp improvements (Sagi Grimberg)
    - various cleanups (Rikard Falkeborn, Minwoo Im, Chaitanya
      Kulkarni, Israel Rukshin)

- Floppy O_NDELAY fix (Denis)

- MD pull request
    - raid5 chunk_sectors fix (Guoqing)

- Use lore links (Kees)

- Use DEFINE_SHOW_ATTRIBUTE for nbd (Liao)

- loop lock scaling (Pavel)

- mtip32xx PCI fixes (Bjorn)

- bcache fixes (Kai, Dongdong)

- Misc fixes (Tian, Yang, Guoqing, Joe, Andy)

* tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block: (64 commits)
lightnvm: pblk: Replace guid_copy() with export_guid()/import_guid()
lightnvm: fix unnecessary NULL check warnings
nvme-tcp: fix crash triggered with a dataless request submission
block: Replace lkml.org links with lore
nbd: Convert to DEFINE_SHOW_ATTRIBUTE
nvme: add 48-bit DMA address quirk for Amazon NVMe controllers
nvme-hwmon: rework to avoid devm allocation
nvmet: remove else at the end of the function
nvmet: add nvmet_req_subsys() helper
nvmet: use min of device_path and disk len
nvmet: use invalid cmd opcode helper
nvmet: use invalid cmd opcode helper
nvmet: add helper to report invalid opcode
nvmet: remove extra variable in id-ns handler
nvmet: make nvmet_find_namespace() req based
nvmet: return uniform error for invalid ns
nvmet: set status to 0 in case for invalid nsid
nvmet-fc: add a missing __rcu annotation to nvmet_fc_tgt_assoc.queues
nvme-multipath: set nr_zones for zoned namespaces
nvmet-tcp: fix potential race of tcp socket closing accept_work
...

+665 -4376
-6
MAINTAINERS
···
16908 16908   F: kernel/jump_label.c
16909 16909   F: kernel/static_call.c
16910 16910
16911     -   STEC S1220 SKD DRIVER
16912     -   M: Damien Le Moal <Damien.LeMoal@wdc.com>
16913     -   L: linux-block@vger.kernel.org
16914     -   S: Maintained
16915     -   F: drivers/block/skd*[ch]
16916     -
16917 16911   STI AUDIO (ASoC) DRIVERS
16918 16912   M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
16919 16913   L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-10
drivers/block/Kconfig
···
267 267
268 268           If unsure, say N.
269 269
270   -   config BLK_DEV_SKD
271   -           tristate "STEC S1120 Block Driver"
272   -           depends on PCI
273   -           depends on 64BIT
274   -           help
275   -           Saying Y or M here will enable support for the
276   -           STEC, Inc. S1120 PCIe SSD.
277   -
278   -           Use device /dev/skd$N amd /dev/skd$Np$M.
279   -
280 270   config BLK_DEV_SX8
281 271           tristate "Promise SATA SX8 support"
282 272           depends on PCI
-2
drivers/block/Makefile
···
22 22   obj-$(CONFIG_XILINX_SYSACE)     += xsysace.o
23 23   obj-$(CONFIG_CDROM_PKTCDVD)     += pktcdvd.o
24 24   obj-$(CONFIG_SUNVDC)            += sunvdc.o
25  -   obj-$(CONFIG_BLK_DEV_SKD)       += skd.o
26 25
27 26   obj-$(CONFIG_BLK_DEV_UMEM)      += umem.o
28 27   obj-$(CONFIG_BLK_DEV_NBD)       += nbd.o
···
42 43
43 44   obj-$(CONFIG_BLK_DEV_NULL_BLK)  += null_blk/
44 45
45  -   skd-y           := skd_main.o
46 46   swim_mod-y      := swim.o swim_asm.o
+1 -1
drivers/block/aoe/aoecmd.c
···
1046 1046
1047 1047           __blk_mq_end_request(rq, err);
1048 1048
1049    -           /* cf. http://lkml.org/lkml/2006/10/31/28 */
1049    +           /* cf. https://lore.kernel.org/lkml/20061031071040.GS14055@kernel.dk/ */
1050 1050           if (!fastfail)
1051 1051                   blk_mq_run_hw_queues(q, true);
1052 1052   }
+1 -1
drivers/block/drbd/drbd_int.h
···
1447 1447
1448 1448   /* drbd_req */
1449 1449   extern void do_submit(struct work_struct *ws);
1450    -   extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1450    +   extern void __drbd_make_request(struct drbd_device *, struct bio *);
1451 1451   extern blk_qc_t drbd_submit_bio(struct bio *bio);
1452 1452   extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1453 1453   extern int is_valid_ar_handle(struct drbd_request *, sector_t);
+1 -2
drivers/block/drbd/drbd_main.c
···
2275 2275   list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2276 2276           struct drbd_device *device = req->device;
2277 2277           struct bio *bio = req->master_bio;
2278    -           unsigned long start_jif = req->start_jif;
2279 2278           bool expected;
2280 2279
2281 2280           expected =
···
2309 2310           /* We are not just doing submit_bio_noacct(),
2310 2311            * as we want to keep the start_time information. */
2311 2312           inc_ap_bio(device);
2312    -           __drbd_make_request(device, bio, start_jif);
2313    +           __drbd_make_request(device, bio);
2313 2314           }
2314 2315   }
2315 2316
+4 -2
drivers/block/drbd/drbd_receiver.c
···
111 111   {
112 112           struct page *tmp;
113 113           int i = 1;
114   -           while ((tmp = page_chain_next(page)))
115   -                   ++i, page = tmp;
114   +           while ((tmp = page_chain_next(page))) {
115   +                   ++i;
116   +                   page = tmp;
117   +           }
116 118           if (len)
117 119                   *len = i;
118 120           return page;
+4 -7
drivers/block/drbd/drbd_req.c
···
1191 1191    * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
1192 1192    */
1193 1193   static struct drbd_request *
1194    -   drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1194    +   drbd_request_prepare(struct drbd_device *device, struct bio *bio)
1195 1195   {
1196 1196           const int rw = bio_data_dir(bio);
1197 1197           struct drbd_request *req;
···
1419 1419           complete_master_bio(device, &m);
1420 1420   }
1421 1421
1422    -   void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1422    +   void __drbd_make_request(struct drbd_device *device, struct bio *bio)
1423 1423   {
1424    -           struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
1424    +           struct drbd_request *req = drbd_request_prepare(device, bio);
1425 1425           if (IS_ERR_OR_NULL(req))
1426 1426                   return;
1427 1427           drbd_send_and_submit(device, req);
···
1599 1599   blk_qc_t drbd_submit_bio(struct bio *bio)
1600 1600   {
1601 1601           struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
1602    -           unsigned long start_jif;
1603 1602
1604 1603           blk_queue_split(&bio);
1605    -
1606    -           start_jif = jiffies;
1607 1604
1608 1605           /*
1609 1606            * what we "blindly" assume:
···
1608 1611           D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
1609 1612
1610 1613           inc_ap_bio(device);
1611    -           __drbd_make_request(device, bio, start_jif);
1614    +           __drbd_make_request(device, bio);
1612 1615           return BLK_QC_T_NONE;
1613 1616   }
1614 1617
+15 -15
drivers/block/floppy.c
···
4121 4121       if (fdc_state[FDC(drive)].rawcmd == 1)
4122 4122               fdc_state[FDC(drive)].rawcmd = 2;
4123 4123
4124    -       if (!(mode & FMODE_NDELAY)) {
4125    -               if (mode & (FMODE_READ|FMODE_WRITE)) {
4126    -                       drive_state[drive].last_checked = 0;
4127    -                       clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
4128    -                                 &drive_state[drive].flags);
4129    -                       if (bdev_check_media_change(bdev))
4130    -                               floppy_revalidate(bdev->bd_disk);
4131    -                       if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
4132    -                               goto out;
4133    -                       if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
4134    -                               goto out;
4135    -               }
4136    -               res = -EROFS;
4137    -               if ((mode & FMODE_WRITE) &&
4138    -                   !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
4124    +       if (mode & (FMODE_READ|FMODE_WRITE)) {
4125    +               drive_state[drive].last_checked = 0;
4126    +               clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
4127    +               if (bdev_check_media_change(bdev))
4128    +                       floppy_revalidate(bdev->bd_disk);
4129    +               if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
4130    +                       goto out;
4131    +               if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
4139 4132                       goto out;
4140 4133       }
4134    +
4135    +       res = -EROFS;
4136    +
4137    +       if ((mode & FMODE_WRITE) &&
4138    +           !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
4139    +               goto out;
4140    +
4141 4141       mutex_unlock(&open_lock);
4142 4142       mutex_unlock(&floppy_mutex);
4143 4143       return 0;
+53 -40
drivers/block/loop.c
··· 704 704 int error; 705 705 bool partscan; 706 706 707 - error = mutex_lock_killable(&loop_ctl_mutex); 707 + error = mutex_lock_killable(&lo->lo_mutex); 708 708 if (error) 709 709 return error; 710 710 error = -ENXIO; ··· 743 743 loop_update_dio(lo); 744 744 blk_mq_unfreeze_queue(lo->lo_queue); 745 745 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; 746 - mutex_unlock(&loop_ctl_mutex); 746 + mutex_unlock(&lo->lo_mutex); 747 747 /* 748 - * We must drop file reference outside of loop_ctl_mutex as dropping 748 + * We must drop file reference outside of lo_mutex as dropping 749 749 * the file ref can take bd_mutex which creates circular locking 750 750 * dependency. 751 751 */ ··· 755 755 return 0; 756 756 757 757 out_err: 758 - mutex_unlock(&loop_ctl_mutex); 758 + mutex_unlock(&lo->lo_mutex); 759 759 if (file) 760 760 fput(file); 761 761 return error; ··· 1092 1092 goto out_putf; 1093 1093 } 1094 1094 1095 - error = mutex_lock_killable(&loop_ctl_mutex); 1095 + error = mutex_lock_killable(&lo->lo_mutex); 1096 1096 if (error) 1097 1097 goto out_bdev; 1098 1098 ··· 1171 1171 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev). 1172 1172 */ 1173 1173 bdgrab(bdev); 1174 - mutex_unlock(&loop_ctl_mutex); 1174 + mutex_unlock(&lo->lo_mutex); 1175 1175 if (partscan) 1176 1176 loop_reread_partitions(lo, bdev); 1177 1177 if (!(mode & FMODE_EXCL)) ··· 1179 1179 return 0; 1180 1180 1181 1181 out_unlock: 1182 - mutex_unlock(&loop_ctl_mutex); 1182 + mutex_unlock(&lo->lo_mutex); 1183 1183 out_bdev: 1184 1184 if (!(mode & FMODE_EXCL)) 1185 1185 bd_abort_claiming(bdev, loop_configure); ··· 1200 1200 bool partscan = false; 1201 1201 int lo_number; 1202 1202 1203 - mutex_lock(&loop_ctl_mutex); 1203 + mutex_lock(&lo->lo_mutex); 1204 1204 if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) { 1205 1205 err = -ENXIO; 1206 1206 goto out_unlock; ··· 1253 1253 lo_number = lo->lo_number; 1254 1254 loop_unprepare_queue(lo); 1255 1255 out_unlock: 1256 - mutex_unlock(&loop_ctl_mutex); 1256 + mutex_unlock(&lo->lo_mutex); 1257 1257 if (partscan) { 1258 1258 /* 1259 1259 * bd_mutex has been held already in release path, so don't ··· 1284 1284 * protects us from all the other places trying to change the 'lo' 1285 1285 * device. 1286 1286 */ 1287 - mutex_lock(&loop_ctl_mutex); 1287 + mutex_lock(&lo->lo_mutex); 1288 1288 lo->lo_flags = 0; 1289 1289 if (!part_shift) 1290 1290 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; 1291 1291 lo->lo_state = Lo_unbound; 1292 - mutex_unlock(&loop_ctl_mutex); 1292 + mutex_unlock(&lo->lo_mutex); 1293 1293 1294 1294 /* 1295 - * Need not hold loop_ctl_mutex to fput backing file. 1296 - * Calling fput holding loop_ctl_mutex triggers a circular 1297 - * lock dependency possibility warning as fput can take 1298 - * bd_mutex which is usually taken before loop_ctl_mutex. 1295 + * Need not hold lo_mutex to fput backing file. Calling fput holding 1296 + * lo_mutex triggers a circular lock dependency possibility warning as 1297 + * fput can take bd_mutex which is usually taken before lo_mutex. 
1299 1298 */ 1300 1299 if (filp) 1301 1300 fput(filp); ··· 1305 1306 { 1306 1307 int err; 1307 1308 1308 - err = mutex_lock_killable(&loop_ctl_mutex); 1309 + err = mutex_lock_killable(&lo->lo_mutex); 1309 1310 if (err) 1310 1311 return err; 1311 1312 if (lo->lo_state != Lo_bound) { 1312 - mutex_unlock(&loop_ctl_mutex); 1313 + mutex_unlock(&lo->lo_mutex); 1313 1314 return -ENXIO; 1314 1315 } 1315 1316 /* ··· 1324 1325 */ 1325 1326 if (atomic_read(&lo->lo_refcnt) > 1) { 1326 1327 lo->lo_flags |= LO_FLAGS_AUTOCLEAR; 1327 - mutex_unlock(&loop_ctl_mutex); 1328 + mutex_unlock(&lo->lo_mutex); 1328 1329 return 0; 1329 1330 } 1330 1331 lo->lo_state = Lo_rundown; 1331 - mutex_unlock(&loop_ctl_mutex); 1332 + mutex_unlock(&lo->lo_mutex); 1332 1333 1333 1334 return __loop_clr_fd(lo, false); 1334 1335 } ··· 1343 1344 bool partscan = false; 1344 1345 bool size_changed = false; 1345 1346 1346 - err = mutex_lock_killable(&loop_ctl_mutex); 1347 + err = mutex_lock_killable(&lo->lo_mutex); 1347 1348 if (err) 1348 1349 return err; 1349 1350 if (lo->lo_encrypt_key_size && ··· 1410 1411 partscan = true; 1411 1412 } 1412 1413 out_unlock: 1413 - mutex_unlock(&loop_ctl_mutex); 1414 + mutex_unlock(&lo->lo_mutex); 1414 1415 if (partscan) 1415 1416 loop_reread_partitions(lo, bdev); 1416 1417 ··· 1424 1425 struct kstat stat; 1425 1426 int ret; 1426 1427 1427 - ret = mutex_lock_killable(&loop_ctl_mutex); 1428 + ret = mutex_lock_killable(&lo->lo_mutex); 1428 1429 if (ret) 1429 1430 return ret; 1430 1431 if (lo->lo_state != Lo_bound) { 1431 - mutex_unlock(&loop_ctl_mutex); 1432 + mutex_unlock(&lo->lo_mutex); 1432 1433 return -ENXIO; 1433 1434 } 1434 1435 ··· 1447 1448 lo->lo_encrypt_key_size); 1448 1449 } 1449 1450 1450 - /* Drop loop_ctl_mutex while we call into the filesystem. */ 1451 + /* Drop lo_mutex while we call into the filesystem. */ 1451 1452 path = lo->lo_backing_file->f_path; 1452 1453 path_get(&path); 1453 - mutex_unlock(&loop_ctl_mutex); 1454 + mutex_unlock(&lo->lo_mutex); 1454 1455 ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); 1455 1456 if (!ret) { 1456 1457 info->lo_device = huge_encode_dev(stat.dev); ··· 1636 1637 { 1637 1638 int err; 1638 1639 1639 - err = mutex_lock_killable(&loop_ctl_mutex); 1640 + err = mutex_lock_killable(&lo->lo_mutex); 1640 1641 if (err) 1641 1642 return err; 1642 1643 switch (cmd) { ··· 1652 1653 default: 1653 1654 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; 1654 1655 } 1655 - mutex_unlock(&loop_ctl_mutex); 1656 + mutex_unlock(&lo->lo_mutex); 1656 1657 return err; 1657 1658 } 1658 1659 ··· 1878 1879 struct loop_device *lo; 1879 1880 int err; 1880 1881 1882 + /* 1883 + * take loop_ctl_mutex to protect lo pointer from race with 1884 + * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention 1885 + * release it prior to updating lo->lo_refcnt. 
1886 + */ 1881 1887 err = mutex_lock_killable(&loop_ctl_mutex); 1882 1888 if (err) 1883 1889 return err; 1884 1890 lo = bdev->bd_disk->private_data; 1885 1891 if (!lo) { 1886 - err = -ENXIO; 1887 - goto out; 1892 + mutex_unlock(&loop_ctl_mutex); 1893 + return -ENXIO; 1888 1894 } 1889 - 1890 - atomic_inc(&lo->lo_refcnt); 1891 - out: 1895 + err = mutex_lock_killable(&lo->lo_mutex); 1892 1896 mutex_unlock(&loop_ctl_mutex); 1893 - return err; 1897 + if (err) 1898 + return err; 1899 + atomic_inc(&lo->lo_refcnt); 1900 + mutex_unlock(&lo->lo_mutex); 1901 + return 0; 1894 1902 } 1895 1903 1896 1904 static void lo_release(struct gendisk *disk, fmode_t mode) 1897 1905 { 1898 - struct loop_device *lo; 1906 + struct loop_device *lo = disk->private_data; 1899 1907 1900 - mutex_lock(&loop_ctl_mutex); 1901 - lo = disk->private_data; 1908 + mutex_lock(&lo->lo_mutex); 1902 1909 if (atomic_dec_return(&lo->lo_refcnt)) 1903 1910 goto out_unlock; 1904 1911 ··· 1912 1907 if (lo->lo_state != Lo_bound) 1913 1908 goto out_unlock; 1914 1909 lo->lo_state = Lo_rundown; 1915 - mutex_unlock(&loop_ctl_mutex); 1910 + mutex_unlock(&lo->lo_mutex); 1916 1911 /* 1917 1912 * In autoclear mode, stop the loop thread 1918 1913 * and remove configuration after last close. ··· 1929 1924 } 1930 1925 1931 1926 out_unlock: 1932 - mutex_unlock(&loop_ctl_mutex); 1927 + mutex_unlock(&lo->lo_mutex); 1933 1928 } 1934 1929 1935 1930 static const struct block_device_operations lo_fops = { ··· 1968 1963 struct loop_device *lo = ptr; 1969 1964 struct loop_func_table *xfer = data; 1970 1965 1971 - mutex_lock(&loop_ctl_mutex); 1966 + mutex_lock(&lo->lo_mutex); 1972 1967 if (lo->lo_encryption == xfer) 1973 1968 loop_release_xfer(lo); 1974 - mutex_unlock(&loop_ctl_mutex); 1969 + mutex_unlock(&lo->lo_mutex); 1975 1970 return 0; 1976 1971 } 1977 1972 ··· 2157 2152 disk->flags |= GENHD_FL_NO_PART_SCAN; 2158 2153 disk->flags |= GENHD_FL_EXT_DEVT; 2159 2154 atomic_set(&lo->lo_refcnt, 0); 2155 + mutex_init(&lo->lo_mutex); 2160 2156 lo->lo_number = i; 2161 2157 spin_lock_init(&lo->lo_lock); 2162 2158 disk->major = LOOP_MAJOR; ··· 2188 2182 blk_cleanup_queue(lo->lo_queue); 2189 2183 blk_mq_free_tag_set(&lo->tag_set); 2190 2184 put_disk(lo->lo_disk); 2185 + mutex_destroy(&lo->lo_mutex); 2191 2186 kfree(lo); 2192 2187 } 2193 2188 ··· 2268 2261 ret = loop_lookup(&lo, parm); 2269 2262 if (ret < 0) 2270 2263 break; 2264 + ret = mutex_lock_killable(&lo->lo_mutex); 2265 + if (ret) 2266 + break; 2271 2267 if (lo->lo_state != Lo_unbound) { 2272 2268 ret = -EBUSY; 2269 + mutex_unlock(&lo->lo_mutex); 2273 2270 break; 2274 2271 } 2275 2272 if (atomic_read(&lo->lo_refcnt) > 0) { 2276 2273 ret = -EBUSY; 2274 + mutex_unlock(&lo->lo_mutex); 2277 2275 break; 2278 2276 } 2279 2277 lo->lo_disk->private_data = NULL; 2278 + mutex_unlock(&lo->lo_mutex); 2280 2279 idr_remove(&loop_index_idr, lo->lo_number); 2281 2280 loop_remove(lo); 2282 2281 break;
+1
drivers/block/loop.h
···
62 62   struct request_queue    *lo_queue;
63 63   struct blk_mq_tag_set   tag_set;
64 64   struct gendisk          *lo_disk;
65  +   struct mutex            lo_mutex;
65 66   };
66 67
67 68   struct loop_cmd {
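
The loop.c/loop.h changes above replace the single global loop_ctl_mutex with a per-device lo_mutex, so opens and ioctls on different loop devices no longer serialize on one lock; the global mutex is kept only to make the device lookup safe against loop_control_ioctl(LOOP_CTL_REMOVE), and it is dropped before the per-device state is touched. A minimal sketch of that lookup-then-lock pattern, with simplified, hypothetical names (ctl_mutex, struct ex_dev, ex_open), not the driver's actual code:

  #include <linux/blkdev.h>
  #include <linux/mutex.h>

  /* Sketch only: global mutex guards the lookup, per-device mutex guards state. */
  static DEFINE_MUTEX(ctl_mutex);         /* protects finding the device object */

  struct ex_dev {
          struct mutex    lock;           /* protects this device's state */
          int             refcnt;
  };

  static int ex_open(struct block_device *bdev, fmode_t mode)
  {
          struct ex_dev *d;
          int err;

          err = mutex_lock_killable(&ctl_mutex);  /* serialize against removal */
          if (err)
                  return err;
          d = bdev->bd_disk->private_data;
          if (!d) {
                  mutex_unlock(&ctl_mutex);
                  return -ENXIO;
          }
          err = mutex_lock_killable(&d->lock);    /* switch to per-device lock */
          mutex_unlock(&ctl_mutex);               /* drop the global lock early */
          if (err)
                  return err;
          d->refcnt++;
          mutex_unlock(&d->lock);
          return 0;
  }

Ordering matters in the sketch: the per-device mutex is taken before the global one is released, so the device cannot go away in the window between lookup and lock, which mirrors the lo_open() change in the diff.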
+5 -10
drivers/block/mtip32xx/mtip32xx.c
···
3924 3924
3925 3925   static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
3926 3926   {
3927    -           int pos;
3928 3927           unsigned short pcie_dev_ctrl;
3929 3928
3930    -           pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3931    -           if (pos) {
3932    -                   pci_read_config_word(pdev,
3933    -                                        pos + PCI_EXP_DEVCTL,
3934    -                                        &pcie_dev_ctrl);
3935    -                   if (pcie_dev_ctrl & (1 << 11) ||
3936    -                       pcie_dev_ctrl & (1 << 4)) {
3929    +           if (pci_is_pcie(pdev)) {
3930    +                   pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_dev_ctrl);
3931    +                   if (pcie_dev_ctrl & PCI_EXP_DEVCTL_NOSNOOP_EN ||
3932    +                       pcie_dev_ctrl & PCI_EXP_DEVCTL_RELAX_EN) {
3937 3933                           dev_info(&dd->pdev->dev,
3938 3934                                   "Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
3939 3935                                   pdev->vendor, pdev->device);
3940 3936                           pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
3941 3937                                              PCI_EXP_DEVCTL_RELAX_EN);
3942    -                           pci_write_config_word(pdev,
3943    -                                                 pos + PCI_EXP_DEVCTL,
3938    +                           pcie_capability_write_word(pdev, PCI_EXP_DEVCTL,
3944 3939                                                      pcie_dev_ctrl);
3945 3940                   }
3946 3941           }
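
The fix above swaps the open-coded PCI config access for the PCIe capability accessors: pci_is_pcie() replaces the pci_find_capability(pdev, PCI_CAP_ID_EXP) test, and pcie_capability_read_word()/pcie_capability_write_word() take the register offset relative to the Express capability, so the driver no longer has to carry the capability position itself; the magic bits (1 << 11) and (1 << 4) become the named PCI_EXP_DEVCTL_NOSNOOP_EN and PCI_EXP_DEVCTL_RELAX_EN. A small sketch of the same pattern, using a hypothetical helper (example_clear_ro_nosnoop is illustrative, not part of the driver):

  #include <linux/pci.h>

  /* Sketch: clear Relaxed Ordering / No Snoop enables on a PCIe function
   * using the capability accessors adopted by the mtip32xx fix. */
  static void example_clear_ro_nosnoop(struct pci_dev *pdev)
  {
          u16 devctl;

          if (!pci_is_pcie(pdev))         /* no PCIe capability: nothing to do */
                  return;

          pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
          if (devctl & (PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN)) {
                  devctl &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
                  pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, devctl);
          }
  }

The read-modify-write could also be collapsed into a single pcie_capability_clear_word() call, which clears the given bits in one step.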
+4 -24
drivers/block/nbd.c
···
1529 1529           return 0;
1530 1530   }
1531 1531
1532    -   static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1533    -   {
1534    -           return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1535    -   }
1536    -
1537    -   static const struct file_operations nbd_dbg_tasks_ops = {
1538    -           .open = nbd_dbg_tasks_open,
1539    -           .read = seq_read,
1540    -           .llseek = seq_lseek,
1541    -           .release = single_release,
1542    -   };
1532    +   DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
1543 1533
1544 1534   static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1545 1535   {
···
1554 1564           return 0;
1555 1565   }
1556 1566
1557    -   static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1558    -   {
1559    -           return single_open(file, nbd_dbg_flags_show, inode->i_private);
1560    -   }
1561    -
1562    -   static const struct file_operations nbd_dbg_flags_ops = {
1563    -           .open = nbd_dbg_flags_open,
1564    -           .read = seq_read,
1565    -           .llseek = seq_lseek,
1566    -           .release = single_release,
1567    -   };
1567    +   DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
1568 1568
1569 1569   static int nbd_dev_dbg_init(struct nbd_device *nbd)
1570 1570   {
···
1572 1592   }
1573 1593           config->dbg_dir = dir;
1574 1594
1575    -           debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
1595    +           debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1576 1596           debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1577 1597           debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1578 1598           debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
1579    -           debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
1599    +           debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1580 1600
1581 1601           return 0;
1582 1602   }
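
DEFINE_SHOW_ATTRIBUTE(name), from <linux/seq_file.h>, generates a name_open() wrapper around single_open() plus a const struct file_operations named name_fops, all driven by an existing name_show(struct seq_file *, void *); that is why the hand-rolled *_open() functions and *_ops tables above can be deleted and the debugfs_create_file() calls switch to the generated &nbd_dbg_tasks_fops and &nbd_dbg_flags_fops. A minimal sketch with a hypothetical attribute (example_stats is illustrative only):

  #include <linux/debugfs.h>
  #include <linux/seq_file.h>

  /* Hypothetical debugfs attribute, for illustration only. */
  static int example_stats_show(struct seq_file *s, void *unused)
  {
          seq_puts(s, "hello from debugfs\n");
          return 0;
  }
  /* Expands to example_stats_open() and example_stats_fops. */
  DEFINE_SHOW_ATTRIBUTE(example_stats);

  static void example_stats_init(struct dentry *dir)
  {
          debugfs_create_file("stats", 0444, dir, NULL, &example_stats_fops);
  }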
+1 -2
drivers/block/rsxx/dma.c
···
944 944                   ctrl->done_wq = NULL;
945 945           }
946 946
947   -           if (ctrl->trackers)
948   -                   vfree(ctrl->trackers);
947   +           vfree(ctrl->trackers);
949 948
950 949           if (ctrl->status.buf)
951 950                   dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
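
Like kfree(), vfree() is a no-op when passed NULL, so the removed NULL check above was redundant and the call can be made unconditionally. A tiny illustrative sketch (struct tracker_table and tracker_table_destroy are hypothetical):

  #include <linux/vmalloc.h>

  struct tracker_table {
          void *trackers;         /* vmalloc()ed, or NULL if never allocated */
  };

  static void tracker_table_destroy(struct tracker_table *t)
  {
          vfree(t->trackers);     /* safe even if t->trackers is NULL */
          t->trackers = NULL;
  }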
-3670
drivers/block/skd_main.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST 4 - * was acquired by Western Digital in 2012. 5 - * 6 - * Copyright 2012 sTec, Inc. 7 - * Copyright (c) 2017 Western Digital Corporation or its affiliates. 8 - */ 9 - 10 - #include <linux/kernel.h> 11 - #include <linux/module.h> 12 - #include <linux/init.h> 13 - #include <linux/pci.h> 14 - #include <linux/slab.h> 15 - #include <linux/spinlock.h> 16 - #include <linux/blkdev.h> 17 - #include <linux/blk-mq.h> 18 - #include <linux/sched.h> 19 - #include <linux/interrupt.h> 20 - #include <linux/compiler.h> 21 - #include <linux/workqueue.h> 22 - #include <linux/delay.h> 23 - #include <linux/time.h> 24 - #include <linux/hdreg.h> 25 - #include <linux/dma-mapping.h> 26 - #include <linux/completion.h> 27 - #include <linux/scatterlist.h> 28 - #include <linux/err.h> 29 - #include <linux/aer.h> 30 - #include <linux/wait.h> 31 - #include <linux/stringify.h> 32 - #include <scsi/scsi.h> 33 - #include <scsi/sg.h> 34 - #include <linux/io.h> 35 - #include <linux/uaccess.h> 36 - #include <asm/unaligned.h> 37 - 38 - #include "skd_s1120.h" 39 - 40 - static int skd_dbg_level; 41 - static int skd_isr_comp_limit = 4; 42 - 43 - #define SKD_ASSERT(expr) \ 44 - do { \ 45 - if (unlikely(!(expr))) { \ 46 - pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ 47 - # expr, __FILE__, __func__, __LINE__); \ 48 - } \ 49 - } while (0) 50 - 51 - #define DRV_NAME "skd" 52 - #define PFX DRV_NAME ": " 53 - 54 - MODULE_LICENSE("GPL"); 55 - 56 - MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver"); 57 - 58 - #define PCI_VENDOR_ID_STEC 0x1B39 59 - #define PCI_DEVICE_ID_S1120 0x0001 60 - 61 - #define SKD_FUA_NV (1 << 1) 62 - #define SKD_MINORS_PER_DEVICE 16 63 - 64 - #define SKD_MAX_QUEUE_DEPTH 200u 65 - 66 - #define SKD_PAUSE_TIMEOUT (5 * 1000) 67 - 68 - #define SKD_N_FITMSG_BYTES (512u) 69 - #define SKD_MAX_REQ_PER_MSG 14 70 - 71 - #define SKD_N_SPECIAL_FITMSG_BYTES (128u) 72 - 73 - /* SG elements are 32 bytes, so we can make this 4096 and still be under the 74 - * 128KB limit. 
That allows 4096*4K = 16M xfer size 75 - */ 76 - #define SKD_N_SG_PER_REQ_DEFAULT 256u 77 - 78 - #define SKD_N_COMPLETION_ENTRY 256u 79 - #define SKD_N_READ_CAP_BYTES (8u) 80 - 81 - #define SKD_N_INTERNAL_BYTES (512u) 82 - 83 - #define SKD_SKCOMP_SIZE \ 84 - ((sizeof(struct fit_completion_entry_v1) + \ 85 - sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY) 86 - 87 - /* 5 bits of uniqifier, 0xF800 */ 88 - #define SKD_ID_TABLE_MASK (3u << 8u) 89 - #define SKD_ID_RW_REQUEST (0u << 8u) 90 - #define SKD_ID_INTERNAL (1u << 8u) 91 - #define SKD_ID_FIT_MSG (3u << 8u) 92 - #define SKD_ID_SLOT_MASK 0x00FFu 93 - #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu 94 - 95 - #define SKD_N_MAX_SECTORS 2048u 96 - 97 - #define SKD_MAX_RETRIES 2u 98 - 99 - #define SKD_TIMER_SECONDS(seconds) (seconds) 100 - #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) 101 - 102 - #define INQ_STD_NBYTES 36 103 - 104 - enum skd_drvr_state { 105 - SKD_DRVR_STATE_LOAD, 106 - SKD_DRVR_STATE_IDLE, 107 - SKD_DRVR_STATE_BUSY, 108 - SKD_DRVR_STATE_STARTING, 109 - SKD_DRVR_STATE_ONLINE, 110 - SKD_DRVR_STATE_PAUSING, 111 - SKD_DRVR_STATE_PAUSED, 112 - SKD_DRVR_STATE_RESTARTING, 113 - SKD_DRVR_STATE_RESUMING, 114 - SKD_DRVR_STATE_STOPPING, 115 - SKD_DRVR_STATE_FAULT, 116 - SKD_DRVR_STATE_DISAPPEARED, 117 - SKD_DRVR_STATE_PROTOCOL_MISMATCH, 118 - SKD_DRVR_STATE_BUSY_ERASE, 119 - SKD_DRVR_STATE_BUSY_SANITIZE, 120 - SKD_DRVR_STATE_BUSY_IMMINENT, 121 - SKD_DRVR_STATE_WAIT_BOOT, 122 - SKD_DRVR_STATE_SYNCING, 123 - }; 124 - 125 - #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) 126 - #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) 127 - #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) 128 - #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) 129 - #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) 130 - #define SKD_START_WAIT_SECONDS 90u 131 - 132 - enum skd_req_state { 133 - SKD_REQ_STATE_IDLE, 134 - SKD_REQ_STATE_SETUP, 135 - SKD_REQ_STATE_BUSY, 136 - SKD_REQ_STATE_COMPLETED, 137 - SKD_REQ_STATE_TIMEOUT, 138 - }; 139 - 140 - enum skd_check_status_action { 141 - SKD_CHECK_STATUS_REPORT_GOOD, 142 - SKD_CHECK_STATUS_REPORT_SMART_ALERT, 143 - SKD_CHECK_STATUS_REQUEUE_REQUEST, 144 - SKD_CHECK_STATUS_REPORT_ERROR, 145 - SKD_CHECK_STATUS_BUSY_IMMINENT, 146 - }; 147 - 148 - struct skd_msg_buf { 149 - struct fit_msg_hdr fmh; 150 - struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG]; 151 - }; 152 - 153 - struct skd_fitmsg_context { 154 - u32 id; 155 - 156 - u32 length; 157 - 158 - struct skd_msg_buf *msg_buf; 159 - dma_addr_t mb_dma_address; 160 - }; 161 - 162 - struct skd_request_context { 163 - enum skd_req_state state; 164 - 165 - u16 id; 166 - u32 fitmsg_id; 167 - 168 - u8 flush_cmd; 169 - 170 - enum dma_data_direction data_dir; 171 - struct scatterlist *sg; 172 - u32 n_sg; 173 - u32 sg_byte_count; 174 - 175 - struct fit_sg_descriptor *sksg_list; 176 - dma_addr_t sksg_dma_address; 177 - 178 - struct fit_completion_entry_v1 completion; 179 - 180 - struct fit_comp_error_info err_info; 181 - int retries; 182 - 183 - blk_status_t status; 184 - }; 185 - 186 - struct skd_special_context { 187 - struct skd_request_context req; 188 - 189 - void *data_buf; 190 - dma_addr_t db_dma_address; 191 - 192 - struct skd_msg_buf *msg_buf; 193 - dma_addr_t mb_dma_address; 194 - }; 195 - 196 - typedef enum skd_irq_type { 197 - SKD_IRQ_LEGACY, 198 - SKD_IRQ_MSI, 199 - SKD_IRQ_MSIX 200 - } skd_irq_type_t; 201 - 202 - #define SKD_MAX_BARS 2 203 - 204 - struct skd_device { 205 - void __iomem *mem_map[SKD_MAX_BARS]; 206 - resource_size_t mem_phys[SKD_MAX_BARS]; 
207 - u32 mem_size[SKD_MAX_BARS]; 208 - 209 - struct skd_msix_entry *msix_entries; 210 - 211 - struct pci_dev *pdev; 212 - int pcie_error_reporting_is_enabled; 213 - 214 - spinlock_t lock; 215 - struct gendisk *disk; 216 - struct blk_mq_tag_set tag_set; 217 - struct request_queue *queue; 218 - struct skd_fitmsg_context *skmsg; 219 - struct device *class_dev; 220 - int gendisk_on; 221 - int sync_done; 222 - 223 - u32 devno; 224 - u32 major; 225 - char isr_name[30]; 226 - 227 - enum skd_drvr_state state; 228 - u32 drive_state; 229 - 230 - u32 cur_max_queue_depth; 231 - u32 queue_low_water_mark; 232 - u32 dev_max_queue_depth; 233 - 234 - u32 num_fitmsg_context; 235 - u32 num_req_context; 236 - 237 - struct skd_fitmsg_context *skmsg_table; 238 - 239 - struct skd_special_context internal_skspcl; 240 - u32 read_cap_blocksize; 241 - u32 read_cap_last_lba; 242 - int read_cap_is_valid; 243 - int inquiry_is_valid; 244 - u8 inq_serial_num[13]; /*12 chars plus null term */ 245 - 246 - u8 skcomp_cycle; 247 - u32 skcomp_ix; 248 - struct kmem_cache *msgbuf_cache; 249 - struct kmem_cache *sglist_cache; 250 - struct kmem_cache *databuf_cache; 251 - struct fit_completion_entry_v1 *skcomp_table; 252 - struct fit_comp_error_info *skerr_table; 253 - dma_addr_t cq_dma_address; 254 - 255 - wait_queue_head_t waitq; 256 - 257 - struct timer_list timer; 258 - u32 timer_countdown; 259 - u32 timer_substate; 260 - 261 - int sgs_per_request; 262 - u32 last_mtd; 263 - 264 - u32 proto_ver; 265 - 266 - int dbg_level; 267 - u32 connect_time_stamp; 268 - int connect_retries; 269 - #define SKD_MAX_CONNECT_RETRIES 16 270 - u32 drive_jiffies; 271 - 272 - u32 timo_slot; 273 - 274 - struct work_struct start_queue; 275 - struct work_struct completion_worker; 276 - }; 277 - 278 - #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF) 279 - #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF) 280 - #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF) 281 - 282 - static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) 283 - { 284 - u32 val = readl(skdev->mem_map[1] + offset); 285 - 286 - if (unlikely(skdev->dbg_level >= 2)) 287 - dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); 288 - return val; 289 - } 290 - 291 - static inline void skd_reg_write32(struct skd_device *skdev, u32 val, 292 - u32 offset) 293 - { 294 - writel(val, skdev->mem_map[1] + offset); 295 - if (unlikely(skdev->dbg_level >= 2)) 296 - dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); 297 - } 298 - 299 - static inline void skd_reg_write64(struct skd_device *skdev, u64 val, 300 - u32 offset) 301 - { 302 - writeq(val, skdev->mem_map[1] + offset); 303 - if (unlikely(skdev->dbg_level >= 2)) 304 - dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset, 305 - val); 306 - } 307 - 308 - 309 - #define SKD_IRQ_DEFAULT SKD_IRQ_MSIX 310 - static int skd_isr_type = SKD_IRQ_DEFAULT; 311 - 312 - module_param(skd_isr_type, int, 0444); 313 - MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability." 314 - " (0==legacy, 1==MSI, 2==MSI-X, default==1)"); 315 - 316 - #define SKD_MAX_REQ_PER_MSG_DEFAULT 1 317 - static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 318 - 319 - module_param(skd_max_req_per_msg, int, 0444); 320 - MODULE_PARM_DESC(skd_max_req_per_msg, 321 - "Maximum SCSI requests packed in a single message." 
322 - " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)"); 323 - 324 - #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 325 - #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" 326 - static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 327 - 328 - module_param(skd_max_queue_depth, int, 0444); 329 - MODULE_PARM_DESC(skd_max_queue_depth, 330 - "Maximum SCSI requests issued to s1120." 331 - " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")"); 332 - 333 - static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 334 - module_param(skd_sgs_per_request, int, 0444); 335 - MODULE_PARM_DESC(skd_sgs_per_request, 336 - "Maximum SG elements per block request." 337 - " (1-4096, default==256)"); 338 - 339 - static int skd_max_pass_thru = 1; 340 - module_param(skd_max_pass_thru, int, 0444); 341 - MODULE_PARM_DESC(skd_max_pass_thru, 342 - "Maximum SCSI pass-thru at a time. IGNORED"); 343 - 344 - module_param(skd_dbg_level, int, 0444); 345 - MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); 346 - 347 - module_param(skd_isr_comp_limit, int, 0444); 348 - MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4"); 349 - 350 - /* Major device number dynamically assigned. */ 351 - static u32 skd_major; 352 - 353 - static void skd_destruct(struct skd_device *skdev); 354 - static const struct block_device_operations skd_blockdev_ops; 355 - static void skd_send_fitmsg(struct skd_device *skdev, 356 - struct skd_fitmsg_context *skmsg); 357 - static void skd_send_special_fitmsg(struct skd_device *skdev, 358 - struct skd_special_context *skspcl); 359 - static bool skd_preop_sg_list(struct skd_device *skdev, 360 - struct skd_request_context *skreq); 361 - static void skd_postop_sg_list(struct skd_device *skdev, 362 - struct skd_request_context *skreq); 363 - 364 - static void skd_restart_device(struct skd_device *skdev); 365 - static int skd_quiesce_dev(struct skd_device *skdev); 366 - static int skd_unquiesce_dev(struct skd_device *skdev); 367 - static void skd_disable_interrupts(struct skd_device *skdev); 368 - static void skd_isr_fwstate(struct skd_device *skdev); 369 - static void skd_recover_requests(struct skd_device *skdev); 370 - static void skd_soft_reset(struct skd_device *skdev); 371 - 372 - const char *skd_drive_state_to_str(int state); 373 - const char *skd_skdev_state_to_str(enum skd_drvr_state state); 374 - static void skd_log_skdev(struct skd_device *skdev, const char *event); 375 - static void skd_log_skreq(struct skd_device *skdev, 376 - struct skd_request_context *skreq, const char *event); 377 - 378 - /* 379 - ***************************************************************************** 380 - * READ/WRITE REQUESTS 381 - ***************************************************************************** 382 - */ 383 - static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved) 384 - { 385 - int *count = data; 386 - 387 - count++; 388 - return true; 389 - } 390 - 391 - static int skd_in_flight(struct skd_device *skdev) 392 - { 393 - int count = 0; 394 - 395 - blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count); 396 - 397 - return count; 398 - } 399 - 400 - static void 401 - skd_prep_rw_cdb(struct skd_scsi_request *scsi_req, 402 - int data_dir, unsigned lba, 403 - unsigned count) 404 - { 405 - if (data_dir == READ) 406 - scsi_req->cdb[0] = READ_10; 407 - else 408 - scsi_req->cdb[0] = WRITE_10; 409 - 410 - scsi_req->cdb[1] = 0; 411 - scsi_req->cdb[2] = (lba & 0xff000000) >> 24; 412 - scsi_req->cdb[3] = (lba & 0xff0000) >> 16; 413 - 
scsi_req->cdb[4] = (lba & 0xff00) >> 8; 414 - scsi_req->cdb[5] = (lba & 0xff); 415 - scsi_req->cdb[6] = 0; 416 - scsi_req->cdb[7] = (count & 0xff00) >> 8; 417 - scsi_req->cdb[8] = count & 0xff; 418 - scsi_req->cdb[9] = 0; 419 - } 420 - 421 - static void 422 - skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, 423 - struct skd_request_context *skreq) 424 - { 425 - skreq->flush_cmd = 1; 426 - 427 - scsi_req->cdb[0] = SYNCHRONIZE_CACHE; 428 - scsi_req->cdb[1] = 0; 429 - scsi_req->cdb[2] = 0; 430 - scsi_req->cdb[3] = 0; 431 - scsi_req->cdb[4] = 0; 432 - scsi_req->cdb[5] = 0; 433 - scsi_req->cdb[6] = 0; 434 - scsi_req->cdb[7] = 0; 435 - scsi_req->cdb[8] = 0; 436 - scsi_req->cdb[9] = 0; 437 - } 438 - 439 - /* 440 - * Return true if and only if all pending requests should be failed. 441 - */ 442 - static bool skd_fail_all(struct request_queue *q) 443 - { 444 - struct skd_device *skdev = q->queuedata; 445 - 446 - SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); 447 - 448 - skd_log_skdev(skdev, "req_not_online"); 449 - switch (skdev->state) { 450 - case SKD_DRVR_STATE_PAUSING: 451 - case SKD_DRVR_STATE_PAUSED: 452 - case SKD_DRVR_STATE_STARTING: 453 - case SKD_DRVR_STATE_RESTARTING: 454 - case SKD_DRVR_STATE_WAIT_BOOT: 455 - /* In case of starting, we haven't started the queue, 456 - * so we can't get here... but requests are 457 - * possibly hanging out waiting for us because we 458 - * reported the dev/skd0 already. They'll wait 459 - * forever if connect doesn't complete. 460 - * What to do??? delay dev/skd0 ?? 461 - */ 462 - case SKD_DRVR_STATE_BUSY: 463 - case SKD_DRVR_STATE_BUSY_IMMINENT: 464 - case SKD_DRVR_STATE_BUSY_ERASE: 465 - return false; 466 - 467 - case SKD_DRVR_STATE_BUSY_SANITIZE: 468 - case SKD_DRVR_STATE_STOPPING: 469 - case SKD_DRVR_STATE_SYNCING: 470 - case SKD_DRVR_STATE_FAULT: 471 - case SKD_DRVR_STATE_DISAPPEARED: 472 - default: 473 - return true; 474 - } 475 - } 476 - 477 - static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 478 - const struct blk_mq_queue_data *mqd) 479 - { 480 - struct request *const req = mqd->rq; 481 - struct request_queue *const q = req->q; 482 - struct skd_device *skdev = q->queuedata; 483 - struct skd_fitmsg_context *skmsg; 484 - struct fit_msg_hdr *fmh; 485 - const u32 tag = blk_mq_unique_tag(req); 486 - struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req); 487 - struct skd_scsi_request *scsi_req; 488 - unsigned long flags = 0; 489 - const u32 lba = blk_rq_pos(req); 490 - const u32 count = blk_rq_sectors(req); 491 - const int data_dir = rq_data_dir(req); 492 - 493 - if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE)) 494 - return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE; 495 - 496 - if (!(req->rq_flags & RQF_DONTPREP)) { 497 - skreq->retries = 0; 498 - req->rq_flags |= RQF_DONTPREP; 499 - } 500 - 501 - blk_mq_start_request(req); 502 - 503 - WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n", 504 - tag, skd_max_queue_depth, q->nr_requests); 505 - 506 - SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); 507 - 508 - dev_dbg(&skdev->pdev->dev, 509 - "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, 510 - lba, count, count, data_dir); 511 - 512 - skreq->id = tag + SKD_ID_RW_REQUEST; 513 - skreq->flush_cmd = 0; 514 - skreq->n_sg = 0; 515 - skreq->sg_byte_count = 0; 516 - 517 - skreq->fitmsg_id = 0; 518 - 519 - skreq->data_dir = data_dir == READ ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; 520 - 521 - if (req->bio && !skd_preop_sg_list(skdev, skreq)) { 522 - dev_dbg(&skdev->pdev->dev, "error Out\n"); 523 - skreq->status = BLK_STS_RESOURCE; 524 - blk_mq_complete_request(req); 525 - return BLK_STS_OK; 526 - } 527 - 528 - dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address, 529 - skreq->n_sg * 530 - sizeof(struct fit_sg_descriptor), 531 - DMA_TO_DEVICE); 532 - 533 - /* Either a FIT msg is in progress or we have to start one. */ 534 - if (skd_max_req_per_msg == 1) { 535 - skmsg = NULL; 536 - } else { 537 - spin_lock_irqsave(&skdev->lock, flags); 538 - skmsg = skdev->skmsg; 539 - } 540 - if (!skmsg) { 541 - skmsg = &skdev->skmsg_table[tag]; 542 - skdev->skmsg = skmsg; 543 - 544 - /* Initialize the FIT msg header */ 545 - fmh = &skmsg->msg_buf->fmh; 546 - memset(fmh, 0, sizeof(*fmh)); 547 - fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 548 - skmsg->length = sizeof(*fmh); 549 - } else { 550 - fmh = &skmsg->msg_buf->fmh; 551 - } 552 - 553 - skreq->fitmsg_id = skmsg->id; 554 - 555 - scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced]; 556 - memset(scsi_req, 0, sizeof(*scsi_req)); 557 - 558 - scsi_req->hdr.tag = skreq->id; 559 - scsi_req->hdr.sg_list_dma_address = 560 - cpu_to_be64(skreq->sksg_dma_address); 561 - 562 - if (req_op(req) == REQ_OP_FLUSH) { 563 - skd_prep_zerosize_flush_cdb(scsi_req, skreq); 564 - SKD_ASSERT(skreq->flush_cmd == 1); 565 - } else { 566 - skd_prep_rw_cdb(scsi_req, data_dir, lba, count); 567 - } 568 - 569 - if (req->cmd_flags & REQ_FUA) 570 - scsi_req->cdb[1] |= SKD_FUA_NV; 571 - 572 - scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count); 573 - 574 - /* Complete resource allocations. */ 575 - skreq->state = SKD_REQ_STATE_BUSY; 576 - 577 - skmsg->length += sizeof(struct skd_scsi_request); 578 - fmh->num_protocol_cmds_coalesced++; 579 - 580 - dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id, 581 - skd_in_flight(skdev)); 582 - 583 - /* 584 - * If the FIT msg buffer is full send it. 585 - */ 586 - if (skd_max_req_per_msg == 1) { 587 - skd_send_fitmsg(skdev, skmsg); 588 - } else { 589 - if (mqd->last || 590 - fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { 591 - skd_send_fitmsg(skdev, skmsg); 592 - skdev->skmsg = NULL; 593 - } 594 - spin_unlock_irqrestore(&skdev->lock, flags); 595 - } 596 - 597 - return BLK_STS_OK; 598 - } 599 - 600 - static enum blk_eh_timer_return skd_timed_out(struct request *req, 601 - bool reserved) 602 - { 603 - struct skd_device *skdev = req->q->queuedata; 604 - 605 - dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n", 606 - blk_mq_unique_tag(req)); 607 - 608 - return BLK_EH_RESET_TIMER; 609 - } 610 - 611 - static void skd_complete_rq(struct request *req) 612 - { 613 - struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); 614 - 615 - blk_mq_end_request(req, skreq->status); 616 - } 617 - 618 - static bool skd_preop_sg_list(struct skd_device *skdev, 619 - struct skd_request_context *skreq) 620 - { 621 - struct request *req = blk_mq_rq_from_pdu(skreq); 622 - struct scatterlist *sgl = &skreq->sg[0], *sg; 623 - int n_sg; 624 - int i; 625 - 626 - skreq->sg_byte_count = 0; 627 - 628 - WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE && 629 - skreq->data_dir != DMA_FROM_DEVICE); 630 - 631 - n_sg = blk_rq_map_sg(skdev->queue, req, sgl); 632 - if (n_sg <= 0) 633 - return false; 634 - 635 - /* 636 - * Map scatterlist to PCI bus addresses. 637 - * Note PCI might change the number of entries. 
638 - */ 639 - n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir); 640 - if (n_sg <= 0) 641 - return false; 642 - 643 - SKD_ASSERT(n_sg <= skdev->sgs_per_request); 644 - 645 - skreq->n_sg = n_sg; 646 - 647 - for_each_sg(sgl, sg, n_sg, i) { 648 - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 649 - u32 cnt = sg_dma_len(sg); 650 - uint64_t dma_addr = sg_dma_address(sg); 651 - 652 - sgd->control = FIT_SGD_CONTROL_NOT_LAST; 653 - sgd->byte_count = cnt; 654 - skreq->sg_byte_count += cnt; 655 - sgd->host_side_addr = dma_addr; 656 - sgd->dev_side_addr = 0; 657 - } 658 - 659 - skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; 660 - skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; 661 - 662 - if (unlikely(skdev->dbg_level > 1)) { 663 - dev_dbg(&skdev->pdev->dev, 664 - "skreq=%x sksg_list=%p sksg_dma=%pad\n", 665 - skreq->id, skreq->sksg_list, &skreq->sksg_dma_address); 666 - for (i = 0; i < n_sg; i++) { 667 - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 668 - 669 - dev_dbg(&skdev->pdev->dev, 670 - " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", 671 - i, sgd->byte_count, sgd->control, 672 - sgd->host_side_addr, sgd->next_desc_ptr); 673 - } 674 - } 675 - 676 - return true; 677 - } 678 - 679 - static void skd_postop_sg_list(struct skd_device *skdev, 680 - struct skd_request_context *skreq) 681 - { 682 - /* 683 - * restore the next ptr for next IO request so we 684 - * don't have to set it every time. 685 - */ 686 - skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = 687 - skreq->sksg_dma_address + 688 - ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); 689 - dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg, 690 - skreq->data_dir); 691 - } 692 - 693 - /* 694 - ***************************************************************************** 695 - * TIMER 696 - ***************************************************************************** 697 - */ 698 - 699 - static void skd_timer_tick_not_online(struct skd_device *skdev); 700 - 701 - static void skd_start_queue(struct work_struct *work) 702 - { 703 - struct skd_device *skdev = container_of(work, typeof(*skdev), 704 - start_queue); 705 - 706 - /* 707 - * Although it is safe to call blk_start_queue() from interrupt 708 - * context, blk_mq_start_hw_queues() must not be called from 709 - * interrupt context. 710 - */ 711 - blk_mq_start_hw_queues(skdev->queue); 712 - } 713 - 714 - static void skd_timer_tick(struct timer_list *t) 715 - { 716 - struct skd_device *skdev = from_timer(skdev, t, timer); 717 - unsigned long reqflags; 718 - u32 state; 719 - 720 - if (skdev->state == SKD_DRVR_STATE_FAULT) 721 - /* The driver has declared fault, and we want it to 722 - * stay that way until driver is reloaded. 
723 - */ 724 - return; 725 - 726 - spin_lock_irqsave(&skdev->lock, reqflags); 727 - 728 - state = SKD_READL(skdev, FIT_STATUS); 729 - state &= FIT_SR_DRIVE_STATE_MASK; 730 - if (state != skdev->drive_state) 731 - skd_isr_fwstate(skdev); 732 - 733 - if (skdev->state != SKD_DRVR_STATE_ONLINE) 734 - skd_timer_tick_not_online(skdev); 735 - 736 - mod_timer(&skdev->timer, (jiffies + HZ)); 737 - 738 - spin_unlock_irqrestore(&skdev->lock, reqflags); 739 - } 740 - 741 - static void skd_timer_tick_not_online(struct skd_device *skdev) 742 - { 743 - switch (skdev->state) { 744 - case SKD_DRVR_STATE_IDLE: 745 - case SKD_DRVR_STATE_LOAD: 746 - break; 747 - case SKD_DRVR_STATE_BUSY_SANITIZE: 748 - dev_dbg(&skdev->pdev->dev, 749 - "drive busy sanitize[%x], driver[%x]\n", 750 - skdev->drive_state, skdev->state); 751 - /* If we've been in sanitize for 3 seconds, we figure we're not 752 - * going to get anymore completions, so recover requests now 753 - */ 754 - if (skdev->timer_countdown > 0) { 755 - skdev->timer_countdown--; 756 - return; 757 - } 758 - skd_recover_requests(skdev); 759 - break; 760 - 761 - case SKD_DRVR_STATE_BUSY: 762 - case SKD_DRVR_STATE_BUSY_IMMINENT: 763 - case SKD_DRVR_STATE_BUSY_ERASE: 764 - dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n", 765 - skdev->state, skdev->timer_countdown); 766 - if (skdev->timer_countdown > 0) { 767 - skdev->timer_countdown--; 768 - return; 769 - } 770 - dev_dbg(&skdev->pdev->dev, 771 - "busy[%x], timedout=%d, restarting device.", 772 - skdev->state, skdev->timer_countdown); 773 - skd_restart_device(skdev); 774 - break; 775 - 776 - case SKD_DRVR_STATE_WAIT_BOOT: 777 - case SKD_DRVR_STATE_STARTING: 778 - if (skdev->timer_countdown > 0) { 779 - skdev->timer_countdown--; 780 - return; 781 - } 782 - /* For now, we fault the drive. Could attempt resets to 783 - * revcover at some point. */ 784 - skdev->state = SKD_DRVR_STATE_FAULT; 785 - 786 - dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n", 787 - skdev->drive_state); 788 - 789 - /*start the queue so we can respond with error to requests */ 790 - /* wakeup anyone waiting for startup complete */ 791 - schedule_work(&skdev->start_queue); 792 - skdev->gendisk_on = -1; 793 - wake_up_interruptible(&skdev->waitq); 794 - break; 795 - 796 - case SKD_DRVR_STATE_ONLINE: 797 - /* shouldn't get here. */ 798 - break; 799 - 800 - case SKD_DRVR_STATE_PAUSING: 801 - case SKD_DRVR_STATE_PAUSED: 802 - break; 803 - 804 - case SKD_DRVR_STATE_RESTARTING: 805 - if (skdev->timer_countdown > 0) { 806 - skdev->timer_countdown--; 807 - return; 808 - } 809 - /* For now, we fault the drive. Could attempt resets to 810 - * revcover at some point. */ 811 - skdev->state = SKD_DRVR_STATE_FAULT; 812 - dev_err(&skdev->pdev->dev, 813 - "DriveFault Reconnect Timeout (%x)\n", 814 - skdev->drive_state); 815 - 816 - /* 817 - * Recovering does two things: 818 - * 1. completes IO with error 819 - * 2. reclaims dma resources 820 - * When is it safe to recover requests? 821 - * - if the drive state is faulted 822 - * - if the state is still soft reset after out timeout 823 - * - if the drive registers are dead (state = FF) 824 - * If it is "unsafe", we still need to recover, so we will 825 - * disable pci bus mastering and disable our interrupts. 826 - */ 827 - 828 - if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || 829 - (skdev->drive_state == FIT_SR_DRIVE_FAULT) || 830 - (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) 831 - /* It never came out of soft reset. 
Try to 832 - * recover the requests and then let them 833 - * fail. This is to mitigate hung processes. */ 834 - skd_recover_requests(skdev); 835 - else { 836 - dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n", 837 - skdev->drive_state); 838 - pci_disable_device(skdev->pdev); 839 - skd_disable_interrupts(skdev); 840 - skd_recover_requests(skdev); 841 - } 842 - 843 - /*start the queue so we can respond with error to requests */ 844 - /* wakeup anyone waiting for startup complete */ 845 - schedule_work(&skdev->start_queue); 846 - skdev->gendisk_on = -1; 847 - wake_up_interruptible(&skdev->waitq); 848 - break; 849 - 850 - case SKD_DRVR_STATE_RESUMING: 851 - case SKD_DRVR_STATE_STOPPING: 852 - case SKD_DRVR_STATE_SYNCING: 853 - case SKD_DRVR_STATE_FAULT: 854 - case SKD_DRVR_STATE_DISAPPEARED: 855 - default: 856 - break; 857 - } 858 - } 859 - 860 - static int skd_start_timer(struct skd_device *skdev) 861 - { 862 - int rc; 863 - 864 - timer_setup(&skdev->timer, skd_timer_tick, 0); 865 - 866 - rc = mod_timer(&skdev->timer, (jiffies + HZ)); 867 - if (rc) 868 - dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc); 869 - return rc; 870 - } 871 - 872 - static void skd_kill_timer(struct skd_device *skdev) 873 - { 874 - del_timer_sync(&skdev->timer); 875 - } 876 - 877 - /* 878 - ***************************************************************************** 879 - * INTERNAL REQUESTS -- generated by driver itself 880 - ***************************************************************************** 881 - */ 882 - 883 - static int skd_format_internal_skspcl(struct skd_device *skdev) 884 - { 885 - struct skd_special_context *skspcl = &skdev->internal_skspcl; 886 - struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 887 - struct fit_msg_hdr *fmh; 888 - uint64_t dma_address; 889 - struct skd_scsi_request *scsi; 890 - 891 - fmh = &skspcl->msg_buf->fmh; 892 - fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 893 - fmh->num_protocol_cmds_coalesced = 1; 894 - 895 - scsi = &skspcl->msg_buf->scsi[0]; 896 - memset(scsi, 0, sizeof(*scsi)); 897 - dma_address = skspcl->req.sksg_dma_address; 898 - scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); 899 - skspcl->req.n_sg = 1; 900 - sgd->control = FIT_SGD_CONTROL_LAST; 901 - sgd->byte_count = 0; 902 - sgd->host_side_addr = skspcl->db_dma_address; 903 - sgd->dev_side_addr = 0; 904 - sgd->next_desc_ptr = 0LL; 905 - 906 - return 1; 907 - } 908 - 909 - #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES 910 - 911 - static void skd_send_internal_skspcl(struct skd_device *skdev, 912 - struct skd_special_context *skspcl, 913 - u8 opcode) 914 - { 915 - struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 916 - struct skd_scsi_request *scsi; 917 - unsigned char *buf = skspcl->data_buf; 918 - int i; 919 - 920 - if (skspcl->req.state != SKD_REQ_STATE_IDLE) 921 - /* 922 - * A refresh is already in progress. 923 - * Just wait for it to finish. 
924 - */ 925 - return; 926 - 927 - skspcl->req.state = SKD_REQ_STATE_BUSY; 928 - 929 - scsi = &skspcl->msg_buf->scsi[0]; 930 - scsi->hdr.tag = skspcl->req.id; 931 - 932 - memset(scsi->cdb, 0, sizeof(scsi->cdb)); 933 - 934 - switch (opcode) { 935 - case TEST_UNIT_READY: 936 - scsi->cdb[0] = TEST_UNIT_READY; 937 - sgd->byte_count = 0; 938 - scsi->hdr.sg_list_len_bytes = 0; 939 - break; 940 - 941 - case READ_CAPACITY: 942 - scsi->cdb[0] = READ_CAPACITY; 943 - sgd->byte_count = SKD_N_READ_CAP_BYTES; 944 - scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 945 - break; 946 - 947 - case INQUIRY: 948 - scsi->cdb[0] = INQUIRY; 949 - scsi->cdb[1] = 0x01; /* evpd */ 950 - scsi->cdb[2] = 0x80; /* serial number page */ 951 - scsi->cdb[4] = 0x10; 952 - sgd->byte_count = 16; 953 - scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 954 - break; 955 - 956 - case SYNCHRONIZE_CACHE: 957 - scsi->cdb[0] = SYNCHRONIZE_CACHE; 958 - sgd->byte_count = 0; 959 - scsi->hdr.sg_list_len_bytes = 0; 960 - break; 961 - 962 - case WRITE_BUFFER: 963 - scsi->cdb[0] = WRITE_BUFFER; 964 - scsi->cdb[1] = 0x02; 965 - scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; 966 - scsi->cdb[8] = WR_BUF_SIZE & 0xFF; 967 - sgd->byte_count = WR_BUF_SIZE; 968 - scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 969 - /* fill incrementing byte pattern */ 970 - for (i = 0; i < sgd->byte_count; i++) 971 - buf[i] = i & 0xFF; 972 - break; 973 - 974 - case READ_BUFFER: 975 - scsi->cdb[0] = READ_BUFFER; 976 - scsi->cdb[1] = 0x02; 977 - scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; 978 - scsi->cdb[8] = WR_BUF_SIZE & 0xFF; 979 - sgd->byte_count = WR_BUF_SIZE; 980 - scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 981 - memset(skspcl->data_buf, 0, sgd->byte_count); 982 - break; 983 - 984 - default: 985 - SKD_ASSERT("Don't know what to send"); 986 - return; 987 - 988 - } 989 - skd_send_special_fitmsg(skdev, skspcl); 990 - } 991 - 992 - static void skd_refresh_device_data(struct skd_device *skdev) 993 - { 994 - struct skd_special_context *skspcl = &skdev->internal_skspcl; 995 - 996 - skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); 997 - } 998 - 999 - static int skd_chk_read_buf(struct skd_device *skdev, 1000 - struct skd_special_context *skspcl) 1001 - { 1002 - unsigned char *buf = skspcl->data_buf; 1003 - int i; 1004 - 1005 - /* check for incrementing byte pattern */ 1006 - for (i = 0; i < WR_BUF_SIZE; i++) 1007 - if (buf[i] != (i & 0xFF)) 1008 - return 1; 1009 - 1010 - return 0; 1011 - } 1012 - 1013 - static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, 1014 - u8 code, u8 qual, u8 fruc) 1015 - { 1016 - /* If the check condition is of special interest, log a message */ 1017 - if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) 1018 - && (code == 0x04) && (qual == 0x06)) { 1019 - dev_err(&skdev->pdev->dev, 1020 - "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 1021 - key, code, qual, fruc); 1022 - } 1023 - } 1024 - 1025 - static void skd_complete_internal(struct skd_device *skdev, 1026 - struct fit_completion_entry_v1 *skcomp, 1027 - struct fit_comp_error_info *skerr, 1028 - struct skd_special_context *skspcl) 1029 - { 1030 - u8 *buf = skspcl->data_buf; 1031 - u8 status; 1032 - int i; 1033 - struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0]; 1034 - 1035 - lockdep_assert_held(&skdev->lock); 1036 - 1037 - SKD_ASSERT(skspcl == &skdev->internal_skspcl); 1038 - 1039 - dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]); 1040 - 1041 - 
dma_sync_single_for_cpu(&skdev->pdev->dev, 1042 - skspcl->db_dma_address, 1043 - skspcl->req.sksg_list[0].byte_count, 1044 - DMA_BIDIRECTIONAL); 1045 - 1046 - skspcl->req.completion = *skcomp; 1047 - skspcl->req.state = SKD_REQ_STATE_IDLE; 1048 - 1049 - status = skspcl->req.completion.status; 1050 - 1051 - skd_log_check_status(skdev, status, skerr->key, skerr->code, 1052 - skerr->qual, skerr->fruc); 1053 - 1054 - switch (scsi->cdb[0]) { 1055 - case TEST_UNIT_READY: 1056 - if (status == SAM_STAT_GOOD) 1057 - skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); 1058 - else if ((status == SAM_STAT_CHECK_CONDITION) && 1059 - (skerr->key == MEDIUM_ERROR)) 1060 - skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); 1061 - else { 1062 - if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1063 - dev_dbg(&skdev->pdev->dev, 1064 - "TUR failed, don't send anymore state 0x%x\n", 1065 - skdev->state); 1066 - return; 1067 - } 1068 - dev_dbg(&skdev->pdev->dev, 1069 - "**** TUR failed, retry skerr\n"); 1070 - skd_send_internal_skspcl(skdev, skspcl, 1071 - TEST_UNIT_READY); 1072 - } 1073 - break; 1074 - 1075 - case WRITE_BUFFER: 1076 - if (status == SAM_STAT_GOOD) 1077 - skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); 1078 - else { 1079 - if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1080 - dev_dbg(&skdev->pdev->dev, 1081 - "write buffer failed, don't send anymore state 0x%x\n", 1082 - skdev->state); 1083 - return; 1084 - } 1085 - dev_dbg(&skdev->pdev->dev, 1086 - "**** write buffer failed, retry skerr\n"); 1087 - skd_send_internal_skspcl(skdev, skspcl, 1088 - TEST_UNIT_READY); 1089 - } 1090 - break; 1091 - 1092 - case READ_BUFFER: 1093 - if (status == SAM_STAT_GOOD) { 1094 - if (skd_chk_read_buf(skdev, skspcl) == 0) 1095 - skd_send_internal_skspcl(skdev, skspcl, 1096 - READ_CAPACITY); 1097 - else { 1098 - dev_err(&skdev->pdev->dev, 1099 - "*** W/R Buffer mismatch %d ***\n", 1100 - skdev->connect_retries); 1101 - if (skdev->connect_retries < 1102 - SKD_MAX_CONNECT_RETRIES) { 1103 - skdev->connect_retries++; 1104 - skd_soft_reset(skdev); 1105 - } else { 1106 - dev_err(&skdev->pdev->dev, 1107 - "W/R Buffer Connect Error\n"); 1108 - return; 1109 - } 1110 - } 1111 - 1112 - } else { 1113 - if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1114 - dev_dbg(&skdev->pdev->dev, 1115 - "read buffer failed, don't send anymore state 0x%x\n", 1116 - skdev->state); 1117 - return; 1118 - } 1119 - dev_dbg(&skdev->pdev->dev, 1120 - "**** read buffer failed, retry skerr\n"); 1121 - skd_send_internal_skspcl(skdev, skspcl, 1122 - TEST_UNIT_READY); 1123 - } 1124 - break; 1125 - 1126 - case READ_CAPACITY: 1127 - skdev->read_cap_is_valid = 0; 1128 - if (status == SAM_STAT_GOOD) { 1129 - skdev->read_cap_last_lba = 1130 - (buf[0] << 24) | (buf[1] << 16) | 1131 - (buf[2] << 8) | buf[3]; 1132 - skdev->read_cap_blocksize = 1133 - (buf[4] << 24) | (buf[5] << 16) | 1134 - (buf[6] << 8) | buf[7]; 1135 - 1136 - dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n", 1137 - skdev->read_cap_last_lba, 1138 - skdev->read_cap_blocksize); 1139 - 1140 - set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 1141 - 1142 - skdev->read_cap_is_valid = 1; 1143 - 1144 - skd_send_internal_skspcl(skdev, skspcl, INQUIRY); 1145 - } else if ((status == SAM_STAT_CHECK_CONDITION) && 1146 - (skerr->key == MEDIUM_ERROR)) { 1147 - skdev->read_cap_last_lba = ~0; 1148 - set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 1149 - dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n"); 1150 - 
skd_send_internal_skspcl(skdev, skspcl, INQUIRY); 1151 - } else { 1152 - dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n"); 1153 - skd_send_internal_skspcl(skdev, skspcl, 1154 - TEST_UNIT_READY); 1155 - } 1156 - break; 1157 - 1158 - case INQUIRY: 1159 - skdev->inquiry_is_valid = 0; 1160 - if (status == SAM_STAT_GOOD) { 1161 - skdev->inquiry_is_valid = 1; 1162 - 1163 - for (i = 0; i < 12; i++) 1164 - skdev->inq_serial_num[i] = buf[i + 4]; 1165 - skdev->inq_serial_num[12] = 0; 1166 - } 1167 - 1168 - if (skd_unquiesce_dev(skdev) < 0) 1169 - dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n"); 1170 - /* connection is complete */ 1171 - skdev->connect_retries = 0; 1172 - break; 1173 - 1174 - case SYNCHRONIZE_CACHE: 1175 - if (status == SAM_STAT_GOOD) 1176 - skdev->sync_done = 1; 1177 - else 1178 - skdev->sync_done = -1; 1179 - wake_up_interruptible(&skdev->waitq); 1180 - break; 1181 - 1182 - default: 1183 - SKD_ASSERT("we didn't send this"); 1184 - } 1185 - } 1186 - 1187 - /* 1188 - ***************************************************************************** 1189 - * FIT MESSAGES 1190 - ***************************************************************************** 1191 - */ 1192 - 1193 - static void skd_send_fitmsg(struct skd_device *skdev, 1194 - struct skd_fitmsg_context *skmsg) 1195 - { 1196 - u64 qcmd; 1197 - 1198 - dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n", 1199 - &skmsg->mb_dma_address, skd_in_flight(skdev)); 1200 - dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf); 1201 - 1202 - qcmd = skmsg->mb_dma_address; 1203 - qcmd |= FIT_QCMD_QID_NORMAL; 1204 - 1205 - if (unlikely(skdev->dbg_level > 1)) { 1206 - u8 *bp = (u8 *)skmsg->msg_buf; 1207 - int i; 1208 - for (i = 0; i < skmsg->length; i += 8) { 1209 - dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i, 1210 - &bp[i]); 1211 - if (i == 0) 1212 - i = 64 - 8; 1213 - } 1214 - } 1215 - 1216 - if (skmsg->length > 256) 1217 - qcmd |= FIT_QCMD_MSGSIZE_512; 1218 - else if (skmsg->length > 128) 1219 - qcmd |= FIT_QCMD_MSGSIZE_256; 1220 - else if (skmsg->length > 64) 1221 - qcmd |= FIT_QCMD_MSGSIZE_128; 1222 - else 1223 - /* 1224 - * This makes no sense because the FIT msg header is 1225 - * 64 bytes. If the msg is only 64 bytes long it has 1226 - * no payload. 1227 - */ 1228 - qcmd |= FIT_QCMD_MSGSIZE_64; 1229 - 1230 - dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address, 1231 - skmsg->length, DMA_TO_DEVICE); 1232 - 1233 - /* Make sure skd_msg_buf is written before the doorbell is triggered. 
*/ 1234 - smp_wmb(); 1235 - 1236 - SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1237 - } 1238 - 1239 - static void skd_send_special_fitmsg(struct skd_device *skdev, 1240 - struct skd_special_context *skspcl) 1241 - { 1242 - u64 qcmd; 1243 - 1244 - WARN_ON_ONCE(skspcl->req.n_sg != 1); 1245 - 1246 - if (unlikely(skdev->dbg_level > 1)) { 1247 - u8 *bp = (u8 *)skspcl->msg_buf; 1248 - int i; 1249 - 1250 - for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { 1251 - dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i, 1252 - &bp[i]); 1253 - if (i == 0) 1254 - i = 64 - 8; 1255 - } 1256 - 1257 - dev_dbg(&skdev->pdev->dev, 1258 - "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n", 1259 - skspcl, skspcl->req.id, skspcl->req.sksg_list, 1260 - &skspcl->req.sksg_dma_address); 1261 - for (i = 0; i < skspcl->req.n_sg; i++) { 1262 - struct fit_sg_descriptor *sgd = 1263 - &skspcl->req.sksg_list[i]; 1264 - 1265 - dev_dbg(&skdev->pdev->dev, 1266 - " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", 1267 - i, sgd->byte_count, sgd->control, 1268 - sgd->host_side_addr, sgd->next_desc_ptr); 1269 - } 1270 - } 1271 - 1272 - /* 1273 - * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr 1274 - * and one 64-byte SSDI command. 1275 - */ 1276 - qcmd = skspcl->mb_dma_address; 1277 - qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; 1278 - 1279 - dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address, 1280 - SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE); 1281 - dma_sync_single_for_device(&skdev->pdev->dev, 1282 - skspcl->req.sksg_dma_address, 1283 - 1 * sizeof(struct fit_sg_descriptor), 1284 - DMA_TO_DEVICE); 1285 - dma_sync_single_for_device(&skdev->pdev->dev, 1286 - skspcl->db_dma_address, 1287 - skspcl->req.sksg_list[0].byte_count, 1288 - DMA_BIDIRECTIONAL); 1289 - 1290 - /* Make sure skd_msg_buf is written before the doorbell is triggered. 
*/ 1291 - smp_wmb(); 1292 - 1293 - SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1294 - } 1295 - 1296 - /* 1297 - ***************************************************************************** 1298 - * COMPLETION QUEUE 1299 - ***************************************************************************** 1300 - */ 1301 - 1302 - static void skd_complete_other(struct skd_device *skdev, 1303 - struct fit_completion_entry_v1 *skcomp, 1304 - struct fit_comp_error_info *skerr); 1305 - 1306 - struct sns_info { 1307 - u8 type; 1308 - u8 stat; 1309 - u8 key; 1310 - u8 asc; 1311 - u8 ascq; 1312 - u8 mask; 1313 - enum skd_check_status_action action; 1314 - }; 1315 - 1316 - static struct sns_info skd_chkstat_table[] = { 1317 - /* Good */ 1318 - { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, 1319 - SKD_CHECK_STATUS_REPORT_GOOD }, 1320 - 1321 - /* Smart alerts */ 1322 - { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ 1323 - SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 1324 - { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ 1325 - SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 1326 - { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ 1327 - SKD_CHECK_STATUS_REPORT_SMART_ALERT }, 1328 - 1329 - /* Retry (with limits) */ 1330 - { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ 1331 - SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1332 - { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ 1333 - SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1334 - { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ 1335 - SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1336 - { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ 1337 - SKD_CHECK_STATUS_REQUEUE_REQUEST }, 1338 - 1339 - /* Busy (or about to be) */ 1340 - { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ 1341 - SKD_CHECK_STATUS_BUSY_IMMINENT }, 1342 - }; 1343 - 1344 - /* 1345 - * Look up status and sense data to decide how to handle the error 1346 - * from the device. 1347 - * mask says which fields must match e.g., mask=0x18 means check 1348 - * type and stat, ignore key, asc, ascq. 1349 - */ 1350 - 1351 - static enum skd_check_status_action 1352 - skd_check_status(struct skd_device *skdev, 1353 - u8 cmp_status, struct fit_comp_error_info *skerr) 1354 - { 1355 - int i; 1356 - 1357 - dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 1358 - skerr->key, skerr->code, skerr->qual, skerr->fruc); 1359 - 1360 - dev_dbg(&skdev->pdev->dev, 1361 - "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", 1362 - skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual, 1363 - skerr->fruc); 1364 - 1365 - /* Does the info match an entry in the good category? 
*/ 1366 - for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) { 1367 - struct sns_info *sns = &skd_chkstat_table[i]; 1368 - 1369 - if (sns->mask & 0x10) 1370 - if (skerr->type != sns->type) 1371 - continue; 1372 - 1373 - if (sns->mask & 0x08) 1374 - if (cmp_status != sns->stat) 1375 - continue; 1376 - 1377 - if (sns->mask & 0x04) 1378 - if (skerr->key != sns->key) 1379 - continue; 1380 - 1381 - if (sns->mask & 0x02) 1382 - if (skerr->code != sns->asc) 1383 - continue; 1384 - 1385 - if (sns->mask & 0x01) 1386 - if (skerr->qual != sns->ascq) 1387 - continue; 1388 - 1389 - if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { 1390 - dev_err(&skdev->pdev->dev, 1391 - "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n", 1392 - skerr->key, skerr->code, skerr->qual); 1393 - } 1394 - return sns->action; 1395 - } 1396 - 1397 - /* No other match, so nonzero status means error, 1398 - * zero status means good 1399 - */ 1400 - if (cmp_status) { 1401 - dev_dbg(&skdev->pdev->dev, "status check: error\n"); 1402 - return SKD_CHECK_STATUS_REPORT_ERROR; 1403 - } 1404 - 1405 - dev_dbg(&skdev->pdev->dev, "status check good default\n"); 1406 - return SKD_CHECK_STATUS_REPORT_GOOD; 1407 - } 1408 - 1409 - static void skd_resolve_req_exception(struct skd_device *skdev, 1410 - struct skd_request_context *skreq, 1411 - struct request *req) 1412 - { 1413 - u8 cmp_status = skreq->completion.status; 1414 - 1415 - switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { 1416 - case SKD_CHECK_STATUS_REPORT_GOOD: 1417 - case SKD_CHECK_STATUS_REPORT_SMART_ALERT: 1418 - skreq->status = BLK_STS_OK; 1419 - if (likely(!blk_should_fake_timeout(req->q))) 1420 - blk_mq_complete_request(req); 1421 - break; 1422 - 1423 - case SKD_CHECK_STATUS_BUSY_IMMINENT: 1424 - skd_log_skreq(skdev, skreq, "retry(busy)"); 1425 - blk_mq_requeue_request(req, true); 1426 - dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); 1427 - skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; 1428 - skdev->timer_countdown = SKD_TIMER_MINUTES(20); 1429 - skd_quiesce_dev(skdev); 1430 - break; 1431 - 1432 - case SKD_CHECK_STATUS_REQUEUE_REQUEST: 1433 - if (++skreq->retries < SKD_MAX_RETRIES) { 1434 - skd_log_skreq(skdev, skreq, "retry"); 1435 - blk_mq_requeue_request(req, true); 1436 - break; 1437 - } 1438 - fallthrough; 1439 - 1440 - case SKD_CHECK_STATUS_REPORT_ERROR: 1441 - default: 1442 - skreq->status = BLK_STS_IOERR; 1443 - if (likely(!blk_should_fake_timeout(req->q))) 1444 - blk_mq_complete_request(req); 1445 - break; 1446 - } 1447 - } 1448 - 1449 - static void skd_release_skreq(struct skd_device *skdev, 1450 - struct skd_request_context *skreq) 1451 - { 1452 - /* 1453 - * Reclaim the skd_request_context 1454 - */ 1455 - skreq->state = SKD_REQ_STATE_IDLE; 1456 - } 1457 - 1458 - static int skd_isr_completion_posted(struct skd_device *skdev, 1459 - int limit, int *enqueued) 1460 - { 1461 - struct fit_completion_entry_v1 *skcmp; 1462 - struct fit_comp_error_info *skerr; 1463 - u16 req_id; 1464 - u32 tag; 1465 - u16 hwq = 0; 1466 - struct request *rq; 1467 - struct skd_request_context *skreq; 1468 - u16 cmp_cntxt; 1469 - u8 cmp_status; 1470 - u8 cmp_cycle; 1471 - u32 cmp_bytes; 1472 - int rc = 0; 1473 - int processed = 0; 1474 - 1475 - lockdep_assert_held(&skdev->lock); 1476 - 1477 - for (;; ) { 1478 - SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); 1479 - 1480 - skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; 1481 - cmp_cycle = skcmp->cycle; 1482 - cmp_cntxt = skcmp->tag; 1483 - cmp_status = skcmp->status; 1484 - cmp_bytes = 
be32_to_cpu(skcmp->num_returned_bytes); 1485 - 1486 - skerr = &skdev->skerr_table[skdev->skcomp_ix]; 1487 - 1488 - dev_dbg(&skdev->pdev->dev, 1489 - "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n", 1490 - skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle, 1491 - cmp_cntxt, cmp_status, skd_in_flight(skdev), 1492 - cmp_bytes, skdev->proto_ver); 1493 - 1494 - if (cmp_cycle != skdev->skcomp_cycle) { 1495 - dev_dbg(&skdev->pdev->dev, "end of completions\n"); 1496 - break; 1497 - } 1498 - /* 1499 - * Update the completion queue head index and possibly 1500 - * the completion cycle count. 8-bit wrap-around. 1501 - */ 1502 - skdev->skcomp_ix++; 1503 - if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { 1504 - skdev->skcomp_ix = 0; 1505 - skdev->skcomp_cycle++; 1506 - } 1507 - 1508 - /* 1509 - * The command context is a unique 32-bit ID. The low order 1510 - * bits help locate the request. The request is usually a 1511 - * r/w request (see skd_start() above) or a special request. 1512 - */ 1513 - req_id = cmp_cntxt; 1514 - tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK; 1515 - 1516 - /* Is this other than a r/w request? */ 1517 - if (tag >= skdev->num_req_context) { 1518 - /* 1519 - * This is not a completion for a r/w request. 1520 - */ 1521 - WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], 1522 - tag)); 1523 - skd_complete_other(skdev, skcmp, skerr); 1524 - continue; 1525 - } 1526 - 1527 - rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag); 1528 - if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt, 1529 - tag)) 1530 - continue; 1531 - skreq = blk_mq_rq_to_pdu(rq); 1532 - 1533 - /* 1534 - * Make sure the request ID for the slot matches. 1535 - */ 1536 - if (skreq->id != req_id) { 1537 - dev_err(&skdev->pdev->dev, 1538 - "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n", 1539 - req_id, skreq->id, cmp_cntxt); 1540 - 1541 - continue; 1542 - } 1543 - 1544 - SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); 1545 - 1546 - skreq->completion = *skcmp; 1547 - if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { 1548 - skreq->err_info = *skerr; 1549 - skd_log_check_status(skdev, cmp_status, skerr->key, 1550 - skerr->code, skerr->qual, 1551 - skerr->fruc); 1552 - } 1553 - /* Release DMA resources for the request. */ 1554 - if (skreq->n_sg > 0) 1555 - skd_postop_sg_list(skdev, skreq); 1556 - 1557 - skd_release_skreq(skdev, skreq); 1558 - 1559 - /* 1560 - * Capture the outcome and post it back to the native request. 
1561 - */ 1562 - if (likely(cmp_status == SAM_STAT_GOOD)) { 1563 - skreq->status = BLK_STS_OK; 1564 - if (likely(!blk_should_fake_timeout(rq->q))) 1565 - blk_mq_complete_request(rq); 1566 - } else { 1567 - skd_resolve_req_exception(skdev, skreq, rq); 1568 - } 1569 - 1570 - /* skd_isr_comp_limit equal zero means no limit */ 1571 - if (limit) { 1572 - if (++processed >= limit) { 1573 - rc = 1; 1574 - break; 1575 - } 1576 - } 1577 - } 1578 - 1579 - if (skdev->state == SKD_DRVR_STATE_PAUSING && 1580 - skd_in_flight(skdev) == 0) { 1581 - skdev->state = SKD_DRVR_STATE_PAUSED; 1582 - wake_up_interruptible(&skdev->waitq); 1583 - } 1584 - 1585 - return rc; 1586 - } 1587 - 1588 - static void skd_complete_other(struct skd_device *skdev, 1589 - struct fit_completion_entry_v1 *skcomp, 1590 - struct fit_comp_error_info *skerr) 1591 - { 1592 - u32 req_id = 0; 1593 - u32 req_table; 1594 - u32 req_slot; 1595 - struct skd_special_context *skspcl; 1596 - 1597 - lockdep_assert_held(&skdev->lock); 1598 - 1599 - req_id = skcomp->tag; 1600 - req_table = req_id & SKD_ID_TABLE_MASK; 1601 - req_slot = req_id & SKD_ID_SLOT_MASK; 1602 - 1603 - dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table, 1604 - req_id, req_slot); 1605 - 1606 - /* 1607 - * Based on the request id, determine how to dispatch this completion. 1608 - * This swich/case is finding the good cases and forwarding the 1609 - * completion entry. Errors are reported below the switch. 1610 - */ 1611 - switch (req_table) { 1612 - case SKD_ID_RW_REQUEST: 1613 - /* 1614 - * The caller, skd_isr_completion_posted() above, 1615 - * handles r/w requests. The only way we get here 1616 - * is if the req_slot is out of bounds. 1617 - */ 1618 - break; 1619 - 1620 - case SKD_ID_INTERNAL: 1621 - if (req_slot == 0) { 1622 - skspcl = &skdev->internal_skspcl; 1623 - if (skspcl->req.id == req_id && 1624 - skspcl->req.state == SKD_REQ_STATE_BUSY) { 1625 - skd_complete_internal(skdev, 1626 - skcomp, skerr, skspcl); 1627 - return; 1628 - } 1629 - } 1630 - break; 1631 - 1632 - case SKD_ID_FIT_MSG: 1633 - /* 1634 - * These id's should never appear in a completion record. 1635 - */ 1636 - break; 1637 - 1638 - default: 1639 - /* 1640 - * These id's should never appear anywhere; 1641 - */ 1642 - break; 1643 - } 1644 - 1645 - /* 1646 - * If we get here it is a bad or stale id. 1647 - */ 1648 - } 1649 - 1650 - static void skd_reset_skcomp(struct skd_device *skdev) 1651 - { 1652 - memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE); 1653 - 1654 - skdev->skcomp_ix = 0; 1655 - skdev->skcomp_cycle = 1; 1656 - } 1657 - 1658 - /* 1659 - ***************************************************************************** 1660 - * INTERRUPTS 1661 - ***************************************************************************** 1662 - */ 1663 - static void skd_completion_worker(struct work_struct *work) 1664 - { 1665 - struct skd_device *skdev = 1666 - container_of(work, struct skd_device, completion_worker); 1667 - unsigned long flags; 1668 - int flush_enqueued = 0; 1669 - 1670 - spin_lock_irqsave(&skdev->lock, flags); 1671 - 1672 - /* 1673 - * pass in limit=0, which means no limit.. 
1674 - * process everything in compq 1675 - */ 1676 - skd_isr_completion_posted(skdev, 0, &flush_enqueued); 1677 - schedule_work(&skdev->start_queue); 1678 - 1679 - spin_unlock_irqrestore(&skdev->lock, flags); 1680 - } 1681 - 1682 - static void skd_isr_msg_from_dev(struct skd_device *skdev); 1683 - 1684 - static irqreturn_t 1685 - skd_isr(int irq, void *ptr) 1686 - { 1687 - struct skd_device *skdev = ptr; 1688 - u32 intstat; 1689 - u32 ack; 1690 - int rc = 0; 1691 - int deferred = 0; 1692 - int flush_enqueued = 0; 1693 - 1694 - spin_lock(&skdev->lock); 1695 - 1696 - for (;; ) { 1697 - intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); 1698 - 1699 - ack = FIT_INT_DEF_MASK; 1700 - ack &= intstat; 1701 - 1702 - dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat, 1703 - ack); 1704 - 1705 - /* As long as there is an int pending on device, keep 1706 - * running loop. When none, get out, but if we've never 1707 - * done any processing, call completion handler? 1708 - */ 1709 - if (ack == 0) { 1710 - /* No interrupts on device, but run the completion 1711 - * processor anyway? 1712 - */ 1713 - if (rc == 0) 1714 - if (likely (skdev->state 1715 - == SKD_DRVR_STATE_ONLINE)) 1716 - deferred = 1; 1717 - break; 1718 - } 1719 - 1720 - rc = IRQ_HANDLED; 1721 - 1722 - SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); 1723 - 1724 - if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && 1725 - (skdev->state != SKD_DRVR_STATE_STOPPING))) { 1726 - if (intstat & FIT_ISH_COMPLETION_POSTED) { 1727 - /* 1728 - * If we have already deferred completion 1729 - * processing, don't bother running it again 1730 - */ 1731 - if (deferred == 0) 1732 - deferred = 1733 - skd_isr_completion_posted(skdev, 1734 - skd_isr_comp_limit, &flush_enqueued); 1735 - } 1736 - 1737 - if (intstat & FIT_ISH_FW_STATE_CHANGE) { 1738 - skd_isr_fwstate(skdev); 1739 - if (skdev->state == SKD_DRVR_STATE_FAULT || 1740 - skdev->state == 1741 - SKD_DRVR_STATE_DISAPPEARED) { 1742 - spin_unlock(&skdev->lock); 1743 - return rc; 1744 - } 1745 - } 1746 - 1747 - if (intstat & FIT_ISH_MSG_FROM_DEV) 1748 - skd_isr_msg_from_dev(skdev); 1749 - } 1750 - } 1751 - 1752 - if (unlikely(flush_enqueued)) 1753 - schedule_work(&skdev->start_queue); 1754 - 1755 - if (deferred) 1756 - schedule_work(&skdev->completion_worker); 1757 - else if (!flush_enqueued) 1758 - schedule_work(&skdev->start_queue); 1759 - 1760 - spin_unlock(&skdev->lock); 1761 - 1762 - return rc; 1763 - } 1764 - 1765 - static void skd_drive_fault(struct skd_device *skdev) 1766 - { 1767 - skdev->state = SKD_DRVR_STATE_FAULT; 1768 - dev_err(&skdev->pdev->dev, "Drive FAULT\n"); 1769 - } 1770 - 1771 - static void skd_drive_disappeared(struct skd_device *skdev) 1772 - { 1773 - skdev->state = SKD_DRVR_STATE_DISAPPEARED; 1774 - dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n"); 1775 - } 1776 - 1777 - static void skd_isr_fwstate(struct skd_device *skdev) 1778 - { 1779 - u32 sense; 1780 - u32 state; 1781 - u32 mtd; 1782 - int prev_driver_state = skdev->state; 1783 - 1784 - sense = SKD_READL(skdev, FIT_STATUS); 1785 - state = sense & FIT_SR_DRIVE_STATE_MASK; 1786 - 1787 - dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n", 1788 - skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 1789 - skd_drive_state_to_str(state), state); 1790 - 1791 - skdev->drive_state = state; 1792 - 1793 - switch (skdev->drive_state) { 1794 - case FIT_SR_DRIVE_INIT: 1795 - if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { 1796 - skd_disable_interrupts(skdev); 1797 - break; 1798 - } 1799 - if (skdev->state == 
SKD_DRVR_STATE_RESTARTING) 1800 - skd_recover_requests(skdev); 1801 - if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 1802 - skdev->timer_countdown = SKD_STARTING_TIMO; 1803 - skdev->state = SKD_DRVR_STATE_STARTING; 1804 - skd_soft_reset(skdev); 1805 - break; 1806 - } 1807 - mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); 1808 - SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1809 - skdev->last_mtd = mtd; 1810 - break; 1811 - 1812 - case FIT_SR_DRIVE_ONLINE: 1813 - skdev->cur_max_queue_depth = skd_max_queue_depth; 1814 - if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) 1815 - skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; 1816 - 1817 - skdev->queue_low_water_mark = 1818 - skdev->cur_max_queue_depth * 2 / 3 + 1; 1819 - if (skdev->queue_low_water_mark < 1) 1820 - skdev->queue_low_water_mark = 1; 1821 - dev_info(&skdev->pdev->dev, 1822 - "Queue depth limit=%d dev=%d lowat=%d\n", 1823 - skdev->cur_max_queue_depth, 1824 - skdev->dev_max_queue_depth, 1825 - skdev->queue_low_water_mark); 1826 - 1827 - skd_refresh_device_data(skdev); 1828 - break; 1829 - 1830 - case FIT_SR_DRIVE_BUSY: 1831 - skdev->state = SKD_DRVR_STATE_BUSY; 1832 - skdev->timer_countdown = SKD_BUSY_TIMO; 1833 - skd_quiesce_dev(skdev); 1834 - break; 1835 - case FIT_SR_DRIVE_BUSY_SANITIZE: 1836 - /* set timer for 3 seconds, we'll abort any unfinished 1837 - * commands after that expires 1838 - */ 1839 - skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 1840 - skdev->timer_countdown = SKD_TIMER_SECONDS(3); 1841 - schedule_work(&skdev->start_queue); 1842 - break; 1843 - case FIT_SR_DRIVE_BUSY_ERASE: 1844 - skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 1845 - skdev->timer_countdown = SKD_BUSY_TIMO; 1846 - break; 1847 - case FIT_SR_DRIVE_OFFLINE: 1848 - skdev->state = SKD_DRVR_STATE_IDLE; 1849 - break; 1850 - case FIT_SR_DRIVE_SOFT_RESET: 1851 - switch (skdev->state) { 1852 - case SKD_DRVR_STATE_STARTING: 1853 - case SKD_DRVR_STATE_RESTARTING: 1854 - /* Expected by a caller of skd_soft_reset() */ 1855 - break; 1856 - default: 1857 - skdev->state = SKD_DRVR_STATE_RESTARTING; 1858 - break; 1859 - } 1860 - break; 1861 - case FIT_SR_DRIVE_FW_BOOTING: 1862 - dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n"); 1863 - skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 1864 - skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 1865 - break; 1866 - 1867 - case FIT_SR_DRIVE_DEGRADED: 1868 - case FIT_SR_PCIE_LINK_DOWN: 1869 - case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 1870 - break; 1871 - 1872 - case FIT_SR_DRIVE_FAULT: 1873 - skd_drive_fault(skdev); 1874 - skd_recover_requests(skdev); 1875 - schedule_work(&skdev->start_queue); 1876 - break; 1877 - 1878 - /* PCIe bus returned all Fs? */ 1879 - case 0xFF: 1880 - dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state, 1881 - sense); 1882 - skd_drive_disappeared(skdev); 1883 - skd_recover_requests(skdev); 1884 - schedule_work(&skdev->start_queue); 1885 - break; 1886 - default: 1887 - /* 1888 - * Uknown FW State. Wait for a state we recognize. 
1889 - */ 1890 - break; 1891 - } 1892 - dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", 1893 - skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 1894 - skd_skdev_state_to_str(skdev->state), skdev->state); 1895 - } 1896 - 1897 - static bool skd_recover_request(struct request *req, void *data, bool reserved) 1898 - { 1899 - struct skd_device *const skdev = data; 1900 - struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); 1901 - 1902 - if (skreq->state != SKD_REQ_STATE_BUSY) 1903 - return true; 1904 - 1905 - skd_log_skreq(skdev, skreq, "recover"); 1906 - 1907 - /* Release DMA resources for the request. */ 1908 - if (skreq->n_sg > 0) 1909 - skd_postop_sg_list(skdev, skreq); 1910 - 1911 - skreq->state = SKD_REQ_STATE_IDLE; 1912 - skreq->status = BLK_STS_IOERR; 1913 - blk_mq_complete_request(req); 1914 - return true; 1915 - } 1916 - 1917 - static void skd_recover_requests(struct skd_device *skdev) 1918 - { 1919 - blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev); 1920 - } 1921 - 1922 - static void skd_isr_msg_from_dev(struct skd_device *skdev) 1923 - { 1924 - u32 mfd; 1925 - u32 mtd; 1926 - u32 data; 1927 - 1928 - mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 1929 - 1930 - dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd, 1931 - skdev->last_mtd); 1932 - 1933 - /* ignore any mtd that is an ack for something we didn't send */ 1934 - if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) 1935 - return; 1936 - 1937 - switch (FIT_MXD_TYPE(mfd)) { 1938 - case FIT_MTD_FITFW_INIT: 1939 - skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 1940 - 1941 - if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 1942 - dev_err(&skdev->pdev->dev, "protocol mismatch\n"); 1943 - dev_err(&skdev->pdev->dev, " got=%d support=%d\n", 1944 - skdev->proto_ver, FIT_PROTOCOL_VERSION_1); 1945 - dev_err(&skdev->pdev->dev, " please upgrade driver\n"); 1946 - skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 1947 - skd_soft_reset(skdev); 1948 - break; 1949 - } 1950 - mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); 1951 - SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1952 - skdev->last_mtd = mtd; 1953 - break; 1954 - 1955 - case FIT_MTD_GET_CMDQ_DEPTH: 1956 - skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); 1957 - mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, 1958 - SKD_N_COMPLETION_ENTRY); 1959 - SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1960 - skdev->last_mtd = mtd; 1961 - break; 1962 - 1963 - case FIT_MTD_SET_COMPQ_DEPTH: 1964 - SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); 1965 - mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); 1966 - SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1967 - skdev->last_mtd = mtd; 1968 - break; 1969 - 1970 - case FIT_MTD_SET_COMPQ_ADDR: 1971 - skd_reset_skcomp(skdev); 1972 - mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); 1973 - SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1974 - skdev->last_mtd = mtd; 1975 - break; 1976 - 1977 - case FIT_MTD_CMD_LOG_HOST_ID: 1978 - /* hardware interface overflows in y2106 */ 1979 - skdev->connect_time_stamp = (u32)ktime_get_real_seconds(); 1980 - data = skdev->connect_time_stamp & 0xFFFF; 1981 - mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); 1982 - SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1983 - skdev->last_mtd = mtd; 1984 - break; 1985 - 1986 - case FIT_MTD_CMD_LOG_TIME_STAMP_LO: 1987 - skdev->drive_jiffies = FIT_MXD_DATA(mfd); 1988 - data = (skdev->connect_time_stamp >> 16) & 0xFFFF; 1989 - mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); 1990 - 
SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1991 - skdev->last_mtd = mtd; 1992 - break; 1993 - 1994 - case FIT_MTD_CMD_LOG_TIME_STAMP_HI: 1995 - skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); 1996 - mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); 1997 - SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1998 - skdev->last_mtd = mtd; 1999 - 2000 - dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n", 2001 - skdev->connect_time_stamp, skdev->drive_jiffies); 2002 - break; 2003 - 2004 - case FIT_MTD_ARM_QUEUE: 2005 - skdev->last_mtd = 0; 2006 - /* 2007 - * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. 2008 - */ 2009 - break; 2010 - 2011 - default: 2012 - break; 2013 - } 2014 - } 2015 - 2016 - static void skd_disable_interrupts(struct skd_device *skdev) 2017 - { 2018 - u32 sense; 2019 - 2020 - sense = SKD_READL(skdev, FIT_CONTROL); 2021 - sense &= ~FIT_CR_ENABLE_INTERRUPTS; 2022 - SKD_WRITEL(skdev, sense, FIT_CONTROL); 2023 - dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense); 2024 - 2025 - /* Note that the 1s is written. A 1-bit means 2026 - * disable, a 0 means enable. 2027 - */ 2028 - SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); 2029 - } 2030 - 2031 - static void skd_enable_interrupts(struct skd_device *skdev) 2032 - { 2033 - u32 val; 2034 - 2035 - /* unmask interrupts first */ 2036 - val = FIT_ISH_FW_STATE_CHANGE + 2037 - FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; 2038 - 2039 - /* Note that the compliment of mask is written. A 1-bit means 2040 - * disable, a 0 means enable. */ 2041 - SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 2042 - dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val); 2043 - 2044 - val = SKD_READL(skdev, FIT_CONTROL); 2045 - val |= FIT_CR_ENABLE_INTERRUPTS; 2046 - dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); 2047 - SKD_WRITEL(skdev, val, FIT_CONTROL); 2048 - } 2049 - 2050 - /* 2051 - ***************************************************************************** 2052 - * START, STOP, RESTART, QUIESCE, UNQUIESCE 2053 - ***************************************************************************** 2054 - */ 2055 - 2056 - static void skd_soft_reset(struct skd_device *skdev) 2057 - { 2058 - u32 val; 2059 - 2060 - val = SKD_READL(skdev, FIT_CONTROL); 2061 - val |= (FIT_CR_SOFT_RESET); 2062 - dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); 2063 - SKD_WRITEL(skdev, val, FIT_CONTROL); 2064 - } 2065 - 2066 - static void skd_start_device(struct skd_device *skdev) 2067 - { 2068 - unsigned long flags; 2069 - u32 sense; 2070 - u32 state; 2071 - 2072 - spin_lock_irqsave(&skdev->lock, flags); 2073 - 2074 - /* ack all ghost interrupts */ 2075 - SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2076 - 2077 - sense = SKD_READL(skdev, FIT_STATUS); 2078 - 2079 - dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense); 2080 - 2081 - state = sense & FIT_SR_DRIVE_STATE_MASK; 2082 - skdev->drive_state = state; 2083 - skdev->last_mtd = 0; 2084 - 2085 - skdev->state = SKD_DRVR_STATE_STARTING; 2086 - skdev->timer_countdown = SKD_STARTING_TIMO; 2087 - 2088 - skd_enable_interrupts(skdev); 2089 - 2090 - switch (skdev->drive_state) { 2091 - case FIT_SR_DRIVE_OFFLINE: 2092 - dev_err(&skdev->pdev->dev, "Drive offline...\n"); 2093 - break; 2094 - 2095 - case FIT_SR_DRIVE_FW_BOOTING: 2096 - dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n"); 2097 - skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2098 - skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 2099 - break; 2100 - 2101 - case FIT_SR_DRIVE_BUSY_SANITIZE: 2102 - dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n"); 
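/*
 * [Editorial note, not part of the removed skd driver code above.]
 * skd_isr_msg_from_dev() implements the device bring-up handshake as a
 * chain of single outstanding host-to-device messages: FITFW_INIT ->
 * GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR ->
 * CMD_LOG_HOST_ID -> TIME_STAMP_LO -> TIME_STAMP_HI -> ARM_QUEUE. Each
 * step is built with FIT_MXD_CONS(), remembered in skdev->last_mtd,
 * and the next step is only issued once the device echoes the same
 * message type back through FIT_MSG_FROM_DEVICE. A minimal sketch of
 * one such step; the helper name is hypothetical, the registers and
 * fields are the ones used in the code above:
 */
static void example_post_mtd(struct skd_device *skdev, u32 type, u32 data)
{
	u32 mtd = FIT_MXD_CONS(type, 0, data);

	SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
	/* replies whose type does not match last_mtd are ignored */
	skdev->last_mtd = mtd;
}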
2103 - skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2104 - skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2105 - break; 2106 - 2107 - case FIT_SR_DRIVE_BUSY_ERASE: 2108 - dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n"); 2109 - skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2110 - skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2111 - break; 2112 - 2113 - case FIT_SR_DRIVE_INIT: 2114 - case FIT_SR_DRIVE_ONLINE: 2115 - skd_soft_reset(skdev); 2116 - break; 2117 - 2118 - case FIT_SR_DRIVE_BUSY: 2119 - dev_err(&skdev->pdev->dev, "Drive Busy...\n"); 2120 - skdev->state = SKD_DRVR_STATE_BUSY; 2121 - skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2122 - break; 2123 - 2124 - case FIT_SR_DRIVE_SOFT_RESET: 2125 - dev_err(&skdev->pdev->dev, "drive soft reset in prog\n"); 2126 - break; 2127 - 2128 - case FIT_SR_DRIVE_FAULT: 2129 - /* Fault state is bad...soft reset won't do it... 2130 - * Hard reset, maybe, but does it work on device? 2131 - * For now, just fault so the system doesn't hang. 2132 - */ 2133 - skd_drive_fault(skdev); 2134 - /*start the queue so we can respond with error to requests */ 2135 - dev_dbg(&skdev->pdev->dev, "starting queue\n"); 2136 - schedule_work(&skdev->start_queue); 2137 - skdev->gendisk_on = -1; 2138 - wake_up_interruptible(&skdev->waitq); 2139 - break; 2140 - 2141 - case 0xFF: 2142 - /* Most likely the device isn't there or isn't responding 2143 - * to the BAR1 addresses. */ 2144 - skd_drive_disappeared(skdev); 2145 - /*start the queue so we can respond with error to requests */ 2146 - dev_dbg(&skdev->pdev->dev, 2147 - "starting queue to error-out reqs\n"); 2148 - schedule_work(&skdev->start_queue); 2149 - skdev->gendisk_on = -1; 2150 - wake_up_interruptible(&skdev->waitq); 2151 - break; 2152 - 2153 - default: 2154 - dev_err(&skdev->pdev->dev, "Start: unknown state %x\n", 2155 - skdev->drive_state); 2156 - break; 2157 - } 2158 - 2159 - state = SKD_READL(skdev, FIT_CONTROL); 2160 - dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state); 2161 - 2162 - state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2163 - dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state); 2164 - 2165 - state = SKD_READL(skdev, FIT_INT_MASK_HOST); 2166 - dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state); 2167 - 2168 - state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2169 - dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state); 2170 - 2171 - state = SKD_READL(skdev, FIT_HW_VERSION); 2172 - dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state); 2173 - 2174 - spin_unlock_irqrestore(&skdev->lock, flags); 2175 - } 2176 - 2177 - static void skd_stop_device(struct skd_device *skdev) 2178 - { 2179 - unsigned long flags; 2180 - struct skd_special_context *skspcl = &skdev->internal_skspcl; 2181 - u32 dev_state; 2182 - int i; 2183 - 2184 - spin_lock_irqsave(&skdev->lock, flags); 2185 - 2186 - if (skdev->state != SKD_DRVR_STATE_ONLINE) { 2187 - dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__); 2188 - goto stop_out; 2189 - } 2190 - 2191 - if (skspcl->req.state != SKD_REQ_STATE_IDLE) { 2192 - dev_err(&skdev->pdev->dev, "%s no special\n", __func__); 2193 - goto stop_out; 2194 - } 2195 - 2196 - skdev->state = SKD_DRVR_STATE_SYNCING; 2197 - skdev->sync_done = 0; 2198 - 2199 - skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 2200 - 2201 - spin_unlock_irqrestore(&skdev->lock, flags); 2202 - 2203 - wait_event_interruptible_timeout(skdev->waitq, 2204 - (skdev->sync_done), (10 * HZ)); 2205 - 2206 - spin_lock_irqsave(&skdev->lock, flags); 2207 - 2208 - switch (skdev->sync_done) { 2209 - 
case 0: 2210 - dev_err(&skdev->pdev->dev, "%s no sync\n", __func__); 2211 - break; 2212 - case 1: 2213 - dev_err(&skdev->pdev->dev, "%s sync done\n", __func__); 2214 - break; 2215 - default: 2216 - dev_err(&skdev->pdev->dev, "%s sync error\n", __func__); 2217 - } 2218 - 2219 - stop_out: 2220 - skdev->state = SKD_DRVR_STATE_STOPPING; 2221 - spin_unlock_irqrestore(&skdev->lock, flags); 2222 - 2223 - skd_kill_timer(skdev); 2224 - 2225 - spin_lock_irqsave(&skdev->lock, flags); 2226 - skd_disable_interrupts(skdev); 2227 - 2228 - /* ensure all ints on device are cleared */ 2229 - /* soft reset the device to unload with a clean slate */ 2230 - SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2231 - SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); 2232 - 2233 - spin_unlock_irqrestore(&skdev->lock, flags); 2234 - 2235 - /* poll every 100ms, 1 second timeout */ 2236 - for (i = 0; i < 10; i++) { 2237 - dev_state = 2238 - SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; 2239 - if (dev_state == FIT_SR_DRIVE_INIT) 2240 - break; 2241 - set_current_state(TASK_INTERRUPTIBLE); 2242 - schedule_timeout(msecs_to_jiffies(100)); 2243 - } 2244 - 2245 - if (dev_state != FIT_SR_DRIVE_INIT) 2246 - dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__, 2247 - dev_state); 2248 - } 2249 - 2250 - /* assume spinlock is held */ 2251 - static void skd_restart_device(struct skd_device *skdev) 2252 - { 2253 - u32 state; 2254 - 2255 - /* ack all ghost interrupts */ 2256 - SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2257 - 2258 - state = SKD_READL(skdev, FIT_STATUS); 2259 - 2260 - dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state); 2261 - 2262 - state &= FIT_SR_DRIVE_STATE_MASK; 2263 - skdev->drive_state = state; 2264 - skdev->last_mtd = 0; 2265 - 2266 - skdev->state = SKD_DRVR_STATE_RESTARTING; 2267 - skdev->timer_countdown = SKD_RESTARTING_TIMO; 2268 - 2269 - skd_soft_reset(skdev); 2270 - } 2271 - 2272 - /* assume spinlock is held */ 2273 - static int skd_quiesce_dev(struct skd_device *skdev) 2274 - { 2275 - int rc = 0; 2276 - 2277 - switch (skdev->state) { 2278 - case SKD_DRVR_STATE_BUSY: 2279 - case SKD_DRVR_STATE_BUSY_IMMINENT: 2280 - dev_dbg(&skdev->pdev->dev, "stopping queue\n"); 2281 - blk_mq_stop_hw_queues(skdev->queue); 2282 - break; 2283 - case SKD_DRVR_STATE_ONLINE: 2284 - case SKD_DRVR_STATE_STOPPING: 2285 - case SKD_DRVR_STATE_SYNCING: 2286 - case SKD_DRVR_STATE_PAUSING: 2287 - case SKD_DRVR_STATE_PAUSED: 2288 - case SKD_DRVR_STATE_STARTING: 2289 - case SKD_DRVR_STATE_RESTARTING: 2290 - case SKD_DRVR_STATE_RESUMING: 2291 - default: 2292 - rc = -EINVAL; 2293 - dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n", 2294 - skdev->state); 2295 - } 2296 - return rc; 2297 - } 2298 - 2299 - /* assume spinlock is held */ 2300 - static int skd_unquiesce_dev(struct skd_device *skdev) 2301 - { 2302 - int prev_driver_state = skdev->state; 2303 - 2304 - skd_log_skdev(skdev, "unquiesce"); 2305 - if (skdev->state == SKD_DRVR_STATE_ONLINE) { 2306 - dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n"); 2307 - return 0; 2308 - } 2309 - if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { 2310 - /* 2311 - * If there has been an state change to other than 2312 - * ONLINE, we will rely on controller state change 2313 - * to come back online and restart the queue. 2314 - * The BUSY state means that driver is ready to 2315 - * continue normal processing but waiting for controller 2316 - * to become available. 
2317 - */ 2318 - skdev->state = SKD_DRVR_STATE_BUSY; 2319 - dev_dbg(&skdev->pdev->dev, "drive BUSY state\n"); 2320 - return 0; 2321 - } 2322 - 2323 - /* 2324 - * Drive has just come online, driver is either in startup, 2325 - * paused performing a task, or bust waiting for hardware. 2326 - */ 2327 - switch (skdev->state) { 2328 - case SKD_DRVR_STATE_PAUSED: 2329 - case SKD_DRVR_STATE_BUSY: 2330 - case SKD_DRVR_STATE_BUSY_IMMINENT: 2331 - case SKD_DRVR_STATE_BUSY_ERASE: 2332 - case SKD_DRVR_STATE_STARTING: 2333 - case SKD_DRVR_STATE_RESTARTING: 2334 - case SKD_DRVR_STATE_FAULT: 2335 - case SKD_DRVR_STATE_IDLE: 2336 - case SKD_DRVR_STATE_LOAD: 2337 - skdev->state = SKD_DRVR_STATE_ONLINE; 2338 - dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", 2339 - skd_skdev_state_to_str(prev_driver_state), 2340 - prev_driver_state, skd_skdev_state_to_str(skdev->state), 2341 - skdev->state); 2342 - dev_dbg(&skdev->pdev->dev, 2343 - "**** device ONLINE...starting block queue\n"); 2344 - dev_dbg(&skdev->pdev->dev, "starting queue\n"); 2345 - dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n"); 2346 - schedule_work(&skdev->start_queue); 2347 - skdev->gendisk_on = 1; 2348 - wake_up_interruptible(&skdev->waitq); 2349 - break; 2350 - 2351 - case SKD_DRVR_STATE_DISAPPEARED: 2352 - default: 2353 - dev_dbg(&skdev->pdev->dev, 2354 - "**** driver state %d, not implemented\n", 2355 - skdev->state); 2356 - return -EBUSY; 2357 - } 2358 - return 0; 2359 - } 2360 - 2361 - /* 2362 - ***************************************************************************** 2363 - * PCIe MSI/MSI-X INTERRUPT HANDLERS 2364 - ***************************************************************************** 2365 - */ 2366 - 2367 - static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) 2368 - { 2369 - struct skd_device *skdev = skd_host_data; 2370 - unsigned long flags; 2371 - 2372 - spin_lock_irqsave(&skdev->lock, flags); 2373 - dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2374 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2375 - dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq, 2376 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2377 - SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); 2378 - spin_unlock_irqrestore(&skdev->lock, flags); 2379 - return IRQ_HANDLED; 2380 - } 2381 - 2382 - static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) 2383 - { 2384 - struct skd_device *skdev = skd_host_data; 2385 - unsigned long flags; 2386 - 2387 - spin_lock_irqsave(&skdev->lock, flags); 2388 - dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2389 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2390 - SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); 2391 - skd_isr_fwstate(skdev); 2392 - spin_unlock_irqrestore(&skdev->lock, flags); 2393 - return IRQ_HANDLED; 2394 - } 2395 - 2396 - static irqreturn_t skd_comp_q(int irq, void *skd_host_data) 2397 - { 2398 - struct skd_device *skdev = skd_host_data; 2399 - unsigned long flags; 2400 - int flush_enqueued = 0; 2401 - int deferred; 2402 - 2403 - spin_lock_irqsave(&skdev->lock, flags); 2404 - dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2405 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2406 - SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); 2407 - deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, 2408 - &flush_enqueued); 2409 - if (flush_enqueued) 2410 - schedule_work(&skdev->start_queue); 2411 - 2412 - if (deferred) 2413 - schedule_work(&skdev->completion_worker); 2414 - else if (!flush_enqueued) 2415 - 
schedule_work(&skdev->start_queue); 2416 - 2417 - spin_unlock_irqrestore(&skdev->lock, flags); 2418 - 2419 - return IRQ_HANDLED; 2420 - } 2421 - 2422 - static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) 2423 - { 2424 - struct skd_device *skdev = skd_host_data; 2425 - unsigned long flags; 2426 - 2427 - spin_lock_irqsave(&skdev->lock, flags); 2428 - dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2429 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2430 - SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); 2431 - skd_isr_msg_from_dev(skdev); 2432 - spin_unlock_irqrestore(&skdev->lock, flags); 2433 - return IRQ_HANDLED; 2434 - } 2435 - 2436 - static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) 2437 - { 2438 - struct skd_device *skdev = skd_host_data; 2439 - unsigned long flags; 2440 - 2441 - spin_lock_irqsave(&skdev->lock, flags); 2442 - dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 2443 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2444 - SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); 2445 - spin_unlock_irqrestore(&skdev->lock, flags); 2446 - return IRQ_HANDLED; 2447 - } 2448 - 2449 - /* 2450 - ***************************************************************************** 2451 - * PCIe MSI/MSI-X SETUP 2452 - ***************************************************************************** 2453 - */ 2454 - 2455 - struct skd_msix_entry { 2456 - char isr_name[30]; 2457 - }; 2458 - 2459 - struct skd_init_msix_entry { 2460 - const char *name; 2461 - irq_handler_t handler; 2462 - }; 2463 - 2464 - #define SKD_MAX_MSIX_COUNT 13 2465 - #define SKD_MIN_MSIX_COUNT 7 2466 - #define SKD_BASE_MSIX_IRQ 4 2467 - 2468 - static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { 2469 - { "(DMA 0)", skd_reserved_isr }, 2470 - { "(DMA 1)", skd_reserved_isr }, 2471 - { "(DMA 2)", skd_reserved_isr }, 2472 - { "(DMA 3)", skd_reserved_isr }, 2473 - { "(State Change)", skd_statec_isr }, 2474 - { "(COMPL_Q)", skd_comp_q }, 2475 - { "(MSG)", skd_msg_isr }, 2476 - { "(Reserved)", skd_reserved_isr }, 2477 - { "(Reserved)", skd_reserved_isr }, 2478 - { "(Queue Full 0)", skd_qfull_isr }, 2479 - { "(Queue Full 1)", skd_qfull_isr }, 2480 - { "(Queue Full 2)", skd_qfull_isr }, 2481 - { "(Queue Full 3)", skd_qfull_isr }, 2482 - }; 2483 - 2484 - static int skd_acquire_msix(struct skd_device *skdev) 2485 - { 2486 - int i, rc; 2487 - struct pci_dev *pdev = skdev->pdev; 2488 - 2489 - rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT, 2490 - PCI_IRQ_MSIX); 2491 - if (rc < 0) { 2492 - dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc); 2493 - goto out; 2494 - } 2495 - 2496 - skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT, 2497 - sizeof(struct skd_msix_entry), GFP_KERNEL); 2498 - if (!skdev->msix_entries) { 2499 - rc = -ENOMEM; 2500 - dev_err(&skdev->pdev->dev, "msix table allocation error\n"); 2501 - goto out; 2502 - } 2503 - 2504 - /* Enable MSI-X vectors for the base queue */ 2505 - for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { 2506 - struct skd_msix_entry *qentry = &skdev->msix_entries[i]; 2507 - 2508 - snprintf(qentry->isr_name, sizeof(qentry->isr_name), 2509 - "%s%d-msix %s", DRV_NAME, skdev->devno, 2510 - msix_entries[i].name); 2511 - 2512 - rc = devm_request_irq(&skdev->pdev->dev, 2513 - pci_irq_vector(skdev->pdev, i), 2514 - msix_entries[i].handler, 0, 2515 - qentry->isr_name, skdev); 2516 - if (rc) { 2517 - dev_err(&skdev->pdev->dev, 2518 - "Unable to register(%d) MSI-X handler %d: %s\n", 2519 - rc, i, qentry->isr_name); 2520 - goto msix_out; 2521 - } 2522 
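/*
 * [Editorial note, not part of the removed skd driver code above.]
 * skd_acquire_msix() passes SKD_MAX_MSIX_COUNT as both min_vecs and
 * max_vecs, so pci_alloc_irq_vectors() either grants all 13 vectors or
 * fails outright; skd_acquire_irq() below then falls back to a single
 * MSI or legacy vector. A minimal sketch of that all-or-nothing
 * request (only the function name is a placeholder):
 */
static int example_alloc_msix(struct pci_dev *pdev)
{
	/* returns the vector count on success, a negative errno on failure */
	int nvec = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT,
					 SKD_MAX_MSIX_COUNT, PCI_IRQ_MSIX);

	return nvec < 0 ? nvec : 0;
}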
- } 2523 - 2524 - dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n", 2525 - SKD_MAX_MSIX_COUNT); 2526 - return 0; 2527 - 2528 - msix_out: 2529 - while (--i >= 0) 2530 - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev); 2531 - out: 2532 - kfree(skdev->msix_entries); 2533 - skdev->msix_entries = NULL; 2534 - return rc; 2535 - } 2536 - 2537 - static int skd_acquire_irq(struct skd_device *skdev) 2538 - { 2539 - struct pci_dev *pdev = skdev->pdev; 2540 - unsigned int irq_flag = PCI_IRQ_LEGACY; 2541 - int rc; 2542 - 2543 - if (skd_isr_type == SKD_IRQ_MSIX) { 2544 - rc = skd_acquire_msix(skdev); 2545 - if (!rc) 2546 - return 0; 2547 - 2548 - dev_err(&skdev->pdev->dev, 2549 - "failed to enable MSI-X, re-trying with MSI %d\n", rc); 2550 - } 2551 - 2552 - snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME, 2553 - skdev->devno); 2554 - 2555 - if (skd_isr_type != SKD_IRQ_LEGACY) 2556 - irq_flag |= PCI_IRQ_MSI; 2557 - rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag); 2558 - if (rc < 0) { 2559 - dev_err(&skdev->pdev->dev, 2560 - "failed to allocate the MSI interrupt %d\n", rc); 2561 - return rc; 2562 - } 2563 - 2564 - rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 2565 - pdev->msi_enabled ? 0 : IRQF_SHARED, 2566 - skdev->isr_name, skdev); 2567 - if (rc) { 2568 - pci_free_irq_vectors(pdev); 2569 - dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n", 2570 - rc); 2571 - return rc; 2572 - } 2573 - 2574 - return 0; 2575 - } 2576 - 2577 - static void skd_release_irq(struct skd_device *skdev) 2578 - { 2579 - struct pci_dev *pdev = skdev->pdev; 2580 - 2581 - if (skdev->msix_entries) { 2582 - int i; 2583 - 2584 - for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { 2585 - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), 2586 - skdev); 2587 - } 2588 - 2589 - kfree(skdev->msix_entries); 2590 - skdev->msix_entries = NULL; 2591 - } else { 2592 - devm_free_irq(&pdev->dev, pdev->irq, skdev); 2593 - } 2594 - 2595 - pci_free_irq_vectors(pdev); 2596 - } 2597 - 2598 - /* 2599 - ***************************************************************************** 2600 - * CONSTRUCT 2601 - ***************************************************************************** 2602 - */ 2603 - 2604 - static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, 2605 - dma_addr_t *dma_handle, gfp_t gfp, 2606 - enum dma_data_direction dir) 2607 - { 2608 - struct device *dev = &skdev->pdev->dev; 2609 - void *buf; 2610 - 2611 - buf = kmem_cache_alloc(s, gfp); 2612 - if (!buf) 2613 - return NULL; 2614 - *dma_handle = dma_map_single(dev, buf, 2615 - kmem_cache_size(s), dir); 2616 - if (dma_mapping_error(dev, *dma_handle)) { 2617 - kmem_cache_free(s, buf); 2618 - buf = NULL; 2619 - } 2620 - return buf; 2621 - } 2622 - 2623 - static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s, 2624 - void *vaddr, dma_addr_t dma_handle, 2625 - enum dma_data_direction dir) 2626 - { 2627 - if (!vaddr) 2628 - return; 2629 - 2630 - dma_unmap_single(&skdev->pdev->dev, dma_handle, 2631 - kmem_cache_size(s), dir); 2632 - kmem_cache_free(s, vaddr); 2633 - } 2634 - 2635 - static int skd_cons_skcomp(struct skd_device *skdev) 2636 - { 2637 - int rc = 0; 2638 - struct fit_completion_entry_v1 *skcomp; 2639 - 2640 - dev_dbg(&skdev->pdev->dev, 2641 - "comp pci_alloc, total bytes %zd entries %d\n", 2642 - SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); 2643 - 2644 - skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2645 - &skdev->cq_dma_address, GFP_KERNEL); 2646 - 2647 - if (skcomp == NULL) { 2648 - rc 
= -ENOMEM; 2649 - goto err_out; 2650 - } 2651 - 2652 - skdev->skcomp_table = skcomp; 2653 - skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + 2654 - sizeof(*skcomp) * 2655 - SKD_N_COMPLETION_ENTRY); 2656 - 2657 - err_out: 2658 - return rc; 2659 - } 2660 - 2661 - static int skd_cons_skmsg(struct skd_device *skdev) 2662 - { 2663 - int rc = 0; 2664 - u32 i; 2665 - 2666 - dev_dbg(&skdev->pdev->dev, 2667 - "skmsg_table kcalloc, struct %lu, count %u total %lu\n", 2668 - sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context, 2669 - sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); 2670 - 2671 - skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context, 2672 - sizeof(struct skd_fitmsg_context), 2673 - GFP_KERNEL); 2674 - if (skdev->skmsg_table == NULL) { 2675 - rc = -ENOMEM; 2676 - goto err_out; 2677 - } 2678 - 2679 - for (i = 0; i < skdev->num_fitmsg_context; i++) { 2680 - struct skd_fitmsg_context *skmsg; 2681 - 2682 - skmsg = &skdev->skmsg_table[i]; 2683 - 2684 - skmsg->id = i + SKD_ID_FIT_MSG; 2685 - 2686 - skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev, 2687 - SKD_N_FITMSG_BYTES, 2688 - &skmsg->mb_dma_address, 2689 - GFP_KERNEL); 2690 - if (skmsg->msg_buf == NULL) { 2691 - rc = -ENOMEM; 2692 - goto err_out; 2693 - } 2694 - 2695 - WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) & 2696 - (FIT_QCMD_ALIGN - 1), 2697 - "not aligned: msg_buf %p mb_dma_address %pad\n", 2698 - skmsg->msg_buf, &skmsg->mb_dma_address); 2699 - } 2700 - 2701 - err_out: 2702 - return rc; 2703 - } 2704 - 2705 - static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, 2706 - u32 n_sg, 2707 - dma_addr_t *ret_dma_addr) 2708 - { 2709 - struct fit_sg_descriptor *sg_list; 2710 - 2711 - sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr, 2712 - GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); 2713 - 2714 - if (sg_list != NULL) { 2715 - uint64_t dma_address = *ret_dma_addr; 2716 - u32 i; 2717 - 2718 - for (i = 0; i < n_sg - 1; i++) { 2719 - uint64_t ndp_off; 2720 - ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); 2721 - 2722 - sg_list[i].next_desc_ptr = dma_address + ndp_off; 2723 - } 2724 - sg_list[i].next_desc_ptr = 0LL; 2725 - } 2726 - 2727 - return sg_list; 2728 - } 2729 - 2730 - static void skd_free_sg_list(struct skd_device *skdev, 2731 - struct fit_sg_descriptor *sg_list, 2732 - dma_addr_t dma_addr) 2733 - { 2734 - if (WARN_ON_ONCE(!sg_list)) 2735 - return; 2736 - 2737 - skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr, 2738 - DMA_TO_DEVICE); 2739 - } 2740 - 2741 - static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq, 2742 - unsigned int hctx_idx, unsigned int numa_node) 2743 - { 2744 - struct skd_device *skdev = set->driver_data; 2745 - struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); 2746 - 2747 - skreq->state = SKD_REQ_STATE_IDLE; 2748 - skreq->sg = (void *)(skreq + 1); 2749 - sg_init_table(skreq->sg, skd_sgs_per_request); 2750 - skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request, 2751 - &skreq->sksg_dma_address); 2752 - 2753 - return skreq->sksg_list ? 
0 : -ENOMEM; 2754 - } 2755 - 2756 - static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq, 2757 - unsigned int hctx_idx) 2758 - { 2759 - struct skd_device *skdev = set->driver_data; 2760 - struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); 2761 - 2762 - skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address); 2763 - } 2764 - 2765 - static int skd_cons_sksb(struct skd_device *skdev) 2766 - { 2767 - int rc = 0; 2768 - struct skd_special_context *skspcl; 2769 - 2770 - skspcl = &skdev->internal_skspcl; 2771 - 2772 - skspcl->req.id = 0 + SKD_ID_INTERNAL; 2773 - skspcl->req.state = SKD_REQ_STATE_IDLE; 2774 - 2775 - skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache, 2776 - &skspcl->db_dma_address, 2777 - GFP_DMA | __GFP_ZERO, 2778 - DMA_BIDIRECTIONAL); 2779 - if (skspcl->data_buf == NULL) { 2780 - rc = -ENOMEM; 2781 - goto err_out; 2782 - } 2783 - 2784 - skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache, 2785 - &skspcl->mb_dma_address, 2786 - GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); 2787 - if (skspcl->msg_buf == NULL) { 2788 - rc = -ENOMEM; 2789 - goto err_out; 2790 - } 2791 - 2792 - skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, 2793 - &skspcl->req.sksg_dma_address); 2794 - if (skspcl->req.sksg_list == NULL) { 2795 - rc = -ENOMEM; 2796 - goto err_out; 2797 - } 2798 - 2799 - if (!skd_format_internal_skspcl(skdev)) { 2800 - rc = -EINVAL; 2801 - goto err_out; 2802 - } 2803 - 2804 - err_out: 2805 - return rc; 2806 - } 2807 - 2808 - static const struct blk_mq_ops skd_mq_ops = { 2809 - .queue_rq = skd_mq_queue_rq, 2810 - .complete = skd_complete_rq, 2811 - .timeout = skd_timed_out, 2812 - .init_request = skd_init_request, 2813 - .exit_request = skd_exit_request, 2814 - }; 2815 - 2816 - static int skd_cons_disk(struct skd_device *skdev) 2817 - { 2818 - int rc = 0; 2819 - struct gendisk *disk; 2820 - struct request_queue *q; 2821 - unsigned long flags; 2822 - 2823 - disk = alloc_disk(SKD_MINORS_PER_DEVICE); 2824 - if (!disk) { 2825 - rc = -ENOMEM; 2826 - goto err_out; 2827 - } 2828 - 2829 - skdev->disk = disk; 2830 - sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); 2831 - 2832 - disk->major = skdev->major; 2833 - disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; 2834 - disk->fops = &skd_blockdev_ops; 2835 - disk->private_data = skdev; 2836 - 2837 - memset(&skdev->tag_set, 0, sizeof(skdev->tag_set)); 2838 - skdev->tag_set.ops = &skd_mq_ops; 2839 - skdev->tag_set.nr_hw_queues = 1; 2840 - skdev->tag_set.queue_depth = skd_max_queue_depth; 2841 - skdev->tag_set.cmd_size = sizeof(struct skd_request_context) + 2842 - skdev->sgs_per_request * sizeof(struct scatterlist); 2843 - skdev->tag_set.numa_node = NUMA_NO_NODE; 2844 - skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | 2845 - BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO); 2846 - skdev->tag_set.driver_data = skdev; 2847 - rc = blk_mq_alloc_tag_set(&skdev->tag_set); 2848 - if (rc) 2849 - goto err_out; 2850 - q = blk_mq_init_queue(&skdev->tag_set); 2851 - if (IS_ERR(q)) { 2852 - blk_mq_free_tag_set(&skdev->tag_set); 2853 - rc = PTR_ERR(q); 2854 - goto err_out; 2855 - } 2856 - q->queuedata = skdev; 2857 - 2858 - skdev->queue = q; 2859 - disk->queue = q; 2860 - 2861 - blk_queue_write_cache(q, true, true); 2862 - blk_queue_max_segments(q, skdev->sgs_per_request); 2863 - blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); 2864 - 2865 - /* set optimal I/O size to 8KB */ 2866 - blk_queue_io_opt(q, 8192); 2867 - 2868 - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2869 - 
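/*
 * [Editorial note, not part of the removed skd driver code above.]
 * skd_cons_disk() follows the standard blk-mq bring-up of this kernel
 * generation: populate a blk_mq_tag_set, allocate it, create the
 * request_queue from it, then apply the queue limits. A stripped-down
 * sketch of that sequence; "struct example_dev", "example_mq_ops" and
 * "struct example_pdu" are placeholders, not names from the driver:
 */
static int example_init_queue(struct example_dev *edev)
{
	struct request_queue *q;
	int rc;

	memset(&edev->tag_set, 0, sizeof(edev->tag_set));
	edev->tag_set.ops = &example_mq_ops;
	edev->tag_set.nr_hw_queues = 1;
	edev->tag_set.queue_depth = 64;
	edev->tag_set.cmd_size = sizeof(struct example_pdu);
	edev->tag_set.numa_node = NUMA_NO_NODE;
	edev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	edev->tag_set.driver_data = edev;

	rc = blk_mq_alloc_tag_set(&edev->tag_set);
	if (rc)
		return rc;

	q = blk_mq_init_queue(&edev->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&edev->tag_set);
		return PTR_ERR(q);
	}
	q->queuedata = edev;
	edev->queue = q;

	blk_queue_max_segments(q, 32);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	return 0;
}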
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2870 - 2871 - blk_queue_rq_timeout(q, 8 * HZ); 2872 - 2873 - spin_lock_irqsave(&skdev->lock, flags); 2874 - dev_dbg(&skdev->pdev->dev, "stopping queue\n"); 2875 - blk_mq_stop_hw_queues(skdev->queue); 2876 - spin_unlock_irqrestore(&skdev->lock, flags); 2877 - 2878 - err_out: 2879 - return rc; 2880 - } 2881 - 2882 - #define SKD_N_DEV_TABLE 16u 2883 - static u32 skd_next_devno; 2884 - 2885 - static struct skd_device *skd_construct(struct pci_dev *pdev) 2886 - { 2887 - struct skd_device *skdev; 2888 - int blk_major = skd_major; 2889 - size_t size; 2890 - int rc; 2891 - 2892 - skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); 2893 - 2894 - if (!skdev) { 2895 - dev_err(&pdev->dev, "memory alloc failure\n"); 2896 - return NULL; 2897 - } 2898 - 2899 - skdev->state = SKD_DRVR_STATE_LOAD; 2900 - skdev->pdev = pdev; 2901 - skdev->devno = skd_next_devno++; 2902 - skdev->major = blk_major; 2903 - skdev->dev_max_queue_depth = 0; 2904 - 2905 - skdev->num_req_context = skd_max_queue_depth; 2906 - skdev->num_fitmsg_context = skd_max_queue_depth; 2907 - skdev->cur_max_queue_depth = 1; 2908 - skdev->queue_low_water_mark = 1; 2909 - skdev->proto_ver = 99; 2910 - skdev->sgs_per_request = skd_sgs_per_request; 2911 - skdev->dbg_level = skd_dbg_level; 2912 - 2913 - spin_lock_init(&skdev->lock); 2914 - 2915 - INIT_WORK(&skdev->start_queue, skd_start_queue); 2916 - INIT_WORK(&skdev->completion_worker, skd_completion_worker); 2917 - 2918 - size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES); 2919 - skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0, 2920 - SLAB_HWCACHE_ALIGN, NULL); 2921 - if (!skdev->msgbuf_cache) 2922 - goto err_out; 2923 - WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size, 2924 - "skd-msgbuf: %d < %zd\n", 2925 - kmem_cache_size(skdev->msgbuf_cache), size); 2926 - size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor); 2927 - skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0, 2928 - SLAB_HWCACHE_ALIGN, NULL); 2929 - if (!skdev->sglist_cache) 2930 - goto err_out; 2931 - WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size, 2932 - "skd-sglist: %d < %zd\n", 2933 - kmem_cache_size(skdev->sglist_cache), size); 2934 - size = SKD_N_INTERNAL_BYTES; 2935 - skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0, 2936 - SLAB_HWCACHE_ALIGN, NULL); 2937 - if (!skdev->databuf_cache) 2938 - goto err_out; 2939 - WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size, 2940 - "skd-databuf: %d < %zd\n", 2941 - kmem_cache_size(skdev->databuf_cache), size); 2942 - 2943 - dev_dbg(&skdev->pdev->dev, "skcomp\n"); 2944 - rc = skd_cons_skcomp(skdev); 2945 - if (rc < 0) 2946 - goto err_out; 2947 - 2948 - dev_dbg(&skdev->pdev->dev, "skmsg\n"); 2949 - rc = skd_cons_skmsg(skdev); 2950 - if (rc < 0) 2951 - goto err_out; 2952 - 2953 - dev_dbg(&skdev->pdev->dev, "sksb\n"); 2954 - rc = skd_cons_sksb(skdev); 2955 - if (rc < 0) 2956 - goto err_out; 2957 - 2958 - dev_dbg(&skdev->pdev->dev, "disk\n"); 2959 - rc = skd_cons_disk(skdev); 2960 - if (rc < 0) 2961 - goto err_out; 2962 - 2963 - dev_dbg(&skdev->pdev->dev, "VICTORY\n"); 2964 - return skdev; 2965 - 2966 - err_out: 2967 - dev_dbg(&skdev->pdev->dev, "construct failed\n"); 2968 - skd_destruct(skdev); 2969 - return NULL; 2970 - } 2971 - 2972 - /* 2973 - ***************************************************************************** 2974 - * DESTRUCT (FREE) 2975 - ***************************************************************************** 2976 - */ 2977 - 2978 - static void 
skd_free_skcomp(struct skd_device *skdev) 2979 - { 2980 - if (skdev->skcomp_table) 2981 - dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2982 - skdev->skcomp_table, skdev->cq_dma_address); 2983 - 2984 - skdev->skcomp_table = NULL; 2985 - skdev->cq_dma_address = 0; 2986 - } 2987 - 2988 - static void skd_free_skmsg(struct skd_device *skdev) 2989 - { 2990 - u32 i; 2991 - 2992 - if (skdev->skmsg_table == NULL) 2993 - return; 2994 - 2995 - for (i = 0; i < skdev->num_fitmsg_context; i++) { 2996 - struct skd_fitmsg_context *skmsg; 2997 - 2998 - skmsg = &skdev->skmsg_table[i]; 2999 - 3000 - if (skmsg->msg_buf != NULL) { 3001 - dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES, 3002 - skmsg->msg_buf, 3003 - skmsg->mb_dma_address); 3004 - } 3005 - skmsg->msg_buf = NULL; 3006 - skmsg->mb_dma_address = 0; 3007 - } 3008 - 3009 - kfree(skdev->skmsg_table); 3010 - skdev->skmsg_table = NULL; 3011 - } 3012 - 3013 - static void skd_free_sksb(struct skd_device *skdev) 3014 - { 3015 - struct skd_special_context *skspcl = &skdev->internal_skspcl; 3016 - 3017 - skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf, 3018 - skspcl->db_dma_address, DMA_BIDIRECTIONAL); 3019 - 3020 - skspcl->data_buf = NULL; 3021 - skspcl->db_dma_address = 0; 3022 - 3023 - skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf, 3024 - skspcl->mb_dma_address, DMA_TO_DEVICE); 3025 - 3026 - skspcl->msg_buf = NULL; 3027 - skspcl->mb_dma_address = 0; 3028 - 3029 - skd_free_sg_list(skdev, skspcl->req.sksg_list, 3030 - skspcl->req.sksg_dma_address); 3031 - 3032 - skspcl->req.sksg_list = NULL; 3033 - skspcl->req.sksg_dma_address = 0; 3034 - } 3035 - 3036 - static void skd_free_disk(struct skd_device *skdev) 3037 - { 3038 - struct gendisk *disk = skdev->disk; 3039 - 3040 - if (disk && (disk->flags & GENHD_FL_UP)) 3041 - del_gendisk(disk); 3042 - 3043 - if (skdev->queue) { 3044 - blk_cleanup_queue(skdev->queue); 3045 - skdev->queue = NULL; 3046 - if (disk) 3047 - disk->queue = NULL; 3048 - } 3049 - 3050 - if (skdev->tag_set.tags) 3051 - blk_mq_free_tag_set(&skdev->tag_set); 3052 - 3053 - put_disk(disk); 3054 - skdev->disk = NULL; 3055 - } 3056 - 3057 - static void skd_destruct(struct skd_device *skdev) 3058 - { 3059 - if (skdev == NULL) 3060 - return; 3061 - 3062 - cancel_work_sync(&skdev->start_queue); 3063 - 3064 - dev_dbg(&skdev->pdev->dev, "disk\n"); 3065 - skd_free_disk(skdev); 3066 - 3067 - dev_dbg(&skdev->pdev->dev, "sksb\n"); 3068 - skd_free_sksb(skdev); 3069 - 3070 - dev_dbg(&skdev->pdev->dev, "skmsg\n"); 3071 - skd_free_skmsg(skdev); 3072 - 3073 - dev_dbg(&skdev->pdev->dev, "skcomp\n"); 3074 - skd_free_skcomp(skdev); 3075 - 3076 - kmem_cache_destroy(skdev->databuf_cache); 3077 - kmem_cache_destroy(skdev->sglist_cache); 3078 - kmem_cache_destroy(skdev->msgbuf_cache); 3079 - 3080 - dev_dbg(&skdev->pdev->dev, "skdev\n"); 3081 - kfree(skdev); 3082 - } 3083 - 3084 - /* 3085 - ***************************************************************************** 3086 - * BLOCK DEVICE (BDEV) GLUE 3087 - ***************************************************************************** 3088 - */ 3089 - 3090 - static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3091 - { 3092 - struct skd_device *skdev; 3093 - u64 capacity; 3094 - 3095 - skdev = bdev->bd_disk->private_data; 3096 - 3097 - dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n", 3098 - bdev->bd_disk->disk_name, current->comm); 3099 - 3100 - if (skdev->read_cap_is_valid) { 3101 - capacity = get_capacity(skdev->disk); 3102 - geo->heads = 64; 3103 
- geo->sectors = 255; 3104 - geo->cylinders = (capacity) / (255 * 64); 3105 - 3106 - return 0; 3107 - } 3108 - return -EIO; 3109 - } 3110 - 3111 - static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) 3112 - { 3113 - dev_dbg(&skdev->pdev->dev, "add_disk\n"); 3114 - device_add_disk(parent, skdev->disk, NULL); 3115 - return 0; 3116 - } 3117 - 3118 - static const struct block_device_operations skd_blockdev_ops = { 3119 - .owner = THIS_MODULE, 3120 - .getgeo = skd_bdev_getgeo, 3121 - }; 3122 - 3123 - /* 3124 - ***************************************************************************** 3125 - * PCIe DRIVER GLUE 3126 - ***************************************************************************** 3127 - */ 3128 - 3129 - static const struct pci_device_id skd_pci_tbl[] = { 3130 - { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, 3131 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, 3132 - { 0 } /* terminate list */ 3133 - }; 3134 - 3135 - MODULE_DEVICE_TABLE(pci, skd_pci_tbl); 3136 - 3137 - static char *skd_pci_info(struct skd_device *skdev, char *str) 3138 - { 3139 - int pcie_reg; 3140 - 3141 - strcpy(str, "PCIe ("); 3142 - pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); 3143 - 3144 - if (pcie_reg) { 3145 - 3146 - char lwstr[6]; 3147 - uint16_t pcie_lstat, lspeed, lwidth; 3148 - 3149 - pcie_reg += 0x12; 3150 - pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); 3151 - lspeed = pcie_lstat & (0xF); 3152 - lwidth = (pcie_lstat & 0x3F0) >> 4; 3153 - 3154 - if (lspeed == 1) 3155 - strcat(str, "2.5GT/s "); 3156 - else if (lspeed == 2) 3157 - strcat(str, "5.0GT/s "); 3158 - else 3159 - strcat(str, "<unknown> "); 3160 - snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); 3161 - strcat(str, lwstr); 3162 - } 3163 - return str; 3164 - } 3165 - 3166 - static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3167 - { 3168 - int i; 3169 - int rc = 0; 3170 - char pci_str[32]; 3171 - struct skd_device *skdev; 3172 - 3173 - dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor, 3174 - pdev->device); 3175 - 3176 - rc = pci_enable_device(pdev); 3177 - if (rc) 3178 - return rc; 3179 - rc = pci_request_regions(pdev, DRV_NAME); 3180 - if (rc) 3181 - goto err_out; 3182 - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3183 - if (rc) 3184 - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3185 - if (rc) { 3186 - dev_err(&pdev->dev, "DMA mask error %d\n", rc); 3187 - goto err_out_regions; 3188 - } 3189 - 3190 - if (!skd_major) { 3191 - rc = register_blkdev(0, DRV_NAME); 3192 - if (rc < 0) 3193 - goto err_out_regions; 3194 - BUG_ON(!rc); 3195 - skd_major = rc; 3196 - } 3197 - 3198 - skdev = skd_construct(pdev); 3199 - if (skdev == NULL) { 3200 - rc = -ENOMEM; 3201 - goto err_out_regions; 3202 - } 3203 - 3204 - skd_pci_info(skdev, pci_str); 3205 - dev_info(&pdev->dev, "%s 64bit\n", pci_str); 3206 - 3207 - pci_set_master(pdev); 3208 - rc = pci_enable_pcie_error_reporting(pdev); 3209 - if (rc) { 3210 - dev_err(&pdev->dev, 3211 - "bad enable of PCIe error reporting rc=%d\n", rc); 3212 - skdev->pcie_error_reporting_is_enabled = 0; 3213 - } else 3214 - skdev->pcie_error_reporting_is_enabled = 1; 3215 - 3216 - pci_set_drvdata(pdev, skdev); 3217 - 3218 - for (i = 0; i < SKD_MAX_BARS; i++) { 3219 - skdev->mem_phys[i] = pci_resource_start(pdev, i); 3220 - skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); 3221 - skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 3222 - skdev->mem_size[i]); 3223 - if (!skdev->mem_map[i]) { 3224 - dev_err(&pdev->dev, 3225 - 
"Unable to map adapter memory!\n"); 3226 - rc = -ENODEV; 3227 - goto err_out_iounmap; 3228 - } 3229 - dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", 3230 - skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], 3231 - skdev->mem_size[i]); 3232 - } 3233 - 3234 - rc = skd_acquire_irq(skdev); 3235 - if (rc) { 3236 - dev_err(&pdev->dev, "interrupt resource error %d\n", rc); 3237 - goto err_out_iounmap; 3238 - } 3239 - 3240 - rc = skd_start_timer(skdev); 3241 - if (rc) 3242 - goto err_out_timer; 3243 - 3244 - init_waitqueue_head(&skdev->waitq); 3245 - 3246 - skd_start_device(skdev); 3247 - 3248 - rc = wait_event_interruptible_timeout(skdev->waitq, 3249 - (skdev->gendisk_on), 3250 - (SKD_START_WAIT_SECONDS * HZ)); 3251 - if (skdev->gendisk_on > 0) { 3252 - /* device came on-line after reset */ 3253 - skd_bdev_attach(&pdev->dev, skdev); 3254 - rc = 0; 3255 - } else { 3256 - /* we timed out, something is wrong with the device, 3257 - don't add the disk structure */ 3258 - dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n", 3259 - rc); 3260 - /* in case of no error; we timeout with ENXIO */ 3261 - if (!rc) 3262 - rc = -ENXIO; 3263 - goto err_out_timer; 3264 - } 3265 - 3266 - return rc; 3267 - 3268 - err_out_timer: 3269 - skd_stop_device(skdev); 3270 - skd_release_irq(skdev); 3271 - 3272 - err_out_iounmap: 3273 - for (i = 0; i < SKD_MAX_BARS; i++) 3274 - if (skdev->mem_map[i]) 3275 - iounmap(skdev->mem_map[i]); 3276 - 3277 - if (skdev->pcie_error_reporting_is_enabled) 3278 - pci_disable_pcie_error_reporting(pdev); 3279 - 3280 - skd_destruct(skdev); 3281 - 3282 - err_out_regions: 3283 - pci_release_regions(pdev); 3284 - 3285 - err_out: 3286 - pci_disable_device(pdev); 3287 - pci_set_drvdata(pdev, NULL); 3288 - return rc; 3289 - } 3290 - 3291 - static void skd_pci_remove(struct pci_dev *pdev) 3292 - { 3293 - int i; 3294 - struct skd_device *skdev; 3295 - 3296 - skdev = pci_get_drvdata(pdev); 3297 - if (!skdev) { 3298 - dev_err(&pdev->dev, "no device data for PCI\n"); 3299 - return; 3300 - } 3301 - skd_stop_device(skdev); 3302 - skd_release_irq(skdev); 3303 - 3304 - for (i = 0; i < SKD_MAX_BARS; i++) 3305 - if (skdev->mem_map[i]) 3306 - iounmap(skdev->mem_map[i]); 3307 - 3308 - if (skdev->pcie_error_reporting_is_enabled) 3309 - pci_disable_pcie_error_reporting(pdev); 3310 - 3311 - skd_destruct(skdev); 3312 - 3313 - pci_release_regions(pdev); 3314 - pci_disable_device(pdev); 3315 - pci_set_drvdata(pdev, NULL); 3316 - 3317 - return; 3318 - } 3319 - 3320 - static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) 3321 - { 3322 - int i; 3323 - struct skd_device *skdev; 3324 - 3325 - skdev = pci_get_drvdata(pdev); 3326 - if (!skdev) { 3327 - dev_err(&pdev->dev, "no device data for PCI\n"); 3328 - return -EIO; 3329 - } 3330 - 3331 - skd_stop_device(skdev); 3332 - 3333 - skd_release_irq(skdev); 3334 - 3335 - for (i = 0; i < SKD_MAX_BARS; i++) 3336 - if (skdev->mem_map[i]) 3337 - iounmap(skdev->mem_map[i]); 3338 - 3339 - if (skdev->pcie_error_reporting_is_enabled) 3340 - pci_disable_pcie_error_reporting(pdev); 3341 - 3342 - pci_release_regions(pdev); 3343 - pci_save_state(pdev); 3344 - pci_disable_device(pdev); 3345 - pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3346 - return 0; 3347 - } 3348 - 3349 - static int skd_pci_resume(struct pci_dev *pdev) 3350 - { 3351 - int i; 3352 - int rc = 0; 3353 - struct skd_device *skdev; 3354 - 3355 - skdev = pci_get_drvdata(pdev); 3356 - if (!skdev) { 3357 - dev_err(&pdev->dev, "no device data for PCI\n"); 3358 - return -1; 3359 - } 
3360 - 3361 - pci_set_power_state(pdev, PCI_D0); 3362 - pci_enable_wake(pdev, PCI_D0, 0); 3363 - pci_restore_state(pdev); 3364 - 3365 - rc = pci_enable_device(pdev); 3366 - if (rc) 3367 - return rc; 3368 - rc = pci_request_regions(pdev, DRV_NAME); 3369 - if (rc) 3370 - goto err_out; 3371 - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3372 - if (rc) 3373 - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3374 - if (rc) { 3375 - dev_err(&pdev->dev, "DMA mask error %d\n", rc); 3376 - goto err_out_regions; 3377 - } 3378 - 3379 - pci_set_master(pdev); 3380 - rc = pci_enable_pcie_error_reporting(pdev); 3381 - if (rc) { 3382 - dev_err(&pdev->dev, 3383 - "bad enable of PCIe error reporting rc=%d\n", rc); 3384 - skdev->pcie_error_reporting_is_enabled = 0; 3385 - } else 3386 - skdev->pcie_error_reporting_is_enabled = 1; 3387 - 3388 - for (i = 0; i < SKD_MAX_BARS; i++) { 3389 - 3390 - skdev->mem_phys[i] = pci_resource_start(pdev, i); 3391 - skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); 3392 - skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 3393 - skdev->mem_size[i]); 3394 - if (!skdev->mem_map[i]) { 3395 - dev_err(&pdev->dev, "Unable to map adapter memory!\n"); 3396 - rc = -ENODEV; 3397 - goto err_out_iounmap; 3398 - } 3399 - dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", 3400 - skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], 3401 - skdev->mem_size[i]); 3402 - } 3403 - rc = skd_acquire_irq(skdev); 3404 - if (rc) { 3405 - dev_err(&pdev->dev, "interrupt resource error %d\n", rc); 3406 - goto err_out_iounmap; 3407 - } 3408 - 3409 - rc = skd_start_timer(skdev); 3410 - if (rc) 3411 - goto err_out_timer; 3412 - 3413 - init_waitqueue_head(&skdev->waitq); 3414 - 3415 - skd_start_device(skdev); 3416 - 3417 - return rc; 3418 - 3419 - err_out_timer: 3420 - skd_stop_device(skdev); 3421 - skd_release_irq(skdev); 3422 - 3423 - err_out_iounmap: 3424 - for (i = 0; i < SKD_MAX_BARS; i++) 3425 - if (skdev->mem_map[i]) 3426 - iounmap(skdev->mem_map[i]); 3427 - 3428 - if (skdev->pcie_error_reporting_is_enabled) 3429 - pci_disable_pcie_error_reporting(pdev); 3430 - 3431 - err_out_regions: 3432 - pci_release_regions(pdev); 3433 - 3434 - err_out: 3435 - pci_disable_device(pdev); 3436 - return rc; 3437 - } 3438 - 3439 - static void skd_pci_shutdown(struct pci_dev *pdev) 3440 - { 3441 - struct skd_device *skdev; 3442 - 3443 - dev_err(&pdev->dev, "%s called\n", __func__); 3444 - 3445 - skdev = pci_get_drvdata(pdev); 3446 - if (!skdev) { 3447 - dev_err(&pdev->dev, "no device data for PCI\n"); 3448 - return; 3449 - } 3450 - 3451 - dev_err(&pdev->dev, "calling stop\n"); 3452 - skd_stop_device(skdev); 3453 - } 3454 - 3455 - static struct pci_driver skd_driver = { 3456 - .name = DRV_NAME, 3457 - .id_table = skd_pci_tbl, 3458 - .probe = skd_pci_probe, 3459 - .remove = skd_pci_remove, 3460 - .suspend = skd_pci_suspend, 3461 - .resume = skd_pci_resume, 3462 - .shutdown = skd_pci_shutdown, 3463 - }; 3464 - 3465 - /* 3466 - ***************************************************************************** 3467 - * LOGGING SUPPORT 3468 - ***************************************************************************** 3469 - */ 3470 - 3471 - const char *skd_drive_state_to_str(int state) 3472 - { 3473 - switch (state) { 3474 - case FIT_SR_DRIVE_OFFLINE: 3475 - return "OFFLINE"; 3476 - case FIT_SR_DRIVE_INIT: 3477 - return "INIT"; 3478 - case FIT_SR_DRIVE_ONLINE: 3479 - return "ONLINE"; 3480 - case FIT_SR_DRIVE_BUSY: 3481 - return "BUSY"; 3482 - case FIT_SR_DRIVE_FAULT: 3483 - return "FAULT"; 3484 
- case FIT_SR_DRIVE_DEGRADED: 3485 - return "DEGRADED"; 3486 - case FIT_SR_PCIE_LINK_DOWN: 3487 - return "INK_DOWN"; 3488 - case FIT_SR_DRIVE_SOFT_RESET: 3489 - return "SOFT_RESET"; 3490 - case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 3491 - return "NEED_FW"; 3492 - case FIT_SR_DRIVE_INIT_FAULT: 3493 - return "INIT_FAULT"; 3494 - case FIT_SR_DRIVE_BUSY_SANITIZE: 3495 - return "BUSY_SANITIZE"; 3496 - case FIT_SR_DRIVE_BUSY_ERASE: 3497 - return "BUSY_ERASE"; 3498 - case FIT_SR_DRIVE_FW_BOOTING: 3499 - return "FW_BOOTING"; 3500 - default: 3501 - return "???"; 3502 - } 3503 - } 3504 - 3505 - const char *skd_skdev_state_to_str(enum skd_drvr_state state) 3506 - { 3507 - switch (state) { 3508 - case SKD_DRVR_STATE_LOAD: 3509 - return "LOAD"; 3510 - case SKD_DRVR_STATE_IDLE: 3511 - return "IDLE"; 3512 - case SKD_DRVR_STATE_BUSY: 3513 - return "BUSY"; 3514 - case SKD_DRVR_STATE_STARTING: 3515 - return "STARTING"; 3516 - case SKD_DRVR_STATE_ONLINE: 3517 - return "ONLINE"; 3518 - case SKD_DRVR_STATE_PAUSING: 3519 - return "PAUSING"; 3520 - case SKD_DRVR_STATE_PAUSED: 3521 - return "PAUSED"; 3522 - case SKD_DRVR_STATE_RESTARTING: 3523 - return "RESTARTING"; 3524 - case SKD_DRVR_STATE_RESUMING: 3525 - return "RESUMING"; 3526 - case SKD_DRVR_STATE_STOPPING: 3527 - return "STOPPING"; 3528 - case SKD_DRVR_STATE_SYNCING: 3529 - return "SYNCING"; 3530 - case SKD_DRVR_STATE_FAULT: 3531 - return "FAULT"; 3532 - case SKD_DRVR_STATE_DISAPPEARED: 3533 - return "DISAPPEARED"; 3534 - case SKD_DRVR_STATE_BUSY_ERASE: 3535 - return "BUSY_ERASE"; 3536 - case SKD_DRVR_STATE_BUSY_SANITIZE: 3537 - return "BUSY_SANITIZE"; 3538 - case SKD_DRVR_STATE_BUSY_IMMINENT: 3539 - return "BUSY_IMMINENT"; 3540 - case SKD_DRVR_STATE_WAIT_BOOT: 3541 - return "WAIT_BOOT"; 3542 - 3543 - default: 3544 - return "???"; 3545 - } 3546 - } 3547 - 3548 - static const char *skd_skreq_state_to_str(enum skd_req_state state) 3549 - { 3550 - switch (state) { 3551 - case SKD_REQ_STATE_IDLE: 3552 - return "IDLE"; 3553 - case SKD_REQ_STATE_SETUP: 3554 - return "SETUP"; 3555 - case SKD_REQ_STATE_BUSY: 3556 - return "BUSY"; 3557 - case SKD_REQ_STATE_COMPLETED: 3558 - return "COMPLETED"; 3559 - case SKD_REQ_STATE_TIMEOUT: 3560 - return "TIMEOUT"; 3561 - default: 3562 - return "???"; 3563 - } 3564 - } 3565 - 3566 - static void skd_log_skdev(struct skd_device *skdev, const char *event) 3567 - { 3568 - dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event); 3569 - dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n", 3570 - skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3571 - skd_skdev_state_to_str(skdev->state), skdev->state); 3572 - dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n", 3573 - skd_in_flight(skdev), skdev->cur_max_queue_depth, 3574 - skdev->dev_max_queue_depth, skdev->queue_low_water_mark); 3575 - dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n", 3576 - skdev->skcomp_cycle, skdev->skcomp_ix); 3577 - } 3578 - 3579 - static void skd_log_skreq(struct skd_device *skdev, 3580 - struct skd_request_context *skreq, const char *event) 3581 - { 3582 - struct request *req = blk_mq_rq_from_pdu(skreq); 3583 - u32 lba = blk_rq_pos(req); 3584 - u32 count = blk_rq_sectors(req); 3585 - 3586 - dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event); 3587 - dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n", 3588 - skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id, 3589 - skreq->fitmsg_id); 3590 - dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n", 3591 - 
skreq->data_dir, skreq->n_sg); 3592 - 3593 - dev_dbg(&skdev->pdev->dev, 3594 - "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba, 3595 - count, count, (int)rq_data_dir(req)); 3596 - } 3597 - 3598 - /* 3599 - ***************************************************************************** 3600 - * MODULE GLUE 3601 - ***************************************************************************** 3602 - */ 3603 - 3604 - static int __init skd_init(void) 3605 - { 3606 - BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8); 3607 - BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32); 3608 - BUILD_BUG_ON(sizeof(struct skd_command_header) != 16); 3609 - BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32); 3610 - BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44); 3611 - BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0); 3612 - BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64); 3613 - BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES); 3614 - 3615 - switch (skd_isr_type) { 3616 - case SKD_IRQ_LEGACY: 3617 - case SKD_IRQ_MSI: 3618 - case SKD_IRQ_MSIX: 3619 - break; 3620 - default: 3621 - pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", 3622 - skd_isr_type, SKD_IRQ_DEFAULT); 3623 - skd_isr_type = SKD_IRQ_DEFAULT; 3624 - } 3625 - 3626 - if (skd_max_queue_depth < 1 || 3627 - skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { 3628 - pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", 3629 - skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); 3630 - skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 3631 - } 3632 - 3633 - if (skd_max_req_per_msg < 1 || 3634 - skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) { 3635 - pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", 3636 - skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 3637 - skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 3638 - } 3639 - 3640 - if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { 3641 - pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n", 3642 - skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); 3643 - skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 3644 - } 3645 - 3646 - if (skd_dbg_level < 0 || skd_dbg_level > 2) { 3647 - pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", 3648 - skd_dbg_level, 0); 3649 - skd_dbg_level = 0; 3650 - } 3651 - 3652 - if (skd_isr_comp_limit < 0) { 3653 - pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", 3654 - skd_isr_comp_limit, 0); 3655 - skd_isr_comp_limit = 0; 3656 - } 3657 - 3658 - return pci_register_driver(&skd_driver); 3659 - } 3660 - 3661 - static void __exit skd_exit(void) 3662 - { 3663 - pci_unregister_driver(&skd_driver); 3664 - 3665 - if (skd_major) 3666 - unregister_blkdev(skd_major, DRV_NAME); 3667 - } 3668 - 3669 - module_init(skd_init); 3670 - module_exit(skd_exit);
-322
drivers/block/skd_s1120.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Copyright 2012 STEC, Inc. 4 - * Copyright (c) 2017 Western Digital Corporation or its affiliates. 5 - */ 6 - 7 - 8 - #ifndef SKD_S1120_H 9 - #define SKD_S1120_H 10 - 11 - /* 12 - * Q-channel, 64-bit r/w 13 - */ 14 - #define FIT_Q_COMMAND 0x400u 15 - #define FIT_QCMD_QID_MASK (0x3 << 1) 16 - #define FIT_QCMD_QID0 (0x0 << 1) 17 - #define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0 18 - #define FIT_QCMD_QID1 (0x1 << 1) 19 - #define FIT_QCMD_QID2 (0x2 << 1) 20 - #define FIT_QCMD_QID3 (0x3 << 1) 21 - #define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */ 22 - #define FIT_QCMD_MSGSIZE_MASK (0x3 << 4) 23 - #define FIT_QCMD_MSGSIZE_64 (0x0 << 4) 24 - #define FIT_QCMD_MSGSIZE_128 (0x1 << 4) 25 - #define FIT_QCMD_MSGSIZE_256 (0x2 << 4) 26 - #define FIT_QCMD_MSGSIZE_512 (0x3 << 4) 27 - #define FIT_QCMD_ALIGN L1_CACHE_BYTES 28 - 29 - /* 30 - * Control, 32-bit r/w 31 - */ 32 - #define FIT_CONTROL 0x500u 33 - #define FIT_CR_HARD_RESET (1u << 0u) 34 - #define FIT_CR_SOFT_RESET (1u << 1u) 35 - #define FIT_CR_DIS_TIMESTAMPS (1u << 6u) 36 - #define FIT_CR_ENABLE_INTERRUPTS (1u << 7u) 37 - 38 - /* 39 - * Status, 32-bit, r/o 40 - */ 41 - #define FIT_STATUS 0x510u 42 - #define FIT_SR_DRIVE_STATE_MASK 0x000000FFu 43 - #define FIT_SR_SIGNATURE (0xFF << 8) 44 - #define FIT_SR_PIO_DMA (1 << 16) 45 - #define FIT_SR_DRIVE_OFFLINE 0x00 46 - #define FIT_SR_DRIVE_INIT 0x01 47 - /* #define FIT_SR_DRIVE_READY 0x02 */ 48 - #define FIT_SR_DRIVE_ONLINE 0x03 49 - #define FIT_SR_DRIVE_BUSY 0x04 50 - #define FIT_SR_DRIVE_FAULT 0x05 51 - #define FIT_SR_DRIVE_DEGRADED 0x06 52 - #define FIT_SR_PCIE_LINK_DOWN 0x07 53 - #define FIT_SR_DRIVE_SOFT_RESET 0x08 54 - #define FIT_SR_DRIVE_INIT_FAULT 0x09 55 - #define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A 56 - #define FIT_SR_DRIVE_BUSY_ERASE 0x0B 57 - #define FIT_SR_DRIVE_FW_BOOTING 0x0C 58 - #define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE 59 - #define FIT_SR_DEVICE_MISSING 0xFF 60 - #define FIT_SR__RESERVED 0xFFFFFF00u 61 - 62 - /* 63 - * FIT_STATUS - Status register data definition 64 - */ 65 - #define FIT_SR_STATE_MASK (0xFF << 0) 66 - #define FIT_SR_SIGNATURE (0xFF << 8) 67 - #define FIT_SR_PIO_DMA (1 << 16) 68 - 69 - /* 70 - * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear) 71 - */ 72 - #define FIT_INT_STATUS_HOST 0x520u 73 - #define FIT_ISH_FW_STATE_CHANGE (1u << 0u) 74 - #define FIT_ISH_COMPLETION_POSTED (1u << 1u) 75 - #define FIT_ISH_MSG_FROM_DEV (1u << 2u) 76 - #define FIT_ISH_UNDEFINED_3 (1u << 3u) 77 - #define FIT_ISH_UNDEFINED_4 (1u << 4u) 78 - #define FIT_ISH_Q0_FULL (1u << 5u) 79 - #define FIT_ISH_Q1_FULL (1u << 6u) 80 - #define FIT_ISH_Q2_FULL (1u << 7u) 81 - #define FIT_ISH_Q3_FULL (1u << 8u) 82 - #define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u) 83 - #define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u) 84 - 85 - #define FIT_INT_DEF_MASK \ 86 - (FIT_ISH_FW_STATE_CHANGE | \ 87 - FIT_ISH_COMPLETION_POSTED | \ 88 - FIT_ISH_MSG_FROM_DEV | \ 89 - FIT_ISH_Q0_FULL | \ 90 - FIT_ISH_Q1_FULL | \ 91 - FIT_ISH_Q2_FULL | \ 92 - FIT_ISH_Q3_FULL | \ 93 - FIT_ISH_QCMD_FIFO_OVERRUN | \ 94 - FIT_ISH_BAD_EXP_ROM_READ) 95 - 96 - #define FIT_INT_QUEUE_FULL \ 97 - (FIT_ISH_Q0_FULL | \ 98 - FIT_ISH_Q1_FULL | \ 99 - FIT_ISH_Q2_FULL | \ 100 - FIT_ISH_Q3_FULL) 101 - 102 - #define MSI_MSG_NWL_ERROR_0 0x00000000 103 - #define MSI_MSG_NWL_ERROR_1 0x00000001 104 - #define MSI_MSG_NWL_ERROR_2 0x00000002 105 - #define MSI_MSG_NWL_ERROR_3 0x00000003 106 - #define MSI_MSG_STATE_CHANGE 0x00000004 107 - #define MSI_MSG_COMPLETION_POSTED 0x00000005 108 - #define 
MSI_MSG_MSG_FROM_DEV 0x00000006 109 - #define MSI_MSG_RESERVED_0 0x00000007 110 - #define MSI_MSG_RESERVED_1 0x00000008 111 - #define MSI_MSG_QUEUE_0_FULL 0x00000009 112 - #define MSI_MSG_QUEUE_1_FULL 0x0000000A 113 - #define MSI_MSG_QUEUE_2_FULL 0x0000000B 114 - #define MSI_MSG_QUEUE_3_FULL 0x0000000C 115 - 116 - #define FIT_INT_RESERVED_MASK \ 117 - (FIT_ISH_UNDEFINED_3 | \ 118 - FIT_ISH_UNDEFINED_4) 119 - 120 - /* 121 - * Interrupt mask, 32-bit r/w 122 - * Bit definitions are the same as FIT_INT_STATUS_HOST 123 - */ 124 - #define FIT_INT_MASK_HOST 0x528u 125 - 126 - /* 127 - * Message to device, 32-bit r/w 128 - */ 129 - #define FIT_MSG_TO_DEVICE 0x540u 130 - 131 - /* 132 - * Message from device, 32-bit, r/o 133 - */ 134 - #define FIT_MSG_FROM_DEVICE 0x548u 135 - 136 - /* 137 - * 32-bit messages to/from device, composition/extraction macros 138 - */ 139 - #define FIT_MXD_CONS(TYPE, PARAM, DATA) \ 140 - ((((TYPE) & 0xFFu) << 24u) | \ 141 - (((PARAM) & 0xFFu) << 16u) | \ 142 - (((DATA) & 0xFFFFu) << 0u)) 143 - #define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu) 144 - #define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu) 145 - #define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu) 146 - 147 - /* 148 - * Types of messages to/from device 149 - */ 150 - #define FIT_MTD_FITFW_INIT 0x01u 151 - #define FIT_MTD_GET_CMDQ_DEPTH 0x02u 152 - #define FIT_MTD_SET_COMPQ_DEPTH 0x03u 153 - #define FIT_MTD_SET_COMPQ_ADDR 0x04u 154 - #define FIT_MTD_ARM_QUEUE 0x05u 155 - #define FIT_MTD_CMD_LOG_HOST_ID 0x07u 156 - #define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u 157 - #define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u 158 - #define FIT_MFD_SMART_EXCEEDED 0x10u 159 - #define FIT_MFD_POWER_DOWN 0x11u 160 - #define FIT_MFD_OFFLINE 0x12u 161 - #define FIT_MFD_ONLINE 0x13u 162 - #define FIT_MFD_FW_RESTARTING 0x14u 163 - #define FIT_MFD_PM_ACTIVE 0x15u 164 - #define FIT_MFD_PM_STANDBY 0x16u 165 - #define FIT_MFD_PM_SLEEP 0x17u 166 - #define FIT_MFD_CMD_PROGRESS 0x18u 167 - 168 - #define FIT_MTD_DEBUG 0xFEu 169 - #define FIT_MFD_DEBUG 0xFFu 170 - 171 - #define FIT_MFD_MASK (0xFFu) 172 - #define FIT_MFD_DATA_MASK (0xFFu) 173 - #define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK) 174 - #define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK) 175 - 176 - /* 177 - * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w 178 - * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR) 179 - * (was Response buffer in docs) 180 - */ 181 - #define FIT_MSG_TO_DEVICE_ARG 0x580u 182 - 183 - /* 184 - * Hardware (ASIC) version, 32-bit r/o 185 - */ 186 - #define FIT_HW_VERSION 0x588u 187 - 188 - /* 189 - * Scatter/gather list descriptor. 190 - * 32-bytes and must be aligned on a 32-byte boundary. 191 - * All fields are in little endian order. 192 - */ 193 - struct fit_sg_descriptor { 194 - uint32_t control; 195 - uint32_t byte_count; 196 - uint64_t host_side_addr; 197 - uint64_t dev_side_addr; 198 - uint64_t next_desc_ptr; 199 - }; 200 - 201 - #define FIT_SGD_CONTROL_NOT_LAST 0x000u 202 - #define FIT_SGD_CONTROL_LAST 0x40Eu 203 - 204 - /* 205 - * Header at the beginning of a FIT message. The header 206 - * is followed by SSDI requests each 64 bytes. 207 - * A FIT message can be up to 512 bytes long and must start 208 - * on a 64-byte boundary. 
209 - */ 210 - struct fit_msg_hdr { 211 - uint8_t protocol_id; 212 - uint8_t num_protocol_cmds_coalesced; 213 - uint8_t _reserved[62]; 214 - }; 215 - 216 - #define FIT_PROTOCOL_ID_FIT 1 217 - #define FIT_PROTOCOL_ID_SSDI 2 218 - #define FIT_PROTOCOL_ID_SOFIT 3 219 - 220 - 221 - #define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF) 222 - #define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF) 223 - 224 - /* 225 - * Format of a completion entry. The completion queue is circular 226 - * and must have at least as many entries as the maximum number 227 - * of commands that may be issued to the device. 228 - * 229 - * There are no head/tail pointers. The cycle value is used to 230 - * infer the presence of new completion records. 231 - * Initially the cycle in all entries is 0, the index is 0, and 232 - * the cycle value to expect is 1. When completions are added 233 - * their cycle values are set to 1. When the index wraps the 234 - * cycle value to expect is incremented. 235 - * 236 - * Command_context is opaque and taken verbatim from the SSDI command. 237 - * All other fields are big endian. 238 - */ 239 - #define FIT_PROTOCOL_VERSION_0 0 240 - 241 - /* 242 - * Protocol major version 1 completion entry. 243 - * The major protocol version is found in bits 244 - * 20-23 of the FIT_MTD_FITFW_INIT response. 245 - */ 246 - struct fit_completion_entry_v1 { 247 - __be32 num_returned_bytes; 248 - uint16_t tag; 249 - uint8_t status; /* SCSI status */ 250 - uint8_t cycle; 251 - }; 252 - #define FIT_PROTOCOL_VERSION_1 1 253 - #define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1 254 - 255 - struct fit_comp_error_info { 256 - uint8_t type:7; /* 00: Bits0-6 indicates the type of sense data. */ 257 - uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */ 258 - uint8_t reserved0; /* 01: Obsolete field */ 259 - uint8_t key:4; /* 02: Bits0-3 indicate the sense key. */ 260 - uint8_t reserved2:1; /* 02: Reserved bit. 
*/ 261 - uint8_t bad_length:1; /* 02: Incorrect Length Indicator */ 262 - uint8_t end_medium:1; /* 02: End of Medium */ 263 - uint8_t file_mark:1; /* 02: Filemark */ 264 - uint8_t info[4]; /* 03: */ 265 - uint8_t reserved1; /* 07: Additional Sense Length */ 266 - uint8_t cmd_spec[4]; /* 08: Command Specific Information */ 267 - uint8_t code; /* 0C: Additional Sense Code */ 268 - uint8_t qual; /* 0D: Additional Sense Code Qualifier */ 269 - uint8_t fruc; /* 0E: Field Replaceable Unit Code */ 270 - uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */ 271 - uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */ 272 - uint16_t sks_low; /* 10: Sense Key Specific (LSW) */ 273 - uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */ 274 - uint16_t uec; /* 14: Additional Sense Bytes */ 275 - uint64_t per __packed; /* 16: Additional Sense Bytes */ 276 - uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */ 277 - }; 278 - 279 - 280 - /* Task management constants */ 281 - #define SOFT_TASK_SIMPLE 0x00 282 - #define SOFT_TASK_HEAD_OF_QUEUE 0x01 283 - #define SOFT_TASK_ORDERED 0x02 284 - 285 - /* Version zero has the last 32 bits reserved, 286 - * Version one has the last 32 bits sg_list_len_bytes; 287 - */ 288 - struct skd_command_header { 289 - __be64 sg_list_dma_address; 290 - uint16_t tag; 291 - uint8_t attribute; 292 - uint8_t add_cdb_len; /* In 32 bit words */ 293 - __be32 sg_list_len_bytes; 294 - }; 295 - 296 - struct skd_scsi_request { 297 - struct skd_command_header hdr; 298 - unsigned char cdb[16]; 299 - /* unsigned char _reserved[16]; */ 300 - }; 301 - 302 - struct driver_inquiry_data { 303 - uint8_t peripheral_device_type:5; 304 - uint8_t qualifier:3; 305 - uint8_t page_code; 306 - __be16 page_length; 307 - __be16 pcie_bus_number; 308 - uint8_t pcie_device_number; 309 - uint8_t pcie_function_number; 310 - uint8_t pcie_link_speed; 311 - uint8_t pcie_link_lanes; 312 - __be16 pcie_vendor_id; 313 - __be16 pcie_device_id; 314 - __be16 pcie_subsystem_vendor_id; 315 - __be16 pcie_subsystem_device_id; 316 - uint8_t reserved1[2]; 317 - uint8_t reserved2[3]; 318 - uint8_t driver_version_length; 319 - uint8_t driver_version[0x14]; 320 - }; 321 - 322 - #endif /* SKD_S1120_H */
+1 -2
drivers/block/zram/zram_drv.c
··· 530 530
531 531 return len;
532 532 out:
533 - if (bitmap)
534 - kvfree(bitmap);
533 + kvfree(bitmap);
535 534
536 535 if (bdev)
537 536 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+2 -3
drivers/lightnvm/pblk-core.c
··· 988 988 bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
989 989
990 990 smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
991 - guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
991 + export_guid(smeta_buf->header.uuid, &pblk->instance_uuid);
992 992 smeta_buf->header.id = cpu_to_le32(line->id);
993 993 smeta_buf->header.type = cpu_to_le16(line->type);
994 994 smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
··· 1803 1803
1804 1804 if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1805 1805 emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1806 - guid_copy((guid_t *)&emeta_buf->header.uuid,
1807 - &pblk->instance_uuid);
1806 + export_guid(emeta_buf->header.uuid, &pblk->instance_uuid);
1808 1807 emeta_buf->header.id = cpu_to_le32(line->id);
1809 1808 emeta_buf->header.type = cpu_to_le16(line->type);
1810 1809 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
+1 -2
drivers/lightnvm/pblk-gc.c
··· 23 23
24 24 static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
25 25 {
26 - if (gc_rq->data)
27 - vfree(gc_rq->data);
26 + vfree(gc_rq->data);
28 27 kfree(gc_rq);
29 28 }
30 29
+1 -2
drivers/lightnvm/pblk-recovery.c
··· 706 706
707 707 /* The first valid instance uuid is used for initialization */
708 708 if (!valid_uuid) {
709 - guid_copy(&pblk->instance_uuid,
710 - (guid_t *)&smeta_buf->header.uuid);
709 + import_guid(&pblk->instance_uuid, smeta_buf->header.uuid);
711 710 valid_uuid = 1;
712 711 }
713 712
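Note: guid_copy() takes two guid_t pointers, while the on-media smeta/emeta headers store the uuid as a plain byte array, which is what the casts above were papering over; export_guid()/import_guid() copy between a guid_t and a raw 16-byte buffer. A minimal userspace sketch of what the round trip amounts to (the guid_t layout and UUID_SIZE match include/linux/uuid.h; the buffer names below are illustrative, not pblk code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UUID_SIZE 16

typedef struct { uint8_t b[UUID_SIZE]; } guid_t;	/* same layout as the kernel's guid_t */

/* Userspace equivalents of the helpers used in the pblk hunks above. */
static void export_guid(uint8_t dst[UUID_SIZE], const guid_t *src)
{
	memcpy(dst, src, UUID_SIZE);
}

static void import_guid(guid_t *dst, const uint8_t src[UUID_SIZE])
{
	memcpy(dst, src, UUID_SIZE);
}

int main(void)
{
	guid_t instance = {{ 0xde, 0xad, 0xbe, 0xef }};	/* arbitrary example value */
	uint8_t on_media[UUID_SIZE];			/* stands in for smeta_buf->header.uuid */
	guid_t back;

	export_guid(on_media, &instance);	/* write path, as in pblk_line_init_smeta */
	import_guid(&back, on_media);		/* read path, as in the recovery hunk */
	printf("round-trip ok: %d\n", memcmp(&instance, &back, UUID_SIZE) == 0);
	return 0;
}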
+7
drivers/md/bcache/bcache.h
··· 373 373 unsigned int partial_stripes_expensive:1; 374 374 unsigned int writeback_metadata:1; 375 375 unsigned int writeback_running:1; 376 + unsigned int writeback_consider_fragment:1; 376 377 unsigned char writeback_percent; 377 378 unsigned int writeback_delay; 378 379 ··· 386 385 unsigned int writeback_rate_update_seconds; 387 386 unsigned int writeback_rate_i_term_inverse; 388 387 unsigned int writeback_rate_p_term_inverse; 388 + unsigned int writeback_rate_fp_term_low; 389 + unsigned int writeback_rate_fp_term_mid; 390 + unsigned int writeback_rate_fp_term_high; 389 391 unsigned int writeback_rate_minimum; 390 392 391 393 enum stop_on_failure stop_when_cache_set_failed; ··· 1005 1001 1006 1002 extern struct workqueue_struct *bcache_wq; 1007 1003 extern struct workqueue_struct *bch_journal_wq; 1004 + extern struct workqueue_struct *bch_flush_wq; 1008 1005 extern struct mutex bch_register_lock; 1009 1006 extern struct list_head bch_cache_sets; 1010 1007 ··· 1047 1042 void bch_debug_init(void); 1048 1043 void bch_request_exit(void); 1049 1044 int bch_request_init(void); 1045 + void bch_btree_exit(void); 1046 + int bch_btree_init(void); 1050 1047 1051 1048 #endif /* _BCACHE_H */
+8 -4
drivers/md/bcache/bset.c
··· 712 712 for (j = inorder_next(0, t->size); 713 713 j; 714 714 j = inorder_next(j, t->size)) { 715 - while (bkey_to_cacheline(t, k) < cacheline) 716 - prev = k, k = bkey_next(k); 715 + while (bkey_to_cacheline(t, k) < cacheline) { 716 + prev = k; 717 + k = bkey_next(k); 718 + } 717 719 718 720 t->prev[j] = bkey_u64s(prev); 719 721 t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k); ··· 903 901 status = BTREE_INSERT_STATUS_INSERT; 904 902 905 903 while (m != bset_bkey_last(i) && 906 - bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) 907 - prev = m, m = bkey_next(m); 904 + bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) { 905 + prev = m; 906 + m = bkey_next(m); 907 + } 908 908 909 909 /* prev is in the tree, if we merge we're done */ 910 910 status = BTREE_INSERT_STATUS_BACK_MERGE;
+19 -2
drivers/md/bcache/btree.c
··· 99 99 #define PTR_HASH(c, k) \ 100 100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) 101 101 102 + static struct workqueue_struct *btree_io_wq; 103 + 102 104 #define insert_lock(s, b) ((b)->level <= (s)->lock) 103 105 104 106 ··· 310 308 btree_complete_write(b, w); 311 309 312 310 if (btree_node_dirty(b)) 313 - schedule_delayed_work(&b->work, 30 * HZ); 311 + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); 314 312 315 313 closure_return_with_destructor(cl, btree_node_write_unlock); 316 314 } ··· 483 481 BUG_ON(!i->keys); 484 482 485 483 if (!btree_node_dirty(b)) 486 - schedule_delayed_work(&b->work, 30 * HZ); 484 + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); 487 485 488 486 set_btree_node_dirty(b); 489 487 ··· 2765 2763 2766 2764 spin_lock_init(&buf->lock); 2767 2765 array_allocator_init(&buf->freelist); 2766 + } 2767 + 2768 + void bch_btree_exit(void) 2769 + { 2770 + if (btree_io_wq) 2771 + destroy_workqueue(btree_io_wq); 2772 + } 2773 + 2774 + int __init bch_btree_init(void) 2775 + { 2776 + btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0); 2777 + if (!btree_io_wq) 2778 + return -ENOMEM; 2779 + 2780 + return 0; 2768 2781 }
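Note: deferred btree-node writes used to go through schedule_delayed_work(), i.e. the shared system_wq; this hunk gives them a dedicated WQ_MEM_RECLAIM workqueue (which guarantees a rescuer thread), presumably because writing back dirty btree nodes can itself be needed to make progress under memory pressure. A condensed, illustrative sketch of the pattern (bch_btree_init/bch_btree_exit are from the hunk above; defer_btree_write is a made-up stand-in for the two queue_delayed_work() call sites):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *btree_io_wq;

int bch_btree_init(void)
{
	/* WQ_MEM_RECLAIM gives the queue a rescuer thread, so queued items
	 * can still run when the system is too low on memory to fork workers. */
	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
	return btree_io_wq ? 0 : -ENOMEM;
}

void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

/* Stand-in for the call sites that previously used schedule_delayed_work(). */
static void defer_btree_write(struct delayed_work *work)
{
	queue_delayed_work(btree_io_wq, work, 30 * HZ);
}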
+2 -2
drivers/md/bcache/journal.c
··· 932 932 journal_try_write(c);
933 933 } else if (!w->dirty) {
934 934 w->dirty = true;
935 - schedule_delayed_work(&c->journal.work,
936 - msecs_to_jiffies(c->journal_delay_ms));
935 + queue_delayed_work(bch_flush_wq, &c->journal.work,
936 + msecs_to_jiffies(c->journal_delay_ms));
937 937 spin_unlock(&c->journal.lock);
938 938 } else {
939 939 spin_unlock(&c->journal.lock);
+22 -2
drivers/md/bcache/super.c
··· 49 49 static DEFINE_IDA(bcache_device_idx); 50 50 static wait_queue_head_t unregister_wait; 51 51 struct workqueue_struct *bcache_wq; 52 + struct workqueue_struct *bch_flush_wq; 52 53 struct workqueue_struct *bch_journal_wq; 53 54 54 55 ··· 2518 2517 module_put(THIS_MODULE); 2519 2518 } 2520 2519 2521 - static void register_device_aync(struct async_reg_args *args) 2520 + static void register_device_async(struct async_reg_args *args) 2522 2521 { 2523 2522 if (SB_IS_BDEV(args->sb)) 2524 2523 INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); ··· 2612 2611 args->sb = sb; 2613 2612 args->sb_disk = sb_disk; 2614 2613 args->bdev = bdev; 2615 - register_device_aync(args); 2614 + register_device_async(args); 2616 2615 /* No wait and returns to user space */ 2617 2616 goto async_done; 2618 2617 } ··· 2822 2821 destroy_workqueue(bcache_wq); 2823 2822 if (bch_journal_wq) 2824 2823 destroy_workqueue(bch_journal_wq); 2824 + if (bch_flush_wq) 2825 + destroy_workqueue(bch_flush_wq); 2826 + bch_btree_exit(); 2825 2827 2826 2828 if (bcache_major) 2827 2829 unregister_blkdev(bcache_major, "bcache"); ··· 2880 2876 return bcache_major; 2881 2877 } 2882 2878 2879 + if (bch_btree_init()) 2880 + goto err; 2881 + 2883 2882 bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0); 2884 2883 if (!bcache_wq) 2884 + goto err; 2885 + 2886 + /* 2887 + * Let's not make this `WQ_MEM_RECLAIM` for the following reasons: 2888 + * 2889 + * 1. It used `system_wq` before which also does no memory reclaim. 2890 + * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and 2891 + * reduced throughput can be observed. 2892 + * 2893 + * We still want to user our own queue to not congest the `system_wq`. 2894 + */ 2895 + bch_flush_wq = alloc_workqueue("bch_flush", 0, 0); 2896 + if (!bch_flush_wq) 2885 2897 goto err; 2886 2898 2887 2899 bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+27 -2
drivers/md/bcache/sysfs.c
··· 117 117 rw_attribute(writeback_percent); 118 118 rw_attribute(writeback_delay); 119 119 rw_attribute(writeback_rate); 120 + rw_attribute(writeback_consider_fragment); 120 121 121 122 rw_attribute(writeback_rate_update_seconds); 122 123 rw_attribute(writeback_rate_i_term_inverse); 123 124 rw_attribute(writeback_rate_p_term_inverse); 125 + rw_attribute(writeback_rate_fp_term_low); 126 + rw_attribute(writeback_rate_fp_term_mid); 127 + rw_attribute(writeback_rate_fp_term_high); 124 128 rw_attribute(writeback_rate_minimum); 125 129 read_attribute(writeback_rate_debug); 126 130 ··· 199 195 var_printf(bypass_torture_test, "%i"); 200 196 var_printf(writeback_metadata, "%i"); 201 197 var_printf(writeback_running, "%i"); 198 + var_printf(writeback_consider_fragment, "%i"); 202 199 var_print(writeback_delay); 203 200 var_print(writeback_percent); 204 201 sysfs_hprint(writeback_rate, ··· 210 205 var_print(writeback_rate_update_seconds); 211 206 var_print(writeback_rate_i_term_inverse); 212 207 var_print(writeback_rate_p_term_inverse); 208 + var_print(writeback_rate_fp_term_low); 209 + var_print(writeback_rate_fp_term_mid); 210 + var_print(writeback_rate_fp_term_high); 213 211 var_print(writeback_rate_minimum); 214 212 215 213 if (attr == &sysfs_writeback_rate_debug) { ··· 311 303 sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test); 312 304 sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata); 313 305 sysfs_strtoul_bool(writeback_running, dc->writeback_running); 306 + sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment); 314 307 sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX); 315 308 316 309 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, ··· 340 331 sysfs_strtoul_clamp(writeback_rate_p_term_inverse, 341 332 dc->writeback_rate_p_term_inverse, 342 333 1, UINT_MAX); 334 + sysfs_strtoul_clamp(writeback_rate_fp_term_low, 335 + dc->writeback_rate_fp_term_low, 336 + 1, dc->writeback_rate_fp_term_mid - 1); 337 + sysfs_strtoul_clamp(writeback_rate_fp_term_mid, 338 + dc->writeback_rate_fp_term_mid, 339 + dc->writeback_rate_fp_term_low + 1, 340 + dc->writeback_rate_fp_term_high - 1); 341 + sysfs_strtoul_clamp(writeback_rate_fp_term_high, 342 + dc->writeback_rate_fp_term_high, 343 + dc->writeback_rate_fp_term_mid + 1, UINT_MAX); 343 344 sysfs_strtoul_clamp(writeback_rate_minimum, 344 345 dc->writeback_rate_minimum, 345 346 1, UINT_MAX); ··· 518 499 &sysfs_writeback_delay, 519 500 &sysfs_writeback_percent, 520 501 &sysfs_writeback_rate, 502 + &sysfs_writeback_consider_fragment, 521 503 &sysfs_writeback_rate_update_seconds, 522 504 &sysfs_writeback_rate_i_term_inverse, 523 505 &sysfs_writeback_rate_p_term_inverse, 506 + &sysfs_writeback_rate_fp_term_low, 507 + &sysfs_writeback_rate_fp_term_mid, 508 + &sysfs_writeback_rate_fp_term_high, 524 509 &sysfs_writeback_rate_minimum, 525 510 &sysfs_writeback_rate_debug, 526 511 &sysfs_io_errors, ··· 1094 1071 --n; 1095 1072 1096 1073 while (cached < p + n && 1097 - *cached == BTREE_PRIO) 1098 - cached++, n--; 1074 + *cached == BTREE_PRIO) { 1075 + cached++; 1076 + n--; 1077 + } 1099 1078 1100 1079 for (i = 0; i < n; i++) 1101 1080 sum += INITIAL_PRIO - cached[i];
+42
drivers/md/bcache/writeback.c
··· 88 88 int64_t integral_scaled; 89 89 uint32_t new_rate; 90 90 91 + /* 92 + * We need to consider the number of dirty buckets as well 93 + * when calculating the proportional_scaled, Otherwise we might 94 + * have an unreasonable small writeback rate at a highly fragmented situation 95 + * when very few dirty sectors consumed a lot dirty buckets, the 96 + * worst case is when dirty buckets reached cutoff_writeback_sync and 97 + * dirty data is still not even reached to writeback percent, so the rate 98 + * still will be at the minimum value, which will cause the write 99 + * stuck at a non-writeback mode. 100 + */ 101 + struct cache_set *c = dc->disk.c; 102 + 103 + int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets; 104 + 105 + if (dc->writeback_consider_fragment && 106 + c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) { 107 + int64_t fragment = 108 + div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty); 109 + int64_t fp_term; 110 + int64_t fps; 111 + 112 + if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) { 113 + fp_term = dc->writeback_rate_fp_term_low * 114 + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW); 115 + } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) { 116 + fp_term = dc->writeback_rate_fp_term_mid * 117 + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID); 118 + } else { 119 + fp_term = dc->writeback_rate_fp_term_high * 120 + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH); 121 + } 122 + fps = div_s64(dirty, dirty_buckets) * fp_term; 123 + if (fragment > 3 && fps > proportional_scaled) { 124 + /* Only overrite the p when fragment > 3 */ 125 + proportional_scaled = fps; 126 + } 127 + } 128 + 91 129 if ((error < 0 && dc->writeback_rate_integral > 0) || 92 130 (error > 0 && time_before64(local_clock(), 93 131 dc->writeback_rate.next + NSEC_PER_MSEC))) { ··· 1015 977 1016 978 dc->writeback_metadata = true; 1017 979 dc->writeback_running = false; 980 + dc->writeback_consider_fragment = true; 1018 981 dc->writeback_percent = 10; 1019 982 dc->writeback_delay = 30; 1020 983 atomic_long_set(&dc->writeback_rate.rate, 1024); ··· 1023 984 1024 985 dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; 1025 986 dc->writeback_rate_p_term_inverse = 40; 987 + dc->writeback_rate_fp_term_low = 1; 988 + dc->writeback_rate_fp_term_mid = 10; 989 + dc->writeback_rate_fp_term_high = 1000; 1026 990 dc->writeback_rate_i_term_inverse = 10000; 1027 991 1028 992 WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
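Note: the new term only matters when the cache is badly fragmented, i.e. many dirty buckets each holding few dirty sectors, in which case the dirty-data error alone keeps the PD controller near its minimum rate. The boost scales with how far gc_stats.in_use (percent of buckets in use) has climbed past the thresholds added to writeback.h below (50/57/64), using the fp_term_low/mid/high defaults of 1/10/1000 set at the end of this hunk, and it only replaces the ordinary proportional term when fragment > 3 and the boosted value is larger. A standalone sketch of the selection arithmetic with made-up numbers (the fp_term() helper and sample values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Mirrors BCH_WRITEBACK_FRAGMENT_THRESHOLD_{LOW,MID,HIGH} and the
 * writeback_rate_fp_term_{low,mid,high} defaults from this patch. */
enum { THR_LOW = 50, THR_MID = 57, THR_HIGH = 64 };
enum { FP_LOW = 1, FP_MID = 10, FP_HIGH = 1000 };

/* Example-only helper: pick the fragment-proportional factor for a given
 * cache occupancy in percent; below THR_LOW the kernel skips the boost. */
static int64_t fp_term(int in_use)
{
	if (in_use <= THR_LOW)
		return 0;
	if (in_use <= THR_MID)
		return (int64_t)FP_LOW * (in_use - THR_LOW);
	if (in_use <= THR_HIGH)
		return (int64_t)FP_MID * (in_use - THR_MID);
	return (int64_t)FP_HIGH * (in_use - THR_HIGH);
}

int main(void)
{
	/* Hypothetical numbers: 60k dirty buckets of 1024 sectors holding only
	 * 3M dirty sectors, so each bucket is roughly 5% full. */
	int64_t dirty_buckets = 60000, bucket_size = 1024, dirty = 3000000;
	int64_t fragment = dirty_buckets * bucket_size / dirty;	/* ~20 */

	for (int in_use = 45; in_use <= 70; in_use += 5) {
		int64_t fps = dirty / dirty_buckets * fp_term(in_use);
		printf("in_use=%2d%% fragment=%lld fp_term=%lld fps=%lld\n",
		       in_use, (long long)fragment,
		       (long long)fp_term(in_use), (long long)fps);
	}
	return 0;
}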
+4
drivers/md/bcache/writeback.h
··· 16 16
17 17 #define BCH_AUTO_GC_DIRTY_THRESHOLD 50
18 18
19 + #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
20 + #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
21 + #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
22 +
19 23 #define BCH_DIRTY_INIT_THRD_MAX 64
20 24 /*
21 25 * 14 (16384ths) is chosen here as something that each backing device
+1 -1
drivers/md/raid5.c
··· 7643 7643 }
7644 7644
7645 7645 /* device size must be a multiple of chunk size */
7646 - mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
7646 + mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1);
7647 7647 mddev->resync_max_sectors = mddev->dev_sectors;
7648 7648
7649 7649 if (mddev->degraded > dirty_parity_disks &&
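Note: dev_sectors is a 64-bit sector_t while chunk_sectors is only 32 bits wide, so the old expression built the alignment mask in 32-bit arithmetic before widening it; if that intermediate is an unsigned 32-bit value, the promotion zero-extends and the AND wipes the upper 32 bits of dev_sectors, leaving large member devices with a bogus size. Casting to sector_t first keeps the complement 64 bits wide. A minimal userspace reproduction of the arithmetic (an unsigned 32-bit chunk value and the sizes below are assumed purely for the demo):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dev_sectors = 10ULL << 31;	/* 10 TiB worth of 512-byte sectors */
	uint32_t chunk_sectors = 1024;		/* 512 KiB chunk */

	uint64_t buggy = dev_sectors & ~(chunk_sectors - 1);		/* mask computed in 32 bits */
	uint64_t fixed = dev_sectors & ~((uint64_t)chunk_sectors - 1);	/* mask computed in 64 bits */

	printf("buggy: %" PRIu64 " sectors\n", buggy);	/* upper 32 bits of the size are gone */
	printf("fixed: %" PRIu64 " sectors\n", fixed);	/* only rounded down to the chunk size */
	return 0;
}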
+49 -14
drivers/nvme/host/core.c
··· 279 279 280 280 static void nvme_retry_req(struct request *req) 281 281 { 282 - struct nvme_ns *ns = req->q->queuedata; 283 282 unsigned long delay = 0; 284 283 u16 crd; 285 284 286 285 /* The mask and shift result must be <= 3 */ 287 286 crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; 288 - if (ns && crd) 289 - delay = ns->ctrl->crdt[crd - 1] * 100; 287 + if (crd) 288 + delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; 290 289 291 290 nvme_req(req)->retries++; 292 291 blk_mq_requeue_request(req, false); ··· 355 356 } 356 357 EXPORT_SYMBOL_GPL(nvme_complete_rq); 357 358 359 + /* 360 + * Called to unwind from ->queue_rq on a failed command submission so that the 361 + * multipathing code gets called to potentially failover to another path. 362 + * The caller needs to unwind all transport specific resource allocations and 363 + * must return propagate the return value. 364 + */ 365 + blk_status_t nvme_host_path_error(struct request *req) 366 + { 367 + nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; 368 + blk_mq_set_request_complete(req); 369 + nvme_complete_rq(req); 370 + return BLK_STS_OK; 371 + } 372 + EXPORT_SYMBOL_GPL(nvme_host_path_error); 373 + 358 374 bool nvme_cancel_request(struct request *req, void *data, bool reserved) 359 375 { 360 376 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, ··· 384 370 return true; 385 371 } 386 372 EXPORT_SYMBOL_GPL(nvme_cancel_request); 373 + 374 + void nvme_cancel_tagset(struct nvme_ctrl *ctrl) 375 + { 376 + if (ctrl->tagset) { 377 + blk_mq_tagset_busy_iter(ctrl->tagset, 378 + nvme_cancel_request, ctrl); 379 + blk_mq_tagset_wait_completed_request(ctrl->tagset); 380 + } 381 + } 382 + EXPORT_SYMBOL_GPL(nvme_cancel_tagset); 383 + 384 + void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) 385 + { 386 + if (ctrl->admin_tagset) { 387 + blk_mq_tagset_busy_iter(ctrl->admin_tagset, 388 + nvme_cancel_request, ctrl); 389 + blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); 390 + } 391 + } 392 + EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); 387 393 388 394 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 389 395 enum nvme_ctrl_state new_state) ··· 876 842 void nvme_cleanup_cmd(struct request *req) 877 843 { 878 844 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 879 - struct nvme_ns *ns = req->rq_disk->private_data; 845 + struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 880 846 struct page *page = req->special_vec.bv_page; 881 847 882 - if (page == ns->ctrl->discard_page) 883 - clear_bit_unlock(0, &ns->ctrl->discard_page_busy); 848 + if (page == ctrl->discard_page) 849 + clear_bit_unlock(0, &ctrl->discard_page_busy); 884 850 else 885 851 kfree(page_address(page) + req->special_vec.bv_offset); 886 852 } ··· 2865 2831 struct nvme_subsystem *subsys = 2866 2832 container_of(dev, struct nvme_subsystem, dev); 2867 2833 2868 - return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2834 + return sysfs_emit(buf, "%s\n", subsys->subnqn); 2869 2835 } 2870 2836 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2871 2837 ··· 2895 2861 NULL, 2896 2862 }; 2897 2863 2898 - static struct attribute_group nvme_subsys_attrs_group = { 2864 + static const struct attribute_group nvme_subsys_attrs_group = { 2899 2865 .attrs = nvme_subsys_attrs, 2900 2866 }; 2901 2867 ··· 3558 3524 { 3559 3525 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3560 3526 3561 - return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 3527 + return sysfs_emit(buf, "%s\n", ctrl->ops->name); 3562 3528 } 3563 3529 static DEVICE_ATTR(transport, S_IRUGO, 
nvme_sysfs_show_transport, NULL); 3564 3530 ··· 3592 3558 { 3593 3559 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3594 3560 3595 - return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 3561 + return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); 3596 3562 } 3597 3563 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 3598 3564 ··· 3602 3568 { 3603 3569 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3604 3570 3605 - return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn); 3571 + return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); 3606 3572 } 3607 3573 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); 3608 3574 ··· 3612 3578 { 3613 3579 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3614 3580 3615 - return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id); 3581 + return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); 3616 3582 } 3617 3583 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); 3618 3584 ··· 3730 3696 return a->mode; 3731 3697 } 3732 3698 3733 - static struct attribute_group nvme_dev_attrs_group = { 3699 + static const struct attribute_group nvme_dev_attrs_group = { 3734 3700 .attrs = nvme_dev_attrs, 3735 3701 .is_visible = nvme_dev_attrs_are_visible, 3736 3702 }; ··· 4473 4439 4474 4440 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4475 4441 { 4442 + nvme_hwmon_exit(ctrl); 4476 4443 nvme_fault_inject_fini(&ctrl->fault_inject); 4477 4444 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4478 4445 cdev_device_del(&ctrl->cdev, ctrl->device); ··· 4486 4451 struct nvme_effects_log *cel; 4487 4452 unsigned long i; 4488 4453 4489 - xa_for_each (&ctrl->cels, i, cel) { 4454 + xa_for_each(&ctrl->cels, i, cel) { 4490 4455 xa_erase(&ctrl->cels, i); 4491 4456 kfree(cel); 4492 4457 }
+1 -5
drivers/nvme/host/fabrics.c
··· 552 552 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
553 553 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
554 554 return BLK_STS_RESOURCE;
555 -
556 - nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
557 - blk_mq_start_request(rq);
558 - nvme_complete_rq(rq);
559 - return BLK_STS_OK;
555 + return nvme_host_path_error(rq);
560 556 }
561 557 EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
562 558
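Note: the open-coded sequence removed here is what nvme_host_path_error(), added in core.c above, now does centrally: it sets NVME_SC_HOST_PATH_ERROR, flags the request as complete, runs nvme_complete_rq() so multipath can fail over to another path, and returns BLK_STS_OK for ->queue_rq to propagate. A condensed, illustrative sketch of how a transport's submission error path uses it (the shape is taken from the nvme-rdma hunk further down; the function name is made up):

/* Illustrative only -- the shape of a fabrics ->queue_rq error exit after
 * this series, not code from the patch itself. */
static blk_status_t example_queue_rq_errno(struct request *rq, int err)
{
	if (err == -EIO)
		/* Permanent path failure: complete the request with
		 * NVME_SC_HOST_PATH_ERROR so multipath can fail over. */
		return nvme_host_path_error(rq);
	if (err == -ENOMEM || err == -EAGAIN)
		return BLK_STS_RESOURCE;	/* transient, let blk-mq retry */
	return BLK_STS_IOERR;
}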
+1 -1
drivers/nvme/host/fc.c
··· 3789 3789 NULL
3790 3790 };
3791 3791
3792 - static struct attribute_group nvme_fc_attr_group = {
3792 + static const struct attribute_group nvme_fc_attr_group = {
3793 3793 .attrs = nvme_fc_attrs,
3794 3794 };
3795 3795
+21 -10
drivers/nvme/host/hwmon.c
··· 223 223 224 224 int nvme_hwmon_init(struct nvme_ctrl *ctrl) 225 225 { 226 - struct device *dev = ctrl->dev; 226 + struct device *dev = ctrl->device; 227 227 struct nvme_hwmon_data *data; 228 228 struct device *hwmon; 229 229 int err; 230 230 231 - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 231 + data = kzalloc(sizeof(*data), GFP_KERNEL); 232 232 if (!data) 233 233 return 0; 234 234 ··· 237 237 238 238 err = nvme_hwmon_get_smart_log(data); 239 239 if (err) { 240 - dev_warn(ctrl->device, 241 - "Failed to read smart log (error %d)\n", err); 242 - devm_kfree(dev, data); 240 + dev_warn(dev, "Failed to read smart log (error %d)\n", err); 241 + kfree(data); 243 242 return err; 244 243 } 245 244 246 - hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data, 247 - &nvme_hwmon_chip_info, 248 - NULL); 245 + hwmon = hwmon_device_register_with_info(dev, "nvme", 246 + data, &nvme_hwmon_chip_info, 247 + NULL); 249 248 if (IS_ERR(hwmon)) { 250 249 dev_warn(dev, "Failed to instantiate hwmon device\n"); 251 - devm_kfree(dev, data); 250 + kfree(data); 252 251 } 253 - 252 + ctrl->hwmon_device = hwmon; 254 253 return 0; 254 + } 255 + 256 + void nvme_hwmon_exit(struct nvme_ctrl *ctrl) 257 + { 258 + if (ctrl->hwmon_device) { 259 + struct nvme_hwmon_data *data = 260 + dev_get_drvdata(ctrl->hwmon_device); 261 + 262 + hwmon_device_unregister(ctrl->hwmon_device); 263 + ctrl->hwmon_device = NULL; 264 + kfree(data); 265 + } 255 266 }
+4
drivers/nvme/host/multipath.c
··· 677 677 if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
678 678 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
679 679 ns->head->disk->queue);
680 + #ifdef CONFIG_BLK_DEV_ZONED
681 + if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
682 + ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
683 + #endif
680 684 }
681 685
682 686 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+17
drivers/nvme/host/nvme.h
··· 144 144 * NVMe 1.3 compliance. 145 145 */ 146 146 NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), 147 + 148 + /* 149 + * The controller does not properly handle DMA addresses over 150 + * 48 bits. 151 + */ 152 + NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16), 147 153 }; 148 154 149 155 /* ··· 252 246 struct rw_semaphore namespaces_rwsem; 253 247 struct device ctrl_device; 254 248 struct device *device; /* char device */ 249 + #ifdef CONFIG_NVME_HWMON 250 + struct device *hwmon_device; 251 + #endif 255 252 struct cdev cdev; 256 253 struct work_struct reset_work; 257 254 struct work_struct delete_work; ··· 584 575 } 585 576 586 577 void nvme_complete_rq(struct request *req); 578 + blk_status_t nvme_host_path_error(struct request *req); 587 579 bool nvme_cancel_request(struct request *req, void *data, bool reserved); 580 + void nvme_cancel_tagset(struct nvme_ctrl *ctrl); 581 + void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl); 588 582 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 589 583 enum nvme_ctrl_state new_state); 590 584 bool nvme_wait_reset(struct nvme_ctrl *ctrl); ··· 821 809 822 810 #ifdef CONFIG_NVME_HWMON 823 811 int nvme_hwmon_init(struct nvme_ctrl *ctrl); 812 + void nvme_hwmon_exit(struct nvme_ctrl *ctrl); 824 813 #else 825 814 static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl) 826 815 { 827 816 return 0; 817 + } 818 + 819 + static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) 820 + { 828 821 } 829 822 #endif 830 823
+16 -1
drivers/nvme/host/pci.c
··· 2362 2362 { 2363 2363 int result = -ENOMEM; 2364 2364 struct pci_dev *pdev = to_pci_dev(dev->dev); 2365 + int dma_address_bits = 64; 2365 2366 2366 2367 if (pci_enable_device_mem(pdev)) 2367 2368 return result; 2368 2369 2369 2370 pci_set_master(pdev); 2370 2371 2371 - if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64))) 2372 + if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 2373 + dma_address_bits = 48; 2374 + if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) 2372 2375 goto disable; 2373 2376 2374 2377 if (readl(dev->bar + NVME_REG_CSTS) == -1) { ··· 3266 3263 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3267 3264 { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ 3268 3265 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3266 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), 3267 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3268 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), 3269 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3270 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), 3271 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3272 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), 3273 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3274 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), 3275 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3276 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), 3277 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3269 3278 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), 3270 3279 .driver_data = NVME_QUIRK_SINGLE_VECTOR }, 3271 3280 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
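Note: for the listed Amazon controllers the quirk simply narrows the streaming and coherent DMA mask from 64 to 48 bits, so the DMA API never hands the device a bus address it cannot drive. DMA_BIT_MASK(n) is just the low n bits set; a tiny standalone check of the two masks (the sample address is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same definition as include/linux/dma-mapping.h. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t bus_addr = 1ULL << 52;	/* hypothetical address above 48 bits */

	printf("64-bit mask: %#" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(64));
	printf("48-bit mask: %#" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(48));
	printf("addr %#" PRIx64 " usable with 48-bit mask? %s\n",
	       bus_addr, (bus_addr & ~DMA_BIT_MASK(48)) ? "no" : "yes");
	return 0;
}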
+21 -13
drivers/nvme/host/rdma.c
··· 919 919 920 920 error = nvme_init_identify(&ctrl->ctrl); 921 921 if (error) 922 - goto out_stop_queue; 922 + goto out_quiesce_queue; 923 923 924 924 return 0; 925 925 926 + out_quiesce_queue: 927 + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 928 + blk_sync_queue(ctrl->ctrl.admin_q); 926 929 out_stop_queue: 927 930 nvme_rdma_stop_queue(&ctrl->queues[0]); 931 + nvme_cancel_admin_tagset(&ctrl->ctrl); 928 932 out_cleanup_queue: 929 933 if (new) 930 934 blk_cleanup_queue(ctrl->ctrl.admin_q); ··· 1005 1001 1006 1002 out_wait_freeze_timed_out: 1007 1003 nvme_stop_queues(&ctrl->ctrl); 1004 + nvme_sync_io_queues(&ctrl->ctrl); 1008 1005 nvme_rdma_stop_io_queues(ctrl); 1009 1006 out_cleanup_connect_q: 1007 + nvme_cancel_tagset(&ctrl->ctrl); 1010 1008 if (new) 1011 1009 blk_cleanup_queue(ctrl->ctrl.connect_q); 1012 1010 out_free_tag_set: ··· 1025 1019 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 1026 1020 blk_sync_queue(ctrl->ctrl.admin_q); 1027 1021 nvme_rdma_stop_queue(&ctrl->queues[0]); 1028 - if (ctrl->ctrl.admin_tagset) { 1029 - blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, 1030 - nvme_cancel_request, &ctrl->ctrl); 1031 - blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset); 1032 - } 1022 + nvme_cancel_admin_tagset(&ctrl->ctrl); 1033 1023 if (remove) 1034 1024 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 1035 1025 nvme_rdma_destroy_admin_queue(ctrl, remove); ··· 1039 1037 nvme_stop_queues(&ctrl->ctrl); 1040 1038 nvme_sync_io_queues(&ctrl->ctrl); 1041 1039 nvme_rdma_stop_io_queues(ctrl); 1042 - if (ctrl->ctrl.tagset) { 1043 - blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, 1044 - nvme_cancel_request, &ctrl->ctrl); 1045 - blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset); 1046 - } 1040 + nvme_cancel_tagset(&ctrl->ctrl); 1047 1041 if (remove) 1048 1042 nvme_start_queues(&ctrl->ctrl); 1049 1043 nvme_rdma_destroy_io_queues(ctrl, remove); ··· 1142 1144 return 0; 1143 1145 1144 1146 destroy_io: 1145 - if (ctrl->ctrl.queue_count > 1) 1147 + if (ctrl->ctrl.queue_count > 1) { 1148 + nvme_stop_queues(&ctrl->ctrl); 1149 + nvme_sync_io_queues(&ctrl->ctrl); 1150 + nvme_rdma_stop_io_queues(ctrl); 1151 + nvme_cancel_tagset(&ctrl->ctrl); 1146 1152 nvme_rdma_destroy_io_queues(ctrl, new); 1153 + } 1147 1154 destroy_admin: 1155 + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 1156 + blk_sync_queue(ctrl->ctrl.admin_q); 1148 1157 nvme_rdma_stop_queue(&ctrl->queues[0]); 1158 + nvme_cancel_admin_tagset(&ctrl->ctrl); 1149 1159 nvme_rdma_destroy_admin_queue(ctrl, new); 1150 1160 return ret; 1151 1161 } ··· 2098 2092 err_unmap: 2099 2093 nvme_rdma_unmap_data(queue, rq); 2100 2094 err: 2101 - if (err == -ENOMEM || err == -EAGAIN) 2095 + if (err == -EIO) 2096 + ret = nvme_host_path_error(rq); 2097 + else if (err == -ENOMEM || err == -EAGAIN) 2102 2098 ret = BLK_STS_RESOURCE; 2103 2099 else 2104 2100 ret = BLK_STS_IOERR;
+29 -26
drivers/nvme/host/tcp.c
··· 206 206 req->pdu_len - req->pdu_sent); 207 207 } 208 208 209 - static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req) 210 - { 211 - return req->iter.iov_offset; 212 - } 213 - 214 209 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req) 215 210 { 216 211 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ? ··· 224 229 struct request *rq = blk_mq_rq_from_pdu(req); 225 230 struct bio_vec *vec; 226 231 unsigned int size; 227 - int nsegs; 232 + int nr_bvec; 228 233 size_t offset; 229 234 230 235 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { 231 236 vec = &rq->special_vec; 232 - nsegs = 1; 237 + nr_bvec = 1; 233 238 size = blk_rq_payload_bytes(rq); 234 239 offset = 0; 235 240 } else { 236 241 struct bio *bio = req->curr_bio; 242 + struct bvec_iter bi; 243 + struct bio_vec bv; 237 244 238 245 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); 239 - nsegs = bio_segments(bio); 246 + nr_bvec = 0; 247 + bio_for_each_bvec(bv, bio, bi) { 248 + nr_bvec++; 249 + } 240 250 size = bio->bi_iter.bi_size; 241 251 offset = bio->bi_iter.bi_bvec_done; 242 252 } 243 253 244 - iov_iter_bvec(&req->iter, dir, vec, nsegs, size); 254 + iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); 245 255 req->iter.iov_offset = offset; 246 256 } 247 257 ··· 983 983 req->state = NVME_TCP_SEND_DATA; 984 984 if (queue->data_digest) 985 985 crypto_ahash_init(queue->snd_hash); 986 - nvme_tcp_init_iter(req, WRITE); 987 986 } else { 988 987 nvme_tcp_done_send_req(queue); 989 988 } ··· 1015 1016 req->state = NVME_TCP_SEND_DATA; 1016 1017 if (queue->data_digest) 1017 1018 crypto_ahash_init(queue->snd_hash); 1018 - if (!req->data_sent) 1019 - nvme_tcp_init_iter(req, WRITE); 1020 1019 return 1; 1021 1020 } 1022 1021 req->offset += ret; ··· 1812 1815 1813 1816 out_wait_freeze_timed_out: 1814 1817 nvme_stop_queues(ctrl); 1818 + nvme_sync_io_queues(ctrl); 1815 1819 nvme_tcp_stop_io_queues(ctrl); 1816 1820 out_cleanup_connect_q: 1821 + nvme_cancel_tagset(ctrl); 1817 1822 if (new) 1818 1823 blk_cleanup_queue(ctrl->connect_q); 1819 1824 out_free_tag_set: ··· 1877 1878 1878 1879 error = nvme_init_identify(ctrl); 1879 1880 if (error) 1880 - goto out_stop_queue; 1881 + goto out_quiesce_queue; 1881 1882 1882 1883 return 0; 1883 1884 1885 + out_quiesce_queue: 1886 + blk_mq_quiesce_queue(ctrl->admin_q); 1887 + blk_sync_queue(ctrl->admin_q); 1884 1888 out_stop_queue: 1885 1889 nvme_tcp_stop_queue(ctrl, 0); 1890 + nvme_cancel_admin_tagset(ctrl); 1886 1891 out_cleanup_queue: 1887 1892 if (new) 1888 1893 blk_cleanup_queue(ctrl->admin_q); ··· 1907 1904 blk_mq_quiesce_queue(ctrl->admin_q); 1908 1905 blk_sync_queue(ctrl->admin_q); 1909 1906 nvme_tcp_stop_queue(ctrl, 0); 1910 - if (ctrl->admin_tagset) { 1911 - blk_mq_tagset_busy_iter(ctrl->admin_tagset, 1912 - nvme_cancel_request, ctrl); 1913 - blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); 1914 - } 1907 + nvme_cancel_admin_tagset(ctrl); 1915 1908 if (remove) 1916 1909 blk_mq_unquiesce_queue(ctrl->admin_q); 1917 1910 nvme_tcp_destroy_admin_queue(ctrl, remove); ··· 1923 1924 nvme_stop_queues(ctrl); 1924 1925 nvme_sync_io_queues(ctrl); 1925 1926 nvme_tcp_stop_io_queues(ctrl); 1926 - if (ctrl->tagset) { 1927 - blk_mq_tagset_busy_iter(ctrl->tagset, 1928 - nvme_cancel_request, ctrl); 1929 - blk_mq_tagset_wait_completed_request(ctrl->tagset); 1930 - } 1927 + nvme_cancel_tagset(ctrl); 1931 1928 if (remove) 1932 1929 nvme_start_queues(ctrl); 1933 1930 nvme_tcp_destroy_io_queues(ctrl, remove); ··· 1998 2003 return 0; 1999 2004 2000 2005 destroy_io: 2001 - if 
(ctrl->queue_count > 1) 2006 + if (ctrl->queue_count > 1) { 2007 + nvme_stop_queues(ctrl); 2008 + nvme_sync_io_queues(ctrl); 2009 + nvme_tcp_stop_io_queues(ctrl); 2010 + nvme_cancel_tagset(ctrl); 2002 2011 nvme_tcp_destroy_io_queues(ctrl, new); 2012 + } 2003 2013 destroy_admin: 2014 + blk_mq_quiesce_queue(ctrl->admin_q); 2015 + blk_sync_queue(ctrl->admin_q); 2004 2016 nvme_tcp_stop_queue(ctrl, 0); 2017 + nvme_cancel_admin_tagset(ctrl); 2005 2018 nvme_tcp_destroy_admin_queue(ctrl, new); 2006 2019 return ret; 2007 2020 } ··· 2271 2268 req->data_len = blk_rq_nr_phys_segments(rq) ? 2272 2269 blk_rq_payload_bytes(rq) : 0; 2273 2270 req->curr_bio = rq->bio; 2271 + if (req->curr_bio && req->data_len) 2272 + nvme_tcp_init_iter(req, rq_data_dir(rq)); 2274 2273 2275 2274 if (rq_data_dir(rq) == WRITE && 2276 2275 req->data_len <= nvme_tcp_inline_data_size(queue)) 2277 2276 req->pdu_len = req->data_len; 2278 - else if (req->curr_bio) 2279 - nvme_tcp_init_iter(req, READ); 2280 2277 2281 2278 pdu->hdr.type = nvme_tcp_cmd; 2282 2279 pdu->hdr.flags = 0;
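The host-side tcp.c change above sizes the request's iov_iter by walking the bio's multi-page bvecs instead of calling bio_segments(), and builds the iterator once at command setup instead of in the send paths. A standalone restatement of that counting loop (the function name is illustrative, not part of the driver):

#include <linux/bio.h>

/*
 * Count the multi-page bvecs of a bio, i.e. the number of bio_vec
 * entries an iov_iter built with iov_iter_bvec() will actually walk.
 */
static unsigned int demo_count_bvecs(struct bio *bio)
{
	struct bvec_iter bi;
	struct bio_vec bv;
	unsigned int nr_bvec = 0;

	bio_for_each_bvec(bv, bio, bi)
		nr_bvec++;

	return nr_bvec;
}

This mirrors the loop added to nvme_tcp_init_iter() in the hunk above.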
+53
drivers/nvme/host/trace.c
··· 102 102 return ret; 103 103 } 104 104 105 + static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10) 106 + { 107 + const char *ret = trace_seq_buffer_ptr(p); 108 + u8 lbaf = cdw10[0] & 0xF; 109 + u8 mset = (cdw10[0] >> 4) & 0x1; 110 + u8 pi = (cdw10[0] >> 5) & 0x7; 111 + u8 pil = cdw10[1] & 0x1; 112 + u8 ses = (cdw10[1] >> 1) & 0x7; 113 + 114 + trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u", 115 + lbaf, mset, pi, pil, ses); 116 + 117 + trace_seq_putc(p, 0); 118 + 119 + return ret; 120 + } 121 + 105 122 static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10) 106 123 { 107 124 const char *ret = trace_seq_buffer_ptr(p); ··· 143 126 trace_seq_printf(p, "nr=%u, attributes=%u", 144 127 get_unaligned_le32(cdw10), 145 128 get_unaligned_le32(cdw10 + 4)); 129 + trace_seq_putc(p, 0); 130 + 131 + return ret; 132 + } 133 + 134 + static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10) 135 + { 136 + const char *ret = trace_seq_buffer_ptr(p); 137 + u64 slba = get_unaligned_le64(cdw10); 138 + u8 zsa = cdw10[12]; 139 + u8 all = cdw10[13]; 140 + 141 + trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all); 142 + trace_seq_putc(p, 0); 143 + 144 + return ret; 145 + } 146 + 147 + static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10) 148 + { 149 + const char *ret = trace_seq_buffer_ptr(p); 150 + u64 slba = get_unaligned_le64(cdw10); 151 + u32 numd = get_unaligned_le32(cdw10 + 8); 152 + u8 zra = cdw10[12]; 153 + u8 zrasf = cdw10[13]; 154 + u8 pr = cdw10[14]; 155 + 156 + trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u", 157 + slba, numd, zra, zrasf, pr); 146 158 trace_seq_putc(p, 0); 147 159 148 160 return ret; ··· 205 159 return nvme_trace_admin_get_features(p, cdw10); 206 160 case nvme_admin_get_lba_status: 207 161 return nvme_trace_get_lba_status(p, cdw10); 162 + case nvme_admin_format_nvm: 163 + return nvme_trace_admin_format_nvm(p, cdw10); 208 164 default: 209 165 return nvme_trace_common(p, cdw10); 210 166 } ··· 219 171 case nvme_cmd_read: 220 172 case nvme_cmd_write: 221 173 case nvme_cmd_write_zeroes: 174 + case nvme_cmd_zone_append: 222 175 return nvme_trace_read_write(p, cdw10); 223 176 case nvme_cmd_dsm: 224 177 return nvme_trace_dsm(p, cdw10); 178 + case nvme_cmd_zone_mgmt_send: 179 + return nvme_trace_zone_mgmt_send(p, cdw10); 180 + case nvme_cmd_zone_mgmt_recv: 181 + return nvme_trace_zone_mgmt_recv(p, cdw10); 225 182 default: 226 183 return nvme_trace_common(p, cdw10); 227 184 }
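The new nvme_trace_admin_format_nvm() decoder above unpacks Format NVM's dword 10 a byte at a time. A userspace sketch of the same bit layout, using made-up sample values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical Format NVM cdw10: lbaf=2, secure erase action 1 */
	uint8_t cdw10[4] = { 0x02, 0x02, 0x00, 0x00 };

	uint8_t lbaf = cdw10[0] & 0xF;		/* LBA format index */
	uint8_t mset = (cdw10[0] >> 4) & 0x1;	/* metadata settings */
	uint8_t pi   = (cdw10[0] >> 5) & 0x7;	/* protection information */
	uint8_t pil  = cdw10[1] & 0x1;		/* PI location */
	uint8_t ses  = (cdw10[1] >> 1) & 0x7;	/* secure erase settings */

	printf("lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u\n",
	       lbaf, mset, pi, pil, ses);
	return 0;
}

The zone management send/receive decoders follow the same pattern, with get_unaligned_le64()/le32() for the multi-byte fields.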
+49 -65
drivers/nvme/target/admin-cmd.c
··· 74 74 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, 75 75 struct nvme_smart_log *slog) 76 76 { 77 - struct nvmet_ns *ns; 78 77 u64 host_reads, host_writes, data_units_read, data_units_written; 78 + u16 status; 79 79 80 - ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid); 81 - if (!ns) { 82 - pr_err("Could not find namespace id : %d\n", 83 - le32_to_cpu(req->cmd->get_log_page.nsid)); 84 - req->error_loc = offsetof(struct nvme_rw_command, nsid); 85 - return NVME_SC_INVALID_NS; 86 - } 80 + status = nvmet_req_find_ns(req); 81 + if (status) 82 + return status; 87 83 88 84 /* we don't have the right data for file backed ns */ 89 - if (!ns->bdev) 90 - goto out; 85 + if (!req->ns->bdev) 86 + return NVME_SC_SUCCESS; 91 87 92 - host_reads = part_stat_read(ns->bdev, ios[READ]); 88 + host_reads = part_stat_read(req->ns->bdev, ios[READ]); 93 89 data_units_read = 94 - DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000); 95 - host_writes = part_stat_read(ns->bdev, ios[WRITE]); 90 + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); 91 + host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); 96 92 data_units_written = 97 - DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000); 93 + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); 98 94 99 95 put_unaligned_le64(host_reads, &slog->host_reads[0]); 100 96 put_unaligned_le64(data_units_read, &slog->data_units_read[0]); 101 97 put_unaligned_le64(host_writes, &slog->host_writes[0]); 102 98 put_unaligned_le64(data_units_written, &slog->data_units_written[0]); 103 - out: 104 - nvmet_put_namespace(ns); 105 99 106 100 return NVME_SC_SUCCESS; 107 101 } ··· 462 468 463 469 static void nvmet_execute_identify_ns(struct nvmet_req *req) 464 470 { 465 - struct nvmet_ctrl *ctrl = req->sq->ctrl; 466 - struct nvmet_ns *ns; 467 471 struct nvme_id_ns *id; 468 - u16 status = 0; 472 + u16 status; 469 473 470 474 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { 471 475 req->error_loc = offsetof(struct nvme_identify, nsid); ··· 478 486 } 479 487 480 488 /* return an all zeroed buffer if we can't find an active namespace */ 481 - ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid); 482 - if (!ns) { 483 - status = NVME_SC_INVALID_NS; 489 + status = nvmet_req_find_ns(req); 490 + if (status) { 491 + status = 0; 484 492 goto done; 485 493 } 486 494 487 - nvmet_ns_revalidate(ns); 495 + nvmet_ns_revalidate(req->ns); 488 496 489 497 /* 490 498 * nuse = ncap = nsze isn't always true, but we have no way to find 491 499 * that out from the underlying device. 492 500 */ 493 - id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift); 494 - switch (req->port->ana_state[ns->anagrpid]) { 501 + id->ncap = id->nsze = 502 + cpu_to_le64(req->ns->size >> req->ns->blksize_shift); 503 + switch (req->port->ana_state[req->ns->anagrpid]) { 495 504 case NVME_ANA_INACCESSIBLE: 496 505 case NVME_ANA_PERSISTENT_LOSS: 497 506 break; ··· 501 508 break; 502 509 } 503 510 504 - if (ns->bdev) 505 - nvmet_bdev_set_limits(ns->bdev, id); 511 + if (req->ns->bdev) 512 + nvmet_bdev_set_limits(req->ns->bdev, id); 506 513 507 514 /* 508 515 * We just provide a single LBA format that matches what the ··· 516 523 * controllers, but also with any other user of the block device. 
517 524 */ 518 525 id->nmic = (1 << 0); 519 - id->anagrpid = cpu_to_le32(ns->anagrpid); 526 + id->anagrpid = cpu_to_le32(req->ns->anagrpid); 520 527 521 - memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid)); 528 + memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); 522 529 523 - id->lbaf[0].ds = ns->blksize_shift; 530 + id->lbaf[0].ds = req->ns->blksize_shift; 524 531 525 - if (ctrl->pi_support && nvmet_ns_has_pi(ns)) { 532 + if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) { 526 533 id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST | 527 534 NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 | 528 535 NVME_NS_DPC_PI_TYPE3; 529 536 id->mc = NVME_MC_EXTENDED_LBA; 530 - id->dps = ns->pi_type; 537 + id->dps = req->ns->pi_type; 531 538 id->flbas = NVME_NS_FLBAS_META_EXT; 532 - id->lbaf[0].ms = cpu_to_le16(ns->metadata_size); 539 + id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size); 533 540 } 534 541 535 - if (ns->readonly) 542 + if (req->ns->readonly) 536 543 id->nsattr |= (1 << 0); 537 - nvmet_put_namespace(ns); 538 544 done: 539 545 if (!status) 540 546 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); ··· 599 607 600 608 static void nvmet_execute_identify_desclist(struct nvmet_req *req) 601 609 { 602 - struct nvmet_ns *ns; 603 - u16 status = 0; 604 610 off_t off = 0; 611 + u16 status; 605 612 606 - ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); 607 - if (!ns) { 608 - req->error_loc = offsetof(struct nvme_identify, nsid); 609 - status = NVME_SC_INVALID_NS | NVME_SC_DNR; 613 + status = nvmet_req_find_ns(req); 614 + if (status) 610 615 goto out; 611 - } 612 616 613 - if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) { 617 + if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) { 614 618 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID, 615 619 NVME_NIDT_UUID_LEN, 616 - &ns->uuid, &off); 620 + &req->ns->uuid, &off); 617 621 if (status) 618 - goto out_put_ns; 622 + goto out; 619 623 } 620 - if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) { 624 + if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) { 621 625 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID, 622 626 NVME_NIDT_NGUID_LEN, 623 - &ns->nguid, &off); 627 + &req->ns->nguid, &off); 624 628 if (status) 625 - goto out_put_ns; 629 + goto out; 626 630 } 627 631 628 632 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, 629 633 off) != NVME_IDENTIFY_DATA_SIZE - off) 630 634 status = NVME_SC_INTERNAL | NVME_SC_DNR; 631 - out_put_ns: 632 - nvmet_put_namespace(ns); 635 + 633 636 out: 634 637 nvmet_req_complete(req, status); 635 638 } ··· 683 696 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req) 684 697 { 685 698 u32 write_protect = le32_to_cpu(req->cmd->common.cdw11); 686 - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; 687 - u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE; 699 + struct nvmet_subsys *subsys = nvmet_req_subsys(req); 700 + u16 status; 688 701 689 - req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid); 690 - if (unlikely(!req->ns)) { 691 - req->error_loc = offsetof(struct nvme_common_command, nsid); 702 + status = nvmet_req_find_ns(req); 703 + if (status) 692 704 return status; 693 - } 694 705 695 706 mutex_lock(&subsys->lock); 696 707 switch (write_protect) { ··· 742 757 743 758 void nvmet_execute_set_features(struct nvmet_req *req) 744 759 { 745 - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; 760 + struct nvmet_subsys *subsys = nvmet_req_subsys(req); 746 761 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); 747 762 u32 cdw11 
= le32_to_cpu(req->cmd->common.cdw11); 748 763 u16 status = 0; ··· 786 801 787 802 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req) 788 803 { 789 - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; 804 + struct nvmet_subsys *subsys = nvmet_req_subsys(req); 790 805 u32 result; 791 806 792 - req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid); 793 - if (!req->ns) { 794 - req->error_loc = offsetof(struct nvme_common_command, nsid); 795 - return NVME_SC_INVALID_NS | NVME_SC_DNR; 796 - } 807 + result = nvmet_req_find_ns(req); 808 + if (result) 809 + return result; 810 + 797 811 mutex_lock(&subsys->lock); 798 812 if (req->ns->readonly == true) 799 813 result = NVME_NS_WRITE_PROTECT; ··· 816 832 817 833 void nvmet_execute_get_features(struct nvmet_req *req) 818 834 { 819 - struct nvmet_subsys *subsys = req->sq->ctrl->subsys; 835 + struct nvmet_subsys *subsys = nvmet_req_subsys(req); 820 836 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); 821 837 u16 status = 0; 822 838 ··· 923 939 924 940 if (nvme_is_fabrics(cmd)) 925 941 return nvmet_parse_fabrics_cmd(req); 926 - if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) 942 + if (nvmet_req_subsys(req)->type == NVME_NQN_DISC) 927 943 return nvmet_parse_discovery_cmd(req); 928 944 929 945 ret = nvmet_check_ctrl_status(req, cmd);
+2 -4
drivers/nvme/target/configfs.c
··· 45 45 { 46 46 if (p->enabled) 47 47 pr_err("Disable port '%u' before changing attribute in %s\n", 48 - le16_to_cpu(p->disc_addr.portid), caller); 48 + le16_to_cpu(p->disc_addr.portid), caller); 49 49 return p->enabled; 50 50 } 51 51 ··· 266 266 if (strtobool(page, &val)) 267 267 return -EINVAL; 268 268 269 - if (port->enabled) { 270 - pr_err("Disable port before setting pi_enable value.\n"); 269 + if (nvmet_is_port_enabled(port, __func__)) 271 270 return -EACCES; 272 - } 273 271 274 272 port->pi_enable = val; 275 273 return count;
+24 -13
drivers/nvme/target/core.c
··· 82 82 return status; 83 83 } 84 84 85 + u16 nvmet_report_invalid_opcode(struct nvmet_req *req) 86 + { 87 + pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode, 88 + req->sq->qid); 89 + 90 + req->error_loc = offsetof(struct nvme_common_command, opcode); 91 + return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; 92 + } 93 + 85 94 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, 86 95 const char *subsysnqn); 87 96 ··· 426 417 cancel_delayed_work_sync(&ctrl->ka_work); 427 418 } 428 419 429 - struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid) 420 + u16 nvmet_req_find_ns(struct nvmet_req *req) 430 421 { 431 - struct nvmet_ns *ns; 422 + u32 nsid = le32_to_cpu(req->cmd->common.nsid); 432 423 433 - ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid)); 434 - if (ns) 435 - percpu_ref_get(&ns->ref); 424 + req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid); 425 + if (unlikely(!req->ns)) { 426 + req->error_loc = offsetof(struct nvme_common_command, nsid); 427 + return NVME_SC_INVALID_NS | NVME_SC_DNR; 428 + } 436 429 437 - return ns; 430 + percpu_ref_get(&req->ns->ref); 431 + return NVME_SC_SUCCESS; 438 432 } 439 433 440 434 static void nvmet_destroy_namespace(struct percpu_ref *ref) ··· 874 862 if (nvmet_req_passthru_ctrl(req)) 875 863 return nvmet_parse_passthru_io_cmd(req); 876 864 877 - req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid); 878 - if (unlikely(!req->ns)) { 879 - req->error_loc = offsetof(struct nvme_common_command, nsid); 880 - return NVME_SC_INVALID_NS | NVME_SC_DNR; 881 - } 865 + ret = nvmet_req_find_ns(req); 866 + if (unlikely(ret)) 867 + return ret; 868 + 882 869 ret = nvmet_check_ana_state(req->port, req->ns); 883 870 if (unlikely(ret)) { 884 871 req->error_loc = offsetof(struct nvme_common_command, nsid); ··· 891 880 892 881 if (req->ns->file) 893 882 return nvmet_file_parse_io_cmd(req); 894 - else 895 - return nvmet_bdev_parse_io_cmd(req); 883 + 884 + return nvmet_bdev_parse_io_cmd(req); 896 885 } 897 886 898 887 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+39 -44
drivers/nvme/target/fc.c
··· 145 145 struct list_head avail_defer_list; 146 146 struct workqueue_struct *work_q; 147 147 struct kref ref; 148 + struct rcu_head rcu; 148 149 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ 149 150 } __aligned(sizeof(unsigned long long)); 150 151 ··· 165 164 struct nvmet_fc_hostport *hostport; 166 165 struct nvmet_fc_ls_iod *rcv_disconn; 167 166 struct list_head a_list; 168 - struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; 167 + struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1]; 169 168 struct kref ref; 170 169 struct work_struct del_work; 170 + struct rcu_head rcu; 171 171 }; 172 172 173 173 ··· 792 790 u16 qid, u16 sqsize) 793 791 { 794 792 struct nvmet_fc_tgt_queue *queue; 795 - unsigned long flags; 796 793 int ret; 797 794 798 795 if (qid > NVMET_NR_QUEUES) ··· 830 829 goto out_fail_iodlist; 831 830 832 831 WARN_ON(assoc->queues[qid]); 833 - spin_lock_irqsave(&assoc->tgtport->lock, flags); 834 - assoc->queues[qid] = queue; 835 - spin_unlock_irqrestore(&assoc->tgtport->lock, flags); 832 + rcu_assign_pointer(assoc->queues[qid], queue); 836 833 837 834 return queue; 838 835 ··· 850 851 { 851 852 struct nvmet_fc_tgt_queue *queue = 852 853 container_of(ref, struct nvmet_fc_tgt_queue, ref); 853 - unsigned long flags; 854 854 855 - spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); 856 - queue->assoc->queues[queue->qid] = NULL; 857 - spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); 855 + rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL); 858 856 859 857 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 860 858 ··· 859 863 860 864 destroy_workqueue(queue->work_q); 861 865 862 - kfree(queue); 866 + kfree_rcu(queue, rcu); 863 867 } 864 868 865 869 static void ··· 961 965 struct nvmet_fc_tgt_queue *queue; 962 966 u64 association_id = nvmet_fc_getassociationid(connection_id); 963 967 u16 qid = nvmet_fc_getqueueid(connection_id); 964 - unsigned long flags; 965 968 966 969 if (qid > NVMET_NR_QUEUES) 967 970 return NULL; 968 971 969 - spin_lock_irqsave(&tgtport->lock, flags); 970 - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 972 + rcu_read_lock(); 973 + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 971 974 if (association_id == assoc->association_id) { 972 - queue = assoc->queues[qid]; 975 + queue = rcu_dereference(assoc->queues[qid]); 973 976 if (queue && 974 977 (!atomic_read(&queue->connected) || 975 978 !nvmet_fc_tgt_q_get(queue))) 976 979 queue = NULL; 977 - spin_unlock_irqrestore(&tgtport->lock, flags); 980 + rcu_read_unlock(); 978 981 return queue; 979 982 } 980 983 } 981 - spin_unlock_irqrestore(&tgtport->lock, flags); 984 + rcu_read_unlock(); 982 985 return NULL; 983 986 } 984 987 ··· 1132 1137 } 1133 1138 if (!needrandom) { 1134 1139 assoc->association_id = ran; 1135 - list_add_tail(&assoc->a_list, &tgtport->assoc_list); 1140 + list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); 1136 1141 } 1137 1142 spin_unlock_irqrestore(&tgtport->lock, flags); 1138 1143 } ··· 1162 1167 1163 1168 nvmet_fc_free_hostport(assoc->hostport); 1164 1169 spin_lock_irqsave(&tgtport->lock, flags); 1165 - list_del(&assoc->a_list); 1170 + list_del_rcu(&assoc->a_list); 1166 1171 oldls = assoc->rcv_disconn; 1167 1172 spin_unlock_irqrestore(&tgtport->lock, flags); 1168 1173 /* if pending Rcv Disconnect Association LS, send rsp now */ ··· 1172 1177 dev_info(tgtport->dev, 1173 1178 "{%d:%d} Association freed\n", 1174 1179 tgtport->fc_target_port.port_num, assoc->a_id); 1175 - kfree(assoc); 1180 + kfree_rcu(assoc, rcu); 
1176 1181 nvmet_fc_tgtport_put(tgtport); 1177 1182 } 1178 1183 ··· 1193 1198 { 1194 1199 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1195 1200 struct nvmet_fc_tgt_queue *queue; 1196 - unsigned long flags; 1197 1201 int i, terminating; 1198 1202 1199 1203 terminating = atomic_xchg(&assoc->terminating, 1); ··· 1201 1207 if (terminating) 1202 1208 return; 1203 1209 1204 - spin_lock_irqsave(&tgtport->lock, flags); 1210 + 1205 1211 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1206 - queue = assoc->queues[i]; 1207 - if (queue) { 1208 - if (!nvmet_fc_tgt_q_get(queue)) 1209 - continue; 1210 - spin_unlock_irqrestore(&tgtport->lock, flags); 1211 - nvmet_fc_delete_target_queue(queue); 1212 - nvmet_fc_tgt_q_put(queue); 1213 - spin_lock_irqsave(&tgtport->lock, flags); 1212 + rcu_read_lock(); 1213 + queue = rcu_dereference(assoc->queues[i]); 1214 + if (!queue) { 1215 + rcu_read_unlock(); 1216 + continue; 1214 1217 } 1218 + 1219 + if (!nvmet_fc_tgt_q_get(queue)) { 1220 + rcu_read_unlock(); 1221 + continue; 1222 + } 1223 + rcu_read_unlock(); 1224 + nvmet_fc_delete_target_queue(queue); 1225 + nvmet_fc_tgt_q_put(queue); 1215 1226 } 1216 - spin_unlock_irqrestore(&tgtport->lock, flags); 1217 1227 1218 1228 dev_info(tgtport->dev, 1219 1229 "{%d:%d} Association deleted\n", ··· 1232 1234 { 1233 1235 struct nvmet_fc_tgt_assoc *assoc; 1234 1236 struct nvmet_fc_tgt_assoc *ret = NULL; 1235 - unsigned long flags; 1236 1237 1237 - spin_lock_irqsave(&tgtport->lock, flags); 1238 - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 1238 + rcu_read_lock(); 1239 + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1239 1240 if (association_id == assoc->association_id) { 1240 1241 ret = assoc; 1241 1242 if (!nvmet_fc_tgt_a_get(assoc)) ··· 1242 1245 break; 1243 1246 } 1244 1247 } 1245 - spin_unlock_irqrestore(&tgtport->lock, flags); 1248 + rcu_read_unlock(); 1246 1249 1247 1250 return ret; 1248 1251 } ··· 1470 1473 static void 1471 1474 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1472 1475 { 1473 - struct nvmet_fc_tgt_assoc *assoc, *next; 1474 - unsigned long flags; 1476 + struct nvmet_fc_tgt_assoc *assoc; 1475 1477 1476 - spin_lock_irqsave(&tgtport->lock, flags); 1477 - list_for_each_entry_safe(assoc, next, 1478 - &tgtport->assoc_list, a_list) { 1478 + rcu_read_lock(); 1479 + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1479 1480 if (!nvmet_fc_tgt_a_get(assoc)) 1480 1481 continue; 1481 1482 if (!schedule_work(&assoc->del_work)) 1482 1483 /* already deleting - release local reference */ 1483 1484 nvmet_fc_tgt_a_put(assoc); 1484 1485 } 1485 - spin_unlock_irqrestore(&tgtport->lock, flags); 1486 + rcu_read_unlock(); 1486 1487 } 1487 1488 1488 1489 /** ··· 1563 1568 continue; 1564 1569 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1565 1570 1566 - spin_lock_irqsave(&tgtport->lock, flags); 1567 - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 1568 - queue = assoc->queues[0]; 1571 + rcu_read_lock(); 1572 + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1573 + queue = rcu_dereference(assoc->queues[0]); 1569 1574 if (queue && queue->nvme_sq.ctrl == ctrl) { 1570 1575 if (nvmet_fc_tgt_a_get(assoc)) 1571 1576 found_ctrl = true; 1572 1577 break; 1573 1578 } 1574 1579 } 1575 - spin_unlock_irqrestore(&tgtport->lock, flags); 1580 + rcu_read_unlock(); 1576 1581 1577 1582 nvmet_fc_tgtport_put(tgtport); 1578 1583
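The fc.c conversion above is the standard RCU list pattern: lookups walk assoc_list under rcu_read_lock(), additions and removals still run under the tgtport lock but use the _rcu list helpers, and freed objects wait out a grace period via kfree_rcu(). A minimal generic sketch of that pattern (all names below are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_assoc {
	struct list_head a_list;
	u64 association_id;
	struct rcu_head rcu;
};

static LIST_HEAD(demo_assoc_list);
static DEFINE_SPINLOCK(demo_lock);

/* Reader: lockless traversal under the RCU read lock. */
static struct demo_assoc *demo_find_assoc(u64 id)
{
	struct demo_assoc *assoc, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &demo_assoc_list, a_list) {
		if (assoc->association_id == id) {
			/* a real lookup takes a reference (e.g. a kref)
			 * here, before rcu_read_unlock() */
			found = assoc;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Writer: removal stays serialized, the free waits for a grace period. */
static void demo_remove_assoc(struct demo_assoc *assoc)
{
	spin_lock(&demo_lock);
	list_del_rcu(&assoc->a_list);
	spin_unlock(&demo_lock);
	kfree_rcu(assoc, rcu);
}

The per-association queues[] array gets the same treatment in the hunk above via rcu_assign_pointer()/rcu_dereference() and kfree_rcu().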
+1 -1
drivers/nvme/target/fcloop.c
··· 1545 1545 NULL 1546 1546 }; 1547 1547 1548 - static struct attribute_group fclopp_dev_attrs_group = { 1548 + static const struct attribute_group fclopp_dev_attrs_group = { 1549 1549 .attrs = fcloop_dev_attrs, 1550 1550 }; 1551 1551
+4 -9
drivers/nvme/target/io-cmd-bdev.c
··· 256 256 if (is_pci_p2pdma_page(sg_page(req->sg))) 257 257 op |= REQ_NOMERGE; 258 258 259 - sector = le64_to_cpu(req->cmd->rw.slba); 260 - sector <<= (req->ns->blksize_shift - 9); 259 + sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); 261 260 262 261 if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { 263 262 bio = &req->b.inline_bio; ··· 344 345 int ret; 345 346 346 347 ret = __blkdev_issue_discard(ns->bdev, 347 - le64_to_cpu(range->slba) << (ns->blksize_shift - 9), 348 + nvmet_lba_to_sect(ns, range->slba), 348 349 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), 349 350 GFP_KERNEL, 0, bio); 350 351 if (ret && ret != -EOPNOTSUPP) { ··· 413 414 if (!nvmet_check_transfer_len(req, 0)) 414 415 return; 415 416 416 - sector = le64_to_cpu(write_zeroes->slba) << 417 - (req->ns->blksize_shift - 9); 417 + sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba); 418 418 nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << 419 419 (req->ns->blksize_shift - 9)); 420 420 ··· 449 451 req->execute = nvmet_bdev_execute_write_zeroes; 450 452 return 0; 451 453 default: 452 - pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, 453 - req->sq->qid); 454 - req->error_loc = offsetof(struct nvme_common_command, opcode); 455 - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; 454 + return nvmet_report_invalid_opcode(req); 456 455 } 457 456 }
+1 -4
drivers/nvme/target/io-cmd-file.c
··· 400 400 req->execute = nvmet_file_execute_write_zeroes; 401 401 return 0; 402 402 default: 403 - pr_err("unhandled cmd for file ns %d on qid %d\n", 404 - cmd->common.opcode, req->sq->qid); 405 - req->error_loc = offsetof(struct nvme_common_command, opcode); 406 - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; 403 + return nvmet_report_invalid_opcode(req); 407 404 } 408 405 }
+18 -2
drivers/nvme/target/nvmet.h
··· 443 443 void nvmet_subsys_put(struct nvmet_subsys *subsys); 444 444 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys); 445 445 446 - struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); 446 + u16 nvmet_req_find_ns(struct nvmet_req *req); 447 447 void nvmet_put_namespace(struct nvmet_ns *ns); 448 448 int nvmet_ns_enable(struct nvmet_ns *ns); 449 449 void nvmet_ns_disable(struct nvmet_ns *ns); ··· 551 551 sizeof(struct nvme_dsm_range); 552 552 } 553 553 554 + static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req) 555 + { 556 + return req->sq->ctrl->subsys; 557 + } 558 + 554 559 #ifdef CONFIG_NVME_TARGET_PASSTHRU 555 560 void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys); 556 561 int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys); ··· 590 585 static inline struct nvme_ctrl * 591 586 nvmet_req_passthru_ctrl(struct nvmet_req *req) 592 587 { 593 - return nvmet_passthru_ctrl(req->sq->ctrl->subsys); 588 + return nvmet_passthru_ctrl(nvmet_req_subsys(req)); 594 589 } 595 590 596 591 u16 errno_to_nvme_status(struct nvmet_req *req, int errno); 592 + u16 nvmet_report_invalid_opcode(struct nvmet_req *req); 597 593 598 594 /* Convert a 32-bit number to a 16-bit 0's based number */ 599 595 static inline __le16 to0based(u32 a) ··· 607 601 if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) 608 602 return false; 609 603 return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple); 604 + } 605 + 606 + static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect) 607 + { 608 + return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT)); 609 + } 610 + 611 + static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba) 612 + { 613 + return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT); 610 614 } 611 615 612 616 #endif /* _NVMET_H */
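The two new nvmet.h helpers above convert between namespace LBAs and the kernel's fixed 512-byte sectors. A standalone sketch of the same arithmetic (endianness conversion omitted), assuming a hypothetical namespace formatted with 4096-byte LBAs:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT	9	/* kernel sectors are always 512 bytes */

int main(void)
{
	unsigned int blksize_shift = 12;	/* hypothetical 4K-LBA namespace */
	uint64_t lba = 100;

	/* nvmet_lba_to_sect(): LBA 100 of a 4K namespace is 512-byte sector 800 */
	uint64_t sect = lba << (blksize_shift - SECTOR_SHIFT);
	/* nvmet_sect_to_lba(): and back again */
	uint64_t back = sect >> (blksize_shift - SECTOR_SHIFT);

	printf("lba %llu -> sector %llu -> lba %llu\n",
	       (unsigned long long)lba, (unsigned long long)sect,
	       (unsigned long long)back);
	return 0;
}

The io-cmd-bdev.c hunk above switches its open-coded shifts over to nvmet_lba_to_sect().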
+3 -3
drivers/nvme/target/passthru.c
··· 239 239 } 240 240 241 241 q = ns->queue; 242 - timeout = req->sq->ctrl->subsys->io_timeout; 242 + timeout = nvmet_req_subsys(req)->io_timeout; 243 243 } else { 244 - timeout = req->sq->ctrl->subsys->admin_timeout; 244 + timeout = nvmet_req_subsys(req)->admin_timeout; 245 245 } 246 246 247 247 rq = nvme_alloc_request(q, req->cmd, 0); ··· 494 494 return nvmet_setup_passthru_command(req); 495 495 default: 496 496 /* Reject commands not in the allowlist above */ 497 - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; 497 + return nvmet_report_invalid_opcode(req); 498 498 } 499 499 } 500 500
+42 -17
drivers/nvme/target/tcp.c
··· 379 379 return NVME_SC_INTERNAL; 380 380 } 381 381 382 - static void nvmet_tcp_ddgst(struct ahash_request *hash, 382 + static void nvmet_tcp_send_ddgst(struct ahash_request *hash, 383 383 struct nvmet_tcp_cmd *cmd) 384 384 { 385 385 ahash_request_set_crypt(hash, cmd->req.sg, 386 386 (void *)&cmd->exp_ddgst, cmd->req.transfer_len); 387 387 crypto_ahash_digest(hash); 388 + } 389 + 390 + static void nvmet_tcp_recv_ddgst(struct ahash_request *hash, 391 + struct nvmet_tcp_cmd *cmd) 392 + { 393 + struct scatterlist sg; 394 + struct kvec *iov; 395 + int i; 396 + 397 + crypto_ahash_init(hash); 398 + for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) { 399 + sg_init_one(&sg, iov->iov_base, iov->iov_len); 400 + ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len); 401 + crypto_ahash_update(hash); 402 + } 403 + ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0); 404 + crypto_ahash_final(hash); 388 405 } 389 406 390 407 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) ··· 428 411 429 412 if (queue->data_digest) { 430 413 pdu->hdr.flags |= NVME_TCP_F_DDGST; 431 - nvmet_tcp_ddgst(queue->snd_hash, cmd); 414 + nvmet_tcp_send_ddgst(queue->snd_hash, cmd); 432 415 } 433 416 434 417 if (cmd->queue->hdr_digest) { ··· 1077 1060 { 1078 1061 struct nvmet_tcp_queue *queue = cmd->queue; 1079 1062 1080 - nvmet_tcp_ddgst(queue->rcv_hash, cmd); 1063 + nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd); 1081 1064 queue->offset = 0; 1082 1065 queue->left = NVME_TCP_DIGEST_LENGTH; 1083 1066 queue->rcv_state = NVMET_TCP_RECV_DDGST; ··· 1098 1081 cmd->rbytes_done += ret; 1099 1082 } 1100 1083 1084 + if (queue->data_digest) { 1085 + nvmet_tcp_prep_recv_ddgst(cmd); 1086 + return 0; 1087 + } 1101 1088 nvmet_tcp_unmap_pdu_iovec(cmd); 1102 1089 1103 1090 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && 1104 1091 cmd->rbytes_done == cmd->req.transfer_len) { 1105 - if (queue->data_digest) { 1106 - nvmet_tcp_prep_recv_ddgst(cmd); 1107 - return 0; 1108 - } 1109 1092 cmd->req.execute(&cmd->req); 1110 1093 } 1111 1094 ··· 1485 1468 if (inet->rcv_tos > 0) 1486 1469 ip_sock_set_tos(sock->sk, inet->rcv_tos); 1487 1470 1471 + ret = 0; 1488 1472 write_lock_bh(&sock->sk->sk_callback_lock); 1489 - sock->sk->sk_user_data = queue; 1490 - queue->data_ready = sock->sk->sk_data_ready; 1491 - sock->sk->sk_data_ready = nvmet_tcp_data_ready; 1492 - queue->state_change = sock->sk->sk_state_change; 1493 - sock->sk->sk_state_change = nvmet_tcp_state_change; 1494 - queue->write_space = sock->sk->sk_write_space; 1495 - sock->sk->sk_write_space = nvmet_tcp_write_space; 1473 + if (sock->sk->sk_state != TCP_ESTABLISHED) { 1474 + /* 1475 + * If the socket is already closing, don't even start 1476 + * consuming it 1477 + */ 1478 + ret = -ENOTCONN; 1479 + } else { 1480 + sock->sk->sk_user_data = queue; 1481 + queue->data_ready = sock->sk->sk_data_ready; 1482 + sock->sk->sk_data_ready = nvmet_tcp_data_ready; 1483 + queue->state_change = sock->sk->sk_state_change; 1484 + sock->sk->sk_state_change = nvmet_tcp_state_change; 1485 + queue->write_space = sock->sk->sk_write_space; 1486 + sock->sk->sk_write_space = nvmet_tcp_write_space; 1487 + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1488 + } 1496 1489 write_unlock_bh(&sock->sk->sk_callback_lock); 1497 1490 1498 - return 0; 1491 + return ret; 1499 1492 } 1500 1493 1501 1494 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, ··· 1552 1525 ret = nvmet_tcp_set_queue_sock(queue); 1553 1526 if (ret) 1554 1527 goto out_destroy_sq; 1555 - 1556 - 
queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1557 1528 1558 1529 return 0; 1559 1530 out_destroy_sq:
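The accept-path fix above closes a race with a socket that is already shutting down: the TCP state is checked under sk_callback_lock, and the queue's callbacks (plus the initial io_work kick) are only installed if the socket is still established. A simplified, generic sketch of that shape (names are illustrative):

#include <linux/errno.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/tcp_states.h>

static int demo_adopt_socket(struct socket *sock, void *queue)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* peer already closed; don't start consuming the socket */
		ret = -ENOTCONN;
	} else {
		sk->sk_user_data = queue;
		/* save the old sk callbacks and install the driver's here,
		 * then schedule the first round of I/O work */
	}
	write_unlock_bh(&sk->sk_callback_lock);

	return ret;
}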
+6 -3
drivers/nvme/target/trace.h
··· 48 48 49 49 static inline void __assign_req_name(char *name, struct nvmet_req *req) 50 50 { 51 - if (req->ns) 52 - strncpy(name, req->ns->device_path, DISK_NAME_LEN); 53 - else 51 + if (!req->ns) { 54 52 memset(name, 0, DISK_NAME_LEN); 53 + return; 54 + } 55 + 56 + strncpy(name, req->ns->device_path, 57 + min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path))); 55 58 } 56 59 #endif 57 60
+12
include/linux/blk-mq.h
··· 490 490 return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE; 491 491 } 492 492 493 + /* 494 + * 495 + * Set the state to complete when completing a request from inside ->queue_rq. 496 + * This is used by drivers that want to ensure special complete actions that 497 + * need access to the request are called on failure, e.g. by nvme for 498 + * multipathing. 499 + */ 500 + static inline void blk_mq_set_request_complete(struct request *rq) 501 + { 502 + WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); 503 + } 504 + 493 505 void blk_mq_start_request(struct request *rq); 494 506 void blk_mq_end_request(struct request *rq, blk_status_t error); 495 507 void __blk_mq_end_request(struct request *rq, blk_status_t error);
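Per the comment added above, blk_mq_set_request_complete() is for drivers that fail a request from inside ->queue_rq but still want their normal completion handler to see it (nvme uses it for multipathing). A rough, hypothetical sketch of that usage pattern, not taken from the nvme code:

#include <linux/blk-mq.h>

/* Stand-in for a driver's completion handler (failover, stats, ...). */
static void demo_driver_complete(struct request *rq, blk_status_t status)
{
	blk_mq_end_request(rq, status);
}

/*
 * Called from inside ->queue_rq when the command cannot be issued:
 * mark the request complete first, then call the completion handler
 * directly so it still gets access to the request on failure.
 */
static void demo_fail_from_queue_rq(struct request *rq, blk_status_t status)
{
	blk_mq_set_request_complete(rq);
	demo_driver_complete(rq, status);
}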
+25 -5
include/linux/nvme.h
··· 697 697 nvme_opcode_name(nvme_cmd_resv_register), \ 698 698 nvme_opcode_name(nvme_cmd_resv_report), \ 699 699 nvme_opcode_name(nvme_cmd_resv_acquire), \ 700 - nvme_opcode_name(nvme_cmd_resv_release)) 700 + nvme_opcode_name(nvme_cmd_resv_release), \ 701 + nvme_opcode_name(nvme_cmd_zone_mgmt_send), \ 702 + nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \ 703 + nvme_opcode_name(nvme_cmd_zone_append)) 704 + 701 705 702 706 703 707 /* ··· 1477 1473 NVME_SC_SGL_INVALID_DATA = 0xf, 1478 1474 NVME_SC_SGL_INVALID_METADATA = 0x10, 1479 1475 NVME_SC_SGL_INVALID_TYPE = 0x11, 1480 - 1476 + NVME_SC_CMB_INVALID_USE = 0x12, 1477 + NVME_SC_PRP_INVALID_OFFSET = 0x13, 1478 + NVME_SC_ATOMIC_WU_EXCEEDED = 0x14, 1479 + NVME_SC_OP_DENIED = 0x15, 1481 1480 NVME_SC_SGL_INVALID_OFFSET = 0x16, 1482 - NVME_SC_SGL_INVALID_SUBTYPE = 0x17, 1483 - 1481 + NVME_SC_RESERVED = 0x17, 1482 + NVME_SC_HOST_ID_INCONSIST = 0x18, 1483 + NVME_SC_KA_TIMEOUT_EXPIRED = 0x19, 1484 + NVME_SC_KA_TIMEOUT_INVALID = 0x1A, 1485 + NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B, 1484 1486 NVME_SC_SANITIZE_FAILED = 0x1C, 1485 1487 NVME_SC_SANITIZE_IN_PROGRESS = 0x1D, 1486 - 1488 + NVME_SC_SGL_INVALID_GRANULARITY = 0x1E, 1489 + NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F, 1487 1490 NVME_SC_NS_WRITE_PROTECTED = 0x20, 1488 1491 NVME_SC_CMD_INTERRUPTED = 0x21, 1492 + NVME_SC_TRANSIENT_TR_ERR = 0x22, 1489 1493 1490 1494 NVME_SC_LBA_RANGE = 0x80, 1491 1495 NVME_SC_CAP_EXCEEDED = 0x81, 1492 1496 NVME_SC_NS_NOT_READY = 0x82, 1493 1497 NVME_SC_RESERVATION_CONFLICT = 0x83, 1498 + NVME_SC_FORMAT_IN_PROGRESS = 0x84, 1494 1499 1495 1500 /* 1496 1501 * Command Specific Status: ··· 1532 1519 NVME_SC_NS_NOT_ATTACHED = 0x11a, 1533 1520 NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, 1534 1521 NVME_SC_CTRL_LIST_INVALID = 0x11c, 1522 + NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d, 1535 1523 NVME_SC_BP_WRITE_PROHIBITED = 0x11e, 1524 + NVME_SC_CTRL_ID_INVALID = 0x11f, 1525 + NVME_SC_SEC_CTRL_STATE_INVALID = 0x120, 1526 + NVME_SC_CTRL_RES_NUM_INVALID = 0x121, 1527 + NVME_SC_RES_ID_INVALID = 0x122, 1536 1528 NVME_SC_PMR_SAN_PROHIBITED = 0x123, 1529 + NVME_SC_ANA_GROUP_ID_INVALID = 0x124, 1530 + NVME_SC_ANA_ATTACH_FAILED = 0x125, 1537 1531 1538 1532 /* 1539 1533 * I/O Command Set Specific - NVM commands: