Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"Three small fixes, all in drivers, and one sizeable update to the UFS
driver to remove the HPB 2.0 feature that has been objected to by Jens
and Christoph.

Although the UFS patch is large and last minute, it's essentially the
least intrusive way of resolving the objections in time for the 5.15
release"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: ufs: ufshpb: Remove HPB2.0 flows
scsi: mpt3sas: Fix reference tag handling for WRITE_INSERT
scsi: ufs: ufs-exynos: Correct timeout value setting registers
scsi: ibmvfc: Fix up duplicate response detection

Changed files
+15 -295
drivers
+2 -1
drivers/scsi/ibmvscsi/ibmvfc.c
··· 1696 1696 1697 1697 spin_lock_irqsave(&evt->queue->l_lock, flags); 1698 1698 list_add_tail(&evt->queue_list, &evt->queue->sent); 1699 + atomic_set(&evt->active, 1); 1699 1700 1700 1701 mb(); 1701 1702 ··· 1711 1710 be64_to_cpu(crq_as_u64[1])); 1712 1711 1713 1712 if (rc) { 1713 + atomic_set(&evt->active, 0); 1714 1714 list_del(&evt->queue_list); 1715 1715 spin_unlock_irqrestore(&evt->queue->l_lock, flags); 1716 1716 del_timer(&evt->timer); ··· 1739 1737 1740 1738 evt->done(evt); 1741 1739 } else { 1742 - atomic_set(&evt->active, 1); 1743 1740 spin_unlock_irqrestore(&evt->queue->l_lock, flags); 1744 1741 ibmvfc_trc_start(evt); 1745 1742 }
+6 -3
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 5065 5065 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 5066 5066 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 5067 5067 5068 - if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 5069 - eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 5070 - MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG; 5068 + if (scmd->prot_flags & SCSI_PROT_REF_CHECK) 5069 + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG; 5070 + 5071 + if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) { 5072 + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG; 5073 + 5071 5074 mpi_request->CDB.EEDP32.PrimaryReferenceTag = 5072 5075 cpu_to_be32(scsi_prot_ref_tag(scmd)); 5073 5076 }
+3 -3
drivers/scsi/ufs/ufs-exynos.c
··· 642 642 } 643 643 644 644 /* setting for three timeout values for traffic class #0 */ 645 - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064); 646 - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224); 647 - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160); 645 + ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064); 646 + ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224); 647 + ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160); 648 648 649 649 return 0; 650 650 out:
+1 -6
drivers/scsi/ufs/ufshcd.c
··· 2737 2737 2738 2738 lrbp->req_abort_skip = false; 2739 2739 2740 - err = ufshpb_prep(hba, lrbp); 2741 - if (err == -EAGAIN) { 2742 - lrbp->cmd = NULL; 2743 - ufshcd_release(hba); 2744 - goto out; 2745 - } 2740 + ufshpb_prep(hba, lrbp); 2746 2741 2747 2742 ufshcd_comp_scsi_upiu(hba, lrbp); 2748 2743
+3 -280
drivers/scsi/ufs/ufshpb.c
··· 84 84 return transfer_len <= hpb->pre_req_max_tr_len; 85 85 } 86 86 87 - /* 88 - * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as 89 - * default. It is possible to change range of transfer_len through sysfs. 90 - */ 91 - static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len) 92 - { 93 - return len > hpb->pre_req_min_tr_len && 94 - len <= hpb->pre_req_max_tr_len; 95 - } 96 - 97 87 static bool ufshpb_is_general_lun(int lun) 98 88 { 99 89 return lun < UFS_UPIU_MAX_UNIT_NUM_ID; ··· 324 334 325 335 static void 326 336 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, 327 - __be64 ppn, u8 transfer_len, int read_id) 337 + __be64 ppn, u8 transfer_len) 328 338 { 329 339 unsigned char *cdb = lrbp->cmd->cmnd; 330 340 __be64 ppn_tmp = ppn; ··· 336 346 /* ppn value is stored as big-endian in the host memory */ 337 347 memcpy(&cdb[6], &ppn_tmp, sizeof(__be64)); 338 348 cdb[14] = transfer_len; 339 - cdb[15] = read_id; 349 + cdb[15] = 0; 340 350 341 351 lrbp->cmd->cmd_len = UFS_CDB_SIZE; 342 - } 343 - 344 - static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb, 345 - unsigned long lpn, unsigned int len, 346 - int read_id) 347 - { 348 - cdb[0] = UFSHPB_WRITE_BUFFER; 349 - cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID; 350 - 351 - put_unaligned_be32(lpn, &cdb[2]); 352 - cdb[6] = read_id; 353 - put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]); 354 - 355 - cdb[9] = 0x00; /* Control = 0x00 */ 356 - } 357 - 358 - static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb) 359 - { 360 - struct ufshpb_req *pre_req; 361 - 362 - if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) { 363 - dev_info(&hpb->sdev_ufs_lu->sdev_dev, 364 - "pre_req throttle. inflight %d throttle %d", 365 - hpb->num_inflight_pre_req, hpb->throttle_pre_req); 366 - return NULL; 367 - } 368 - 369 - pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free, 370 - struct ufshpb_req, list_req); 371 - if (!pre_req) { 372 - dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req"); 373 - return NULL; 374 - } 375 - 376 - list_del_init(&pre_req->list_req); 377 - hpb->num_inflight_pre_req++; 378 - 379 - return pre_req; 380 - } 381 - 382 - static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb, 383 - struct ufshpb_req *pre_req) 384 - { 385 - pre_req->req = NULL; 386 - bio_reset(pre_req->bio); 387 - list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); 388 - hpb->num_inflight_pre_req--; 389 - } 390 - 391 - static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error) 392 - { 393 - struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data; 394 - struct ufshpb_lu *hpb = pre_req->hpb; 395 - unsigned long flags; 396 - 397 - if (error) { 398 - struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); 399 - struct scsi_sense_hdr sshdr; 400 - 401 - dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error); 402 - scsi_command_normalize_sense(cmd, &sshdr); 403 - dev_err(&hpb->sdev_ufs_lu->sdev_dev, 404 - "code %x sense_key %x asc %x ascq %x", 405 - sshdr.response_code, 406 - sshdr.sense_key, sshdr.asc, sshdr.ascq); 407 - dev_err(&hpb->sdev_ufs_lu->sdev_dev, 408 - "byte4 %x byte5 %x byte6 %x additional_len %x", 409 - sshdr.byte4, sshdr.byte5, 410 - sshdr.byte6, sshdr.additional_length); 411 - } 412 - 413 - blk_mq_free_request(req); 414 - spin_lock_irqsave(&hpb->rgn_state_lock, flags); 415 - ufshpb_put_pre_req(pre_req->hpb, pre_req); 416 - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); 417 - } 418 - 419 - static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page) 420 - { 421 - struct ufshpb_lu *hpb = pre_req->hpb; 422 - struct ufshpb_region *rgn; 423 - struct ufshpb_subregion *srgn; 424 - __be64 *addr; 425 - int offset = 0; 426 - int copied; 427 - unsigned long lpn = pre_req->wb.lpn; 428 - int rgn_idx, srgn_idx, srgn_offset; 429 - unsigned long flags; 430 - 431 - addr = page_address(page); 432 - ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); 433 - 434 - spin_lock_irqsave(&hpb->rgn_state_lock, flags); 435 - 436 - next_offset: 437 - rgn = hpb->rgn_tbl + rgn_idx; 438 - srgn = rgn->srgn_tbl + srgn_idx; 439 - 440 - if (!ufshpb_is_valid_srgn(rgn, srgn)) 441 - goto mctx_error; 442 - 443 - if (!srgn->mctx) 444 - goto mctx_error; 445 - 446 - copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 447 - pre_req->wb.len - offset, 448 - &addr[offset]); 449 - 450 - if (copied < 0) 451 - goto mctx_error; 452 - 453 - offset += copied; 454 - srgn_offset += copied; 455 - 456 - if (srgn_offset == hpb->entries_per_srgn) { 457 - srgn_offset = 0; 458 - 459 - if (++srgn_idx == hpb->srgns_per_rgn) { 460 - srgn_idx = 0; 461 - rgn_idx++; 462 - } 463 - } 464 - 465 - if (offset < pre_req->wb.len) 466 - goto next_offset; 467 - 468 - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); 469 - return 0; 470 - mctx_error: 471 - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); 472 - return -ENOMEM; 473 - } 474 - 475 - static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb, 476 - struct request_queue *q, 477 - struct ufshpb_req *pre_req) 478 - { 479 - struct page *page = pre_req->wb.m_page; 480 - struct bio *bio = pre_req->bio; 481 - int entries_bytes, ret; 482 - 483 - if (!page) 484 - return -ENOMEM; 485 - 486 - if (ufshpb_prep_entry(pre_req, page)) 487 - return -ENOMEM; 488 - 489 - entries_bytes = pre_req->wb.len * sizeof(__be64); 490 - 491 - ret = bio_add_pc_page(q, bio, page, entries_bytes, 0); 492 - if (ret != entries_bytes) { 493 - dev_err(&hpb->sdev_ufs_lu->sdev_dev, 494 - "bio_add_pc_page fail: %d", ret); 495 - return -ENOMEM; 496 - } 497 - return 0; 498 - } 499 - 500 - static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb) 501 - { 502 - if (++hpb->cur_read_id >= MAX_HPB_READ_ID) 503 - hpb->cur_read_id = 1; 504 - return hpb->cur_read_id; 505 - } 506 - 507 - static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, 508 - struct ufshpb_req *pre_req, int read_id) 509 - { 510 - struct scsi_device *sdev = cmd->device; 511 - struct request_queue *q = sdev->request_queue; 512 - struct request *req; 513 - struct scsi_request *rq; 514 - struct bio *bio = pre_req->bio; 515 - 516 - pre_req->hpb = hpb; 517 - pre_req->wb.lpn = sectors_to_logical(cmd->device, 518 - blk_rq_pos(scsi_cmd_to_rq(cmd))); 519 - pre_req->wb.len = sectors_to_logical(cmd->device, 520 - blk_rq_sectors(scsi_cmd_to_rq(cmd))); 521 - if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req)) 522 - return -ENOMEM; 523 - 524 - req = pre_req->req; 525 - 526 - /* 1. request setup */ 527 - blk_rq_append_bio(req, bio); 528 - req->rq_disk = NULL; 529 - req->end_io_data = (void *)pre_req; 530 - req->end_io = ufshpb_pre_req_compl_fn; 531 - 532 - /* 2. scsi_request setup */ 533 - rq = scsi_req(req); 534 - rq->retries = 1; 535 - 536 - ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len, 537 - read_id); 538 - rq->cmd_len = scsi_command_size(rq->cmd); 539 - 540 - if (blk_insert_cloned_request(q, req) != BLK_STS_OK) 541 - return -EAGAIN; 542 - 543 - hpb->stats.pre_req_cnt++; 544 - 545 - return 0; 546 - } 547 - 548 - static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, 549 - int *read_id) 550 - { 551 - struct ufshpb_req *pre_req; 552 - struct request *req = NULL; 553 - unsigned long flags; 554 - int _read_id; 555 - int ret = 0; 556 - 557 - req = blk_get_request(cmd->device->request_queue, 558 - REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT); 559 - if (IS_ERR(req)) 560 - return -EAGAIN; 561 - 562 - spin_lock_irqsave(&hpb->rgn_state_lock, flags); 563 - pre_req = ufshpb_get_pre_req(hpb); 564 - if (!pre_req) { 565 - ret = -EAGAIN; 566 - goto unlock_out; 567 - } 568 - _read_id = ufshpb_get_read_id(hpb); 569 - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); 570 - 571 - pre_req->req = req; 572 - 573 - ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id); 574 - if (ret) 575 - goto free_pre_req; 576 - 577 - *read_id = _read_id; 578 - 579 - return ret; 580 - free_pre_req: 581 - spin_lock_irqsave(&hpb->rgn_state_lock, flags); 582 - ufshpb_put_pre_req(hpb, pre_req); 583 - unlock_out: 584 - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); 585 - blk_put_request(req); 586 - return ret; 587 352 } 588 353 589 354 /* ··· 354 609 __be64 ppn; 355 610 unsigned long flags; 356 611 int transfer_len, rgn_idx, srgn_idx, srgn_offset; 357 - int read_id = 0; 358 612 int err = 0; 359 613 360 614 hpb = ufshpb_get_hpb_data(cmd->device); ··· 429 685 dev_err(hba->dev, "get ppn failed. err %d\n", err); 430 686 return err; 431 687 } 432 - if (!ufshpb_is_legacy(hba) && 433 - ufshpb_is_required_wb(hpb, transfer_len)) { 434 - err = ufshpb_issue_pre_req(hpb, cmd, &read_id); 435 - if (err) { 436 - unsigned long timeout; 437 688 438 - timeout = cmd->jiffies_at_alloc + msecs_to_jiffies( 439 - hpb->params.requeue_timeout_ms); 440 - 441 - if (time_before(jiffies, timeout)) 442 - return -EAGAIN; 443 - 444 - hpb->stats.miss_cnt++; 445 - return 0; 446 - } 447 - } 448 - 449 - ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id); 689 + ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len); 450 690 451 691 hpb->stats.hit_cnt++; 452 692 return 0; ··· 1569 1841 u32 entries_per_rgn; 1570 1842 u64 rgn_mem_size, tmp; 1571 1843 1572 - /* for pre_req */ 1573 - hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1; 1574 - 1575 1844 if (ufshpb_is_legacy(hba)) 1576 1845 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; 1577 1846 else 1578 1847 hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH; 1579 - 1580 1848 - hpb->cur_read_id = 0; 1581 1848 1582 1849 hpb->lu_pinned_start = hpb_lu_info->pinned_start; 1583 1850 hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
-2
drivers/scsi/ufs/ufshpb.h
··· 241 241 spinlock_t param_lock; 242 242 243 243 struct list_head lh_pre_req_free; 244 - int cur_read_id; 245 - int pre_req_min_tr_len; 246 244 int pre_req_max_tr_len; 247 245 248 246 /* cached L2P map management worker */