Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sx8: use a per-host tag_set

The current sx8 code spends a lot of effort dealing with the fact that
tags are per-host, but there might be multiple queues. Now that the
driver has been converted to blk-mq it can take care of the blk-mq
tag_set concept that has been designed just for that.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe
72d7ce8e cd94c9ed

+95 -248
drivers/block/sx8.c
··· 243 243 unsigned int port_no; 244 244 struct gendisk *disk; 245 245 struct carm_host *host; 246 - struct blk_mq_tag_set tag_set; 247 246 248 247 /* attached device characteristics */ 249 248 u64 capacity; ··· 253 254 }; 254 255 255 256 struct carm_request { 256 - unsigned int tag; 257 257 int n_elem; 258 258 unsigned int msg_type; 259 259 unsigned int msg_subtype; 260 260 unsigned int msg_bucket; 261 - struct request *rq; 262 - struct carm_port *port; 263 261 struct scatterlist sg[CARM_MAX_REQ_SG]; 264 262 }; 265 263 ··· 287 291 unsigned int wait_q_cons; 288 292 struct request_queue *wait_q[CARM_MAX_WAIT_Q]; 289 293 290 - unsigned int n_msgs; 291 - u64 msg_alloc; 292 - struct carm_request req[CARM_MAX_REQ]; 293 294 void *msg_base; 294 295 dma_addr_t msg_dma; 295 296 ··· 471 478 } 472 479 473 480 static int carm_send_msg(struct carm_host *host, 474 - struct carm_request *crq) 481 + struct carm_request *crq, unsigned tag) 475 482 { 476 483 void __iomem *mmio = host->mmio; 477 - u32 msg = (u32) carm_ref_msg_dma(host, crq->tag); 484 + u32 msg = (u32) carm_ref_msg_dma(host, tag); 478 485 u32 cm_bucket = crq->msg_bucket; 479 486 u32 tmp; 480 487 int rc = 0; ··· 499 506 return rc; 500 507 } 501 508 502 - static struct carm_request *carm_get_request(struct carm_host *host) 503 - { 504 - unsigned int i; 505 - 506 - /* obey global hardware limit on S/G entries */ 507 - if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG)) 508 - return NULL; 509 - 510 - for (i = 0; i < max_queue; i++) 511 - if ((host->msg_alloc & (1ULL << i)) == 0) { 512 - struct carm_request *crq = &host->req[i]; 513 - crq->port = NULL; 514 - crq->n_elem = 0; 515 - 516 - host->msg_alloc |= (1ULL << i); 517 - host->n_msgs++; 518 - 519 - assert(host->n_msgs <= CARM_MAX_REQ); 520 - sg_init_table(crq->sg, CARM_MAX_REQ_SG); 521 - return crq; 522 - } 523 - 524 - DPRINTK("no request available, returning NULL\n"); 525 - return NULL; 526 - } 527 - 528 - static int carm_put_request(struct carm_host *host, 
struct carm_request *crq) 529 - { 530 - assert(crq->tag < max_queue); 531 - 532 - if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0)) 533 - return -EINVAL; /* tried to clear a tag that was not active */ 534 - 535 - assert(host->hw_sg_used >= crq->n_elem); 536 - 537 - host->msg_alloc &= ~(1ULL << crq->tag); 538 - host->hw_sg_used -= crq->n_elem; 539 - host->n_msgs--; 540 - 541 - return 0; 542 - } 543 - 544 - static struct carm_request *carm_get_special(struct carm_host *host) 545 - { 546 - unsigned long flags; 547 - struct carm_request *crq = NULL; 548 - struct request *rq; 549 - int tries = 5000; 550 - 551 - while (tries-- > 0) { 552 - spin_lock_irqsave(&host->lock, flags); 553 - crq = carm_get_request(host); 554 - spin_unlock_irqrestore(&host->lock, flags); 555 - 556 - if (crq) 557 - break; 558 - msleep(10); 559 - } 560 - 561 - if (!crq) 562 - return NULL; 563 - 564 - rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, 0); 565 - if (IS_ERR(rq)) { 566 - spin_lock_irqsave(&host->lock, flags); 567 - carm_put_request(host, crq); 568 - spin_unlock_irqrestore(&host->lock, flags); 569 - return NULL; 570 - } 571 - 572 - crq->rq = rq; 573 - return crq; 574 - } 575 - 576 509 static int carm_array_info (struct carm_host *host, unsigned int array_idx) 577 510 { 578 511 struct carm_msg_ioctl *ioc; 579 - unsigned int idx; 580 512 u32 msg_data; 581 513 dma_addr_t msg_dma; 582 514 struct carm_request *crq; 515 + struct request *rq; 583 516 int rc; 584 517 585 - crq = carm_get_special(host); 586 - if (!crq) { 518 + rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0); 519 + if (IS_ERR(rq)) { 587 520 rc = -ENOMEM; 588 521 goto err_out; 589 522 } 523 + crq = blk_mq_rq_to_pdu(rq); 590 524 591 - idx = crq->tag; 592 - 593 - ioc = carm_ref_msg(host, idx); 594 - msg_dma = carm_ref_msg_dma(host, idx); 525 + ioc = carm_ref_msg(host, rq->tag); 526 + msg_dma = carm_ref_msg_dma(host, rq->tag); 595 527 msg_data = (u32) (msg_dma + sizeof(struct carm_array_info)); 596 528 597 529 
crq->msg_type = CARM_MSG_ARRAY; ··· 530 612 ioc->type = CARM_MSG_ARRAY; 531 613 ioc->subtype = CARM_ARRAY_INFO; 532 614 ioc->array_id = (u8) array_idx; 533 - ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); 615 + ioc->handle = cpu_to_le32(TAG_ENCODE(rq->tag)); 534 616 ioc->data_addr = cpu_to_le32(msg_data); 535 617 536 618 spin_lock_irq(&host->lock); ··· 538 620 host->state == HST_DEV_SCAN); 539 621 spin_unlock_irq(&host->lock); 540 622 541 - DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); 542 - crq->rq->special = crq; 543 - blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); 623 + DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag); 624 + blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL); 544 625 545 626 return 0; 546 627 ··· 554 637 555 638 static int carm_send_special (struct carm_host *host, carm_sspc_t func) 556 639 { 640 + struct request *rq; 557 641 struct carm_request *crq; 558 642 struct carm_msg_ioctl *ioc; 559 643 void *mem; 560 - unsigned int idx, msg_size; 644 + unsigned int msg_size; 561 645 int rc; 562 646 563 - crq = carm_get_special(host); 564 - if (!crq) 647 + rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0); 648 + if (IS_ERR(rq)) 565 649 return -ENOMEM; 650 + crq = blk_mq_rq_to_pdu(rq); 566 651 567 - idx = crq->tag; 652 + mem = carm_ref_msg(host, rq->tag); 568 653 569 - mem = carm_ref_msg(host, idx); 570 - 571 - msg_size = func(host, idx, mem); 654 + msg_size = func(host, rq->tag, mem); 572 655 573 656 ioc = mem; 574 657 crq->msg_type = ioc->type; ··· 577 660 BUG_ON(rc < 0); 578 661 crq->msg_bucket = (u32) rc; 579 662 580 - DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); 581 - crq->rq->special = crq; 582 - blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); 663 + DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag); 664 + blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL); 583 665 584 666 return 0; 585 667 } ··· 660 744 sizeof(struct carm_fw_ver); 661 745 } 662 746 663 - static inline void 
carm_end_request_queued(struct carm_host *host, 664 - struct carm_request *crq, 665 - blk_status_t error) 666 - { 667 - struct request *req = crq->rq; 668 - int rc; 669 - 670 - blk_mq_end_request(req, error); 671 - 672 - rc = carm_put_request(host, crq); 673 - assert(rc == 0); 674 - } 675 - 676 747 static inline void carm_push_q (struct carm_host *host, struct request_queue *q) 677 748 { 678 749 unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; ··· 694 791 } 695 792 } 696 793 697 - static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq, 698 - blk_status_t error) 794 + static inline enum dma_data_direction carm_rq_dir(struct request *rq) 699 795 { 700 - carm_end_request_queued(host, crq, error); 701 - if (max_queue == 1) 702 - carm_round_robin(host); 703 - else if ((host->n_msgs <= CARM_MSG_LOW_WATER) && 704 - (host->hw_sg_used <= CARM_SG_LOW_WATER)) { 705 - carm_round_robin(host); 706 - } 707 - } 708 - 709 - static blk_status_t carm_oob_queue_rq(struct blk_mq_hw_ctx *hctx, 710 - const struct blk_mq_queue_data *bd) 711 - { 712 - struct request_queue *q = hctx->queue; 713 - struct carm_host *host = q->queuedata; 714 - struct carm_request *crq; 715 - int rc; 716 - 717 - blk_mq_start_request(bd->rq); 718 - 719 - spin_lock_irq(&host->lock); 720 - 721 - crq = bd->rq->special; 722 - assert(crq != NULL); 723 - assert(crq->rq == bd->rq); 724 - 725 - crq->n_elem = 0; 726 - 727 - DPRINTK("send req\n"); 728 - rc = carm_send_msg(host, crq); 729 - if (rc) { 730 - carm_push_q(host, q); 731 - spin_unlock_irq(&host->lock); 732 - return BLK_STS_DEV_RESOURCE; 733 - } 734 - 735 - spin_unlock_irq(&host->lock); 736 - return BLK_STS_OK; 796 + return op_is_write(req_op(rq)) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; 737 797 } 738 798 739 799 static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx, 740 800 const struct blk_mq_queue_data *bd) 741 801 { 742 802 struct request_queue *q = hctx->queue; 803 + struct request *rq = bd->rq; 743 804 struct carm_port *port = q->queuedata; 744 805 struct carm_host *host = port->host; 806 + struct carm_request *crq = blk_mq_rq_to_pdu(rq); 745 807 struct carm_msg_rw *msg; 746 - struct carm_request *crq; 747 - struct request *rq = bd->rq; 748 808 struct scatterlist *sg; 749 - int writing = 0, pci_dir, i, n_elem, rc; 750 - u32 tmp; 809 + int i, n_elem = 0, rc; 751 810 unsigned int msg_size; 811 + u32 tmp; 812 + 813 + crq->n_elem = 0; 814 + sg_init_table(crq->sg, CARM_MAX_REQ_SG); 752 815 753 816 blk_mq_start_request(rq); 754 817 755 818 spin_lock_irq(&host->lock); 756 - 757 - crq = carm_get_request(host); 758 - if (!crq) { 759 - carm_push_q(host, q); 760 - spin_unlock_irq(&host->lock); 761 - return BLK_STS_DEV_RESOURCE; 762 - } 763 - crq->rq = rq; 764 - 765 - if (rq_data_dir(rq) == WRITE) { 766 - writing = 1; 767 - pci_dir = DMA_TO_DEVICE; 768 - } else { 769 - pci_dir = DMA_FROM_DEVICE; 770 - } 819 + if (req_op(rq) == REQ_OP_DRV_OUT) 820 + goto send_msg; 771 821 772 822 /* get scatterlist from block layer */ 773 823 sg = &crq->sg[0]; 774 824 n_elem = blk_rq_map_sg(q, rq, sg); 775 - if (n_elem <= 0) { 776 - /* request with no s/g entries? */ 777 - carm_end_rq(host, crq, BLK_STS_IOERR); 778 - spin_unlock_irq(&host->lock); 779 - return BLK_STS_IOERR; 780 - } 825 + if (n_elem <= 0) 826 + goto out_ioerr; 781 827 782 828 /* map scatterlist to PCI bus addresses */ 783 - n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, pci_dir); 784 - if (n_elem <= 0) { 785 - /* request with no s/g entries? 
*/ 786 - carm_end_rq(host, crq, BLK_STS_IOERR); 787 - spin_unlock_irq(&host->lock); 788 - return BLK_STS_IOERR; 789 - } 829 + n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq)); 830 + if (n_elem <= 0) 831 + goto out_ioerr; 832 + 833 + /* obey global hardware limit on S/G entries */ 834 + if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem) 835 + goto out_resource; 836 + 790 837 crq->n_elem = n_elem; 791 - crq->port = port; 792 838 host->hw_sg_used += n_elem; 793 839 794 840 /* ··· 745 893 */ 746 894 747 895 VPRINTK("build msg\n"); 748 - msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag); 896 + msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag); 749 897 750 - if (writing) { 898 + if (rq_data_dir(rq) == WRITE) { 751 899 msg->type = CARM_MSG_WRITE; 752 900 crq->msg_type = CARM_MSG_WRITE; 753 901 } else { ··· 758 906 msg->id = port->port_no; 759 907 msg->sg_count = n_elem; 760 908 msg->sg_type = SGT_32BIT; 761 - msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); 909 + msg->handle = cpu_to_le32(TAG_ENCODE(rq->tag)); 762 910 msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff); 763 911 tmp = (blk_rq_pos(rq) >> 16) >> 16; 764 912 msg->lba_high = cpu_to_le16( (u16) tmp ); ··· 775 923 rc = carm_lookup_bucket(msg_size); 776 924 BUG_ON(rc < 0); 777 925 crq->msg_bucket = (u32) rc; 778 - 926 + send_msg: 779 927 /* 780 928 * queue read/write message to hardware 781 929 */ 782 - 783 - VPRINTK("send msg, tag == %u\n", crq->tag); 784 - rc = carm_send_msg(host, crq); 930 + VPRINTK("send msg, tag == %u\n", rq->tag); 931 + rc = carm_send_msg(host, crq, rq->tag); 785 932 if (rc) { 786 - carm_put_request(host, crq); 787 - carm_push_q(host, q); 788 - spin_unlock_irq(&host->lock); 789 - return BLK_STS_DEV_RESOURCE; 933 + host->hw_sg_used -= n_elem; 934 + goto out_resource; 790 935 } 791 936 792 937 spin_unlock_irq(&host->lock); 793 938 return BLK_STS_OK; 939 + out_resource: 940 + dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq)); 941 + 
carm_push_q(host, q); 942 + spin_unlock_irq(&host->lock); 943 + return BLK_STS_DEV_RESOURCE; 944 + out_ioerr: 945 + carm_round_robin(host); 946 + spin_unlock_irq(&host->lock); 947 + return BLK_STS_IOERR; 794 948 } 795 949 796 950 static void carm_handle_array_info(struct carm_host *host, ··· 811 953 size_t slen; 812 954 813 955 DPRINTK("ENTER\n"); 814 - 815 - carm_end_rq(host, crq, error); 816 956 817 957 if (error) 818 958 goto out; ··· 867 1011 868 1012 DPRINTK("ENTER\n"); 869 1013 870 - carm_end_rq(host, crq, error); 871 - 872 1014 if (error) { 873 1015 new_state = HST_ERROR; 874 1016 goto out; ··· 894 1040 { 895 1041 DPRINTK("ENTER\n"); 896 1042 897 - carm_end_rq(host, crq, error); 898 - 899 1043 assert(host->state == cur_state); 900 1044 if (error) 901 1045 host->state = HST_ERROR; ··· 902 1050 schedule_work(&host->fsm_task); 903 1051 } 904 1052 905 - static inline void carm_handle_rw(struct carm_host *host, 906 - struct carm_request *crq, blk_status_t error) 907 - { 908 - int pci_dir; 909 - 910 - VPRINTK("ENTER\n"); 911 - 912 - if (rq_data_dir(crq->rq) == WRITE) 913 - pci_dir = DMA_TO_DEVICE; 914 - else 915 - pci_dir = DMA_FROM_DEVICE; 916 - 917 - dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem, pci_dir); 918 - 919 - carm_end_rq(host, crq, error); 920 - } 921 - 922 1053 static inline void carm_handle_resp(struct carm_host *host, 923 1054 __le32 ret_handle_le, u32 status) 924 1055 { 925 1056 u32 handle = le32_to_cpu(ret_handle_le); 926 1057 unsigned int msg_idx; 1058 + struct request *rq; 927 1059 struct carm_request *crq; 928 1060 blk_status_t error = (status == RMSG_OK) ? 
0 : BLK_STS_IOERR; 929 1061 u8 *mem; ··· 923 1087 msg_idx = TAG_DECODE(handle); 924 1088 VPRINTK("tag == %u\n", msg_idx); 925 1089 926 - crq = &host->req[msg_idx]; 1090 + rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx); 1091 + crq = blk_mq_rq_to_pdu(rq); 927 1092 928 1093 /* fast path */ 929 1094 if (likely(crq->msg_type == CARM_MSG_READ || 930 1095 crq->msg_type == CARM_MSG_WRITE)) { 931 - carm_handle_rw(host, crq, error); 932 - return; 1096 + dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem, 1097 + carm_rq_dir(rq)); 1098 + goto done; 933 1099 } 934 1100 935 1101 mem = carm_ref_msg(host, msg_idx); ··· 941 1103 switch (crq->msg_subtype) { 942 1104 case CARM_IOC_SCAN_CHAN: 943 1105 carm_handle_scan_chan(host, crq, mem, error); 944 - break; 1106 + goto done; 945 1107 default: 946 1108 /* unknown / invalid response */ 947 1109 goto err_out; ··· 954 1116 case MISC_ALLOC_MEM: 955 1117 carm_handle_generic(host, crq, error, 956 1118 HST_ALLOC_BUF, HST_SYNC_TIME); 957 - break; 1119 + goto done; 958 1120 case MISC_SET_TIME: 959 1121 carm_handle_generic(host, crq, error, 960 1122 HST_SYNC_TIME, HST_GET_FW_VER); 961 - break; 1123 + goto done; 962 1124 case MISC_GET_FW_VER: { 963 1125 struct carm_fw_ver *ver = (struct carm_fw_ver *) 964 1126 (mem + sizeof(struct carm_msg_get_fw_ver)); ··· 968 1130 } 969 1131 carm_handle_generic(host, crq, error, 970 1132 HST_GET_FW_VER, HST_PORT_SCAN); 971 - break; 1133 + goto done; 972 1134 } 973 1135 default: 974 1136 /* unknown / invalid response */ ··· 999 1161 err_out: 1000 1162 printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n", 1001 1163 pci_name(host->pdev), crq->msg_type, crq->msg_subtype); 1002 - carm_end_rq(host, crq, BLK_STS_IOERR); 1164 + error = BLK_STS_IOERR; 1165 + done: 1166 + host->hw_sg_used -= crq->n_elem; 1167 + blk_mq_end_request(blk_mq_rq_from_pdu(crq), error); 1168 + 1169 + if (host->hw_sg_used <= CARM_SG_LOW_WATER) 1170 + carm_round_robin(host); 1003 1171 } 1004 1172 1005 1173 static 
inline void carm_handle_responses(struct carm_host *host) ··· 1335 1491 return 0; 1336 1492 } 1337 1493 1338 - static const struct blk_mq_ops carm_oob_mq_ops = { 1339 - .queue_rq = carm_oob_queue_rq, 1340 - }; 1341 - 1342 1494 static const struct blk_mq_ops carm_mq_ops = { 1343 1495 .queue_rq = carm_queue_rq, 1344 1496 }; ··· 1360 1520 disk->fops = &carm_bd_ops; 1361 1521 disk->private_data = port; 1362 1522 1363 - q = blk_mq_init_sq_queue(&port->tag_set, &carm_mq_ops, 1364 - max_queue, BLK_MQ_F_SHOULD_MERGE); 1523 + q = blk_mq_init_queue(&host->tag_set); 1365 1524 if (IS_ERR(q)) 1366 1525 return PTR_ERR(q); 1367 - disk->queue = q; 1526 + 1368 1527 blk_queue_max_segments(q, CARM_MAX_REQ_SG); 1369 1528 blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); 1370 1529 1371 1530 q->queuedata = port; 1531 + disk->queue = q; 1372 1532 return 0; 1373 1533 } 1374 1534 ··· 1382 1542 1383 1543 if (disk->flags & GENHD_FL_UP) 1384 1544 del_gendisk(disk); 1385 - if (disk->queue) { 1386 - blk_mq_free_tag_set(&port->tag_set); 1545 + if (disk->queue) 1387 1546 blk_cleanup_queue(disk->queue); 1388 - } 1389 1547 put_disk(disk); 1390 1548 } 1391 1549 ··· 1440 1602 INIT_WORK(&host->fsm_task, carm_fsm_task); 1441 1603 init_completion(&host->probe_comp); 1442 1604 1443 - for (i = 0; i < ARRAY_SIZE(host->req); i++) 1444 - host->req[i].tag = i; 1445 - 1446 1605 host->mmio = ioremap(pci_resource_start(pdev, 0), 1447 1606 pci_resource_len(pdev, 0)); 1448 1607 if (!host->mmio) { ··· 1456 1621 goto err_out_iounmap; 1457 1622 } 1458 1623 1459 - q = blk_mq_init_sq_queue(&host->tag_set, &carm_oob_mq_ops, 1, 1460 - BLK_MQ_F_NO_SCHED); 1624 + memset(&host->tag_set, 0, sizeof(host->tag_set)); 1625 + host->tag_set.ops = &carm_mq_ops; 1626 + host->tag_set.cmd_size = sizeof(struct carm_request); 1627 + host->tag_set.nr_hw_queues = 1; 1628 + host->tag_set.nr_maps = 1; 1629 + host->tag_set.queue_depth = max_queue; 1630 + host->tag_set.numa_node = NUMA_NO_NODE; 1631 + host->tag_set.flags = 
BLK_MQ_F_SHOULD_MERGE; 1632 + 1633 + rc = blk_mq_alloc_tag_set(&host->tag_set); 1634 + if (rc) 1635 + goto err_out_dma_free; 1636 + 1637 + q = blk_mq_init_queue(&host->tag_set); 1461 1638 if (IS_ERR(q)) { 1462 - printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n", 1463 - pci_name(pdev)); 1464 1639 rc = PTR_ERR(q); 1640 + blk_mq_free_tag_set(&host->tag_set); 1465 1641 goto err_out_dma_free; 1466 1642 } 1643 + 1467 1644 host->oob_q = q; 1468 1645 q->queuedata = host; 1469 1646