Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: core: Add a dma_alignment field to the host and host template

Get drivers out of the business of having to call the block layer DMA
alignment limits helpers themselves.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240409143748.980206-8-hch@lst.de
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by Christoph Hellwig; committed by Martin K. Petersen
5b7dfbef 6248d7f7

+42 -53
-6
drivers/firewire/sbp2.c
@@ -1500,12 +1500,6 @@
 
 	sdev->allow_restart = 1;
 
-	/*
-	 * SBP-2 does not require any alignment, but we set it anyway
-	 * for compatibility with earlier versions of this driver.
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
 
+1
drivers/message/fusion/mptfc.c
@@ -129,5 +129,6 @@
 	.sg_tablesize = MPT_SCSI_SG_DEPTH,
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
+	.dma_alignment = 511,
 	.shost_groups = mptscsih_host_attr_groups,
 };
+1
drivers/message/fusion/mptsas.c
@@ -2020,6 +2020,7 @@
 	.sg_tablesize = MPT_SCSI_SG_DEPTH,
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
+	.dma_alignment = 511,
 	.shost_groups = mptscsih_host_attr_groups,
 	.no_write_same = 1,
 };
-2
drivers/message/fusion/mptscsih.c
@@ -2438,8 +2438,6 @@
 		"tagged %d, simple %d\n",
 		ioc->name,sdev->tagged_supported, sdev->simple_tags));
 
-	blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
-
 	return 0;
 }
 
+1
drivers/message/fusion/mptspi.c
@@ -843,6 +843,7 @@
 	.sg_tablesize = MPT_SCSI_SG_DEPTH,
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
+	.dma_alignment = 511,
 	.shost_groups = mptscsih_host_attr_groups,
 };
 
+6
drivers/scsi/hosts.c
@@ -478,6 +478,12 @@
 	else
 		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 
+	/* 32-byte (dword) is a common minimum for HBAs. */
+	if (sht->dma_alignment)
+		shost->dma_alignment = sht->dma_alignment;
+	else
+		shost->dma_alignment = 3;
+
 	/*
 	 * assume a 4GB boundary, if not set
 	 */
+1 -1
drivers/scsi/iscsi_tcp.c
@@ -943,6 +943,7 @@
 	shost->max_id = 0;
 	shost->max_channel = 0;
 	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+	shost->dma_alignment = 0;
 
 	rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
 	if (rc < 0)
@@ -1066,7 +1065,6 @@
 	if (conn->datadgst_en)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
 				   sdev->request_queue);
-	blk_queue_dma_alignment(sdev->request_queue, 0);
 	return 0;
 }
 
+3 -3
drivers/scsi/qla2xxx/qla_os.c
@@ -1957,9 +1957,6 @@
 	scsi_qla_host_t *vha = shost_priv(sdev->host);
 	struct req_que *req = vha->req;
 
-	if (IS_T10_PI_CAPABLE(vha->hw))
-		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
-
 	scsi_change_queue_depth(sdev, req->max_q_depth);
 	return 0;
 }
@@ -3571,6 +3574,9 @@
 		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
 			QLA_SG_ALL : 128;
 	}
+
+	if (IS_T10_PI_CAPABLE(base_vha->hw))
+		host->dma_alignment = 0x7;
 
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
+2 -9
drivers/scsi/scsi_lib.c
@@ -1985,15 +1985,8 @@
 	lim->seg_boundary_mask = shost->dma_boundary;
 	lim->max_segment_size = shost->max_segment_size;
 	lim->virt_boundary_mask = shost->virt_boundary_mask;
-
-	/*
-	 * Set a reasonable default alignment: The larger of 32-byte (dword),
-	 * which is a common minimum for HBAs, and the minimum DMA alignment,
-	 * which is set by the platform.
-	 *
-	 * Devices that require a bigger alignment can increase it later.
-	 */
-	lim->dma_alignment = max(4, dma_get_cache_alignment()) - 1;
+	lim->dma_alignment = max_t(unsigned int,
+			shost->dma_alignment, dma_get_cache_alignment() - 1);
 
 	if (shost->no_highmem)
 		lim->bounce = BLK_BOUNCE_HIGH;
+12 -12
drivers/staging/rts5208/rtsx.c
@@ -70,18 +70,6 @@
 
 static int slave_configure(struct scsi_device *sdev)
 {
-	/*
-	 * Scatter-gather buffers (all but the last) must have a length
-	 * divisible by the bulk maxpacket size. Otherwise a data packet
-	 * would end up being short, causing a premature end to the data
-	 * transfer. Since high-speed bulk pipes have a maxpacket size
-	 * of 512, we'll use that as the scsi device queue's DMA alignment
-	 * mask. Guaranteeing proper alignment of the first buffer will
-	 * have the desired effect because, except at the beginning and
-	 * the end, scatter-gather buffers follow page boundaries.
-	 */
-	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
-
 	/* Set the SCSI level to at least 2. We'll leave it at 3 if that's
 	 * what is originally reported. We need this to avoid confusing
 	 * the SCSI layer with devices that report 0 or 1, but need 10-byte
@@ -206,6 +218,18 @@
 
 	/* limit the total size of a transfer to 120 KB */
 	.max_sectors = 240,
+
+	/*
+	 * Scatter-gather buffers (all but the last) must have a length
+	 * divisible by the bulk maxpacket size. Otherwise a data packet
+	 * would end up being short, causing a premature end to the data
+	 * transfer. Since high-speed bulk pipes have a maxpacket size
+	 * of 512, we'll use that as the scsi device queue's DMA alignment
+	 * mask. Guaranteeing proper alignment of the first buffer will
+	 * have the desired effect because, except at the beginning and
+	 * the end, scatter-gather buffers follow page boundaries.
+	 */
+	.dma_alignment = 511,
 
 	/* emulated HBA */
 	.emulated = 1,
+1 -7
drivers/usb/image/microtek.c
@@ -328,12 +328,6 @@
 	return 0;
 }
 
-static int mts_slave_configure (struct scsi_device *s)
-{
-	blk_queue_dma_alignment(s->request_queue, (512 - 1));
-	return 0;
-}
-
 static int mts_scsi_abort(struct scsi_cmnd *srb)
 {
 	struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
@@ -625,8 +631,8 @@
 	.can_queue = 1,
 	.this_id = -1,
 	.emulated = 1,
+	.dma_alignment = 511,
 	.slave_alloc = mts_slave_alloc,
-	.slave_configure = mts_slave_configure,
 	.max_sectors= 256, /* 128 K */
 };
 
+5 -6
drivers/usb/storage/scsiglue.c
@@ -75,12 +75,6 @@
 	 */
 	sdev->inquiry_len = 36;
 
-	/*
-	 * Some host controllers may have alignment requirements.
-	 * We'll play it safe by requiring 512-byte alignment always.
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
 	/* Tell the SCSI layer if we know there is more than one LUN */
 	if (us->protocol == USB_PR_BULK && us->max_lun > 0)
 		sdev->sdev_bflags |= BLIST_FORCELUN;
@@ -632,6 +638,11 @@
 	/* lots of sg segments can be handled */
 	.sg_tablesize = SG_MAX_SEGMENTS,
 
+	/*
+	 * Some host controllers may have alignment requirements.
+	 * We'll play it safe by requiring 512-byte alignment always.
+	 */
+	.dma_alignment = 511,
 
 	/*
 	 * Limit the total size of a transfer to 120 KB.
+6 -7
drivers/usb/storage/uas.c
@@ -824,13 +824,6 @@
 
 	sdev->hostdata = devinfo;
 
-	/*
-	 * The protocol has no requirements on alignment in the strict sense.
-	 * Controllers may or may not have alignment restrictions.
-	 * As this is not exported, we use an extremely conservative guess.
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
 	if (devinfo->flags & US_FL_MAX_SECTORS_64)
 		blk_queue_max_hw_sectors(sdev->request_queue, 64);
 	else if (devinfo->flags & US_FL_MAX_SECTORS_240)
@@ -905,6 +912,12 @@
 	.eh_device_reset_handler = uas_eh_device_reset_handler,
 	.this_id = -1,
 	.skip_settle_delay = 1,
+	/*
+	 * The protocol has no requirements on alignment in the strict sense.
+	 * Controllers may or may not have alignment restrictions.
+	 * As this is not exported, we use an extremely conservative guess.
+	 */
+	.dma_alignment = 511,
 	.dma_boundary = PAGE_SIZE - 1,
 	.cmd_size = sizeof(struct uas_cmd_info),
 };
+3
include/scsi/scsi_host.h
@@ -405,6 +405,8 @@
 	 */
 	unsigned int max_segment_size;
 
+	unsigned int dma_alignment;
+
 	/*
 	 * DMA scatter gather segment boundary limit. A segment crossing this
 	 * boundary will be split in two.
@@ -616,6 +614,7 @@
 	unsigned int max_sectors;
 	unsigned int opt_sectors;
 	unsigned int max_segment_size;
+	unsigned int dma_alignment;
 	unsigned long dma_boundary;
 	unsigned long virt_boundary_mask;
 	/*