Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'ntb-5.3' of git://github.com/jonmason/ntb

Pull NTB updates from Jon Mason:
"New feature to add support for NTB virtual MSI interrupts, and the ability
to test and use this feature in the NTB transport layer.

Also, bug fixes for the AMD and Switchtec drivers, as well as some
general patches"

* tag 'ntb-5.3' of git://github.com/jonmason/ntb: (22 commits)
NTB: Describe the ntb_msi_test client in the documentation.
NTB: Add MSI interrupt support to ntb_transport
NTB: Add ntb_msi_test support to ntb_test
NTB: Introduce NTB MSI Test Client
NTB: Introduce MSI library
NTB: Rename ntb.c to support multiple source files in the module
NTB: Introduce functions to calculate multi-port resource index
NTB: Introduce helper functions to calculate logical port number
PCI/switchtec: Add module parameter to request more interrupts
PCI/MSI: Support allocating virtual MSI interrupts
ntb_hw_switchtec: Fix setup MW with failure bug
ntb_hw_switchtec: Skip unnecessary re-setup of shared memory window for crosslink case
ntb_hw_switchtec: Remove redundant steps of switchtec_ntb_reinit_peer() function
NTB: correct ntb_dev_ops and ntb_dev comment typos
NTB: amd: Silence shift wrapping warning in amd_ntb_db_vector_mask()
ntb_hw_switchtec: potential shift wrapping bug in switchtec_ntb_init_sndev()
NTB: ntb_transport: Ensure qp->tx_mw_dma_addr is initialized
NTB: ntb_hw_amd: set peer limit register
NTB: ntb_perf: Clear stale values in doorbell and command SPAD register
NTB: ntb_perf: Disable NTB link after clearing peer XLAT registers
...

+1458 -60
+27
Documentation/driver-api/ntb.rst
··· 200 200 This file is used to read and write peer scratchpads. See 201 201 *spad* for details. 202 202 203 + NTB MSI Test Client (ntb\_msi\_test) 204 + ------------------------------------ 205 + 206 + The MSI test client serves to test and debug the MSI library which 207 + allows for passing MSI interrupts across NTB memory windows. The 208 + test client is interacted with through the debugfs filesystem: 209 + 210 + * *debugfs*/ntb\_tool/*hw*/ 211 + A directory in debugfs will be created for each 212 + NTB device probed by the tool. This directory is shortened to *hw* 213 + below. 214 + * *hw*/port 215 + This file describes the local port number 216 + * *hw*/irq*_occurrences 217 + One occurrences file exists for each interrupt and, when read, 218 + returns the number of times the interrupt has been triggered. 219 + * *hw*/peer*/port 220 + This file describes the port number for each peer 221 + * *hw*/peer*/count 222 + This file describes the number of interrupts that can be 223 + triggered on each peer 224 + * *hw*/peer*/trigger 225 + Writing an interrupt number (any number less than the value 226 + specified in count) will trigger the interrupt on the 227 + specified peer. That peer's interrupt's occurrence file 228 + should be incremented. 229 + 203 230 NTB Hardware Drivers 204 231 ==================== 205 232
+11
drivers/ntb/Kconfig
··· 13 13 14 14 if NTB 15 15 16 + config NTB_MSI 17 + bool "MSI Interrupt Support" 18 + depends on PCI_MSI 19 + help 20 + Support using MSI interrupt forwarding instead of (or in addition to) 21 + hardware doorbells. MSI interrupts typically offer lower latency 22 + than doorbells and more MSI interrupts can be made available to 23 + clients. However this requires an extra memory window and support 24 + in the hardware driver for creating the MSI interrupts. 25 + 26 + If unsure, say N. 16 27 source "drivers/ntb/hw/Kconfig" 17 28 18 29 source "drivers/ntb/test/Kconfig"
+3
drivers/ntb/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-$(CONFIG_NTB) += ntb.o hw/ test/ 3 3 obj-$(CONFIG_NTB_TRANSPORT) += ntb_transport.o 4 + 5 + ntb-y := core.o 6 + ntb-$(CONFIG_NTB_MSI) += msi.o
+5 -5
drivers/ntb/hw/amd/ntb_hw_amd.c
··· 160 160 } 161 161 162 162 /* set and verify setting the limit */ 163 - write64(limit, mmio + limit_reg); 164 - reg_val = read64(mmio + limit_reg); 163 + write64(limit, peer_mmio + limit_reg); 164 + reg_val = read64(peer_mmio + limit_reg); 165 165 if (reg_val != limit) { 166 166 write64(base_addr, mmio + limit_reg); 167 167 write64(0, peer_mmio + xlat_reg); ··· 183 183 } 184 184 185 185 /* set and verify setting the limit */ 186 - writel(limit, mmio + limit_reg); 187 - reg_val = readl(mmio + limit_reg); 186 + writel(limit, peer_mmio + limit_reg); 187 + reg_val = readl(peer_mmio + limit_reg); 188 188 if (reg_val != limit) { 189 189 writel(base_addr, mmio + limit_reg); 190 190 writel(0, peer_mmio + xlat_reg); ··· 333 333 if (db_vector < 0 || db_vector > ndev->db_count) 334 334 return 0; 335 335 336 - return ntb_ndev(ntb)->db_valid_mask & (1 << db_vector); 336 + return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector); 337 337 } 338 338 339 339 static u64 amd_ntb_db_read(struct ntb_dev *ntb)
+3 -3
drivers/ntb/hw/intel/ntb_hw_gen3.c
··· 532 532 return 0; 533 533 } 534 534 535 - int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, 536 - resource_size_t *db_size, 537 - u64 *db_data, int db_bit) 535 + static int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, 536 + resource_size_t *db_size, 537 + u64 *db_data, int db_bit) 538 538 { 539 539 phys_addr_t db_addr_base; 540 540 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+50 -32
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
··· 86 86 bool link_is_up; 87 87 enum ntb_speed link_speed; 88 88 enum ntb_width link_width; 89 - struct work_struct link_reinit_work; 89 + struct work_struct check_link_status_work; 90 + bool link_force_down; 90 91 }; 91 92 92 93 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb) ··· 486 485 487 486 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev); 488 487 489 - static void link_reinit_work(struct work_struct *work) 490 - { 491 - struct switchtec_ntb *sndev; 492 - 493 - sndev = container_of(work, struct switchtec_ntb, link_reinit_work); 494 - 495 - switchtec_ntb_reinit_peer(sndev); 496 - } 497 - 498 - static void switchtec_ntb_check_link(struct switchtec_ntb *sndev, 499 - enum switchtec_msg msg) 488 + static void switchtec_ntb_link_status_update(struct switchtec_ntb *sndev) 500 489 { 501 490 int link_sta; 502 491 int old = sndev->link_is_up; 503 - 504 - if (msg == MSG_LINK_FORCE_DOWN) { 505 - schedule_work(&sndev->link_reinit_work); 506 - 507 - if (sndev->link_is_up) { 508 - sndev->link_is_up = 0; 509 - ntb_link_event(&sndev->ntb); 510 - dev_info(&sndev->stdev->dev, "ntb link forced down\n"); 511 - } 512 - 513 - return; 514 - } 515 492 516 493 link_sta = sndev->self_shared->link_sta; 517 494 if (link_sta) { ··· 513 534 if (link_sta) 514 535 crosslink_init_dbmsgs(sndev); 515 536 } 537 + } 538 + 539 + static void check_link_status_work(struct work_struct *work) 540 + { 541 + struct switchtec_ntb *sndev; 542 + 543 + sndev = container_of(work, struct switchtec_ntb, 544 + check_link_status_work); 545 + 546 + if (sndev->link_force_down) { 547 + sndev->link_force_down = false; 548 + switchtec_ntb_reinit_peer(sndev); 549 + 550 + if (sndev->link_is_up) { 551 + sndev->link_is_up = 0; 552 + ntb_link_event(&sndev->ntb); 553 + dev_info(&sndev->stdev->dev, "ntb link forced down\n"); 554 + } 555 + 556 + return; 557 + } 558 + 559 + switchtec_ntb_link_status_update(sndev); 560 + } 561 + 562 + static void switchtec_ntb_check_link(struct switchtec_ntb 
*sndev, 563 + enum switchtec_msg msg) 564 + { 565 + if (msg == MSG_LINK_FORCE_DOWN) 566 + sndev->link_force_down = true; 567 + 568 + schedule_work(&sndev->check_link_status_work); 516 569 } 517 570 518 571 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev) ··· 579 568 sndev->self_shared->link_sta = 1; 580 569 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP); 581 570 582 - switchtec_ntb_check_link(sndev, MSG_CHECK_LINK); 571 + switchtec_ntb_link_status_update(sndev); 583 572 584 573 return 0; 585 574 } ··· 593 582 sndev->self_shared->link_sta = 0; 594 583 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN); 595 584 596 - switchtec_ntb_check_link(sndev, MSG_CHECK_LINK); 585 + switchtec_ntb_link_status_update(sndev); 597 586 598 587 return 0; 599 588 } ··· 846 835 sndev->ntb.topo = NTB_TOPO_SWITCH; 847 836 sndev->ntb.ops = &switchtec_ntb_ops; 848 837 849 - INIT_WORK(&sndev->link_reinit_work, link_reinit_work); 838 + INIT_WORK(&sndev->check_link_status_work, check_link_status_work); 839 + sndev->link_force_down = false; 850 840 851 841 sndev->self_partition = sndev->stdev->partition; 852 842 ··· 884 872 } 885 873 886 874 sndev->peer_partition = ffs(tpart_vec) - 1; 887 - if (!(part_map & (1 << sndev->peer_partition))) { 875 + if (!(part_map & (1ULL << sndev->peer_partition))) { 888 876 dev_err(&sndev->stdev->dev, 889 877 "ntb target partition is not NT partition\n"); 890 878 return -ENODEV; ··· 1460 1448 1461 1449 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev) 1462 1450 { 1463 - dev_info(&sndev->stdev->dev, "peer reinitialized\n"); 1464 - switchtec_ntb_deinit_shared_mw(sndev); 1465 - switchtec_ntb_init_mw(sndev); 1466 - return switchtec_ntb_init_shared_mw(sndev); 1451 + int rc; 1452 + 1453 + if (crosslink_is_enabled(sndev)) 1454 + return 0; 1455 + 1456 + dev_info(&sndev->stdev->dev, "reinitialize shared memory window\n"); 1457 + rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0, 1458 + sndev->self_partition, 
1459 + sndev->self_shared_dma); 1460 + return rc; 1467 1461 } 1468 1462 1469 1463 static int switchtec_ntb_add(struct device *dev,
+415
drivers/ntb/msi.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 2 + 3 + #include <linux/irq.h> 4 + #include <linux/module.h> 5 + #include <linux/ntb.h> 6 + #include <linux/msi.h> 7 + #include <linux/pci.h> 8 + 9 + MODULE_LICENSE("Dual BSD/GPL"); 10 + MODULE_VERSION("0.1"); 11 + MODULE_AUTHOR("Logan Gunthorpe <logang@deltatee.com>"); 12 + MODULE_DESCRIPTION("NTB MSI Interrupt Library"); 13 + 14 + struct ntb_msi { 15 + u64 base_addr; 16 + u64 end_addr; 17 + 18 + void (*desc_changed)(void *ctx); 19 + 20 + u32 __iomem *peer_mws[]; 21 + }; 22 + 23 + /** 24 + * ntb_msi_init() - Initialize the MSI context 25 + * @ntb: NTB device context 26 + * 27 + * This function must be called before any other ntb_msi function. 28 + * It initializes the context for MSI operations and maps 29 + * the peer memory windows. 30 + * 31 + * This function reserves the last N outbound memory windows (where N 32 + * is the number of peers). 33 + * 34 + * Return: Zero on success, otherwise a negative error number. 35 + */ 36 + int ntb_msi_init(struct ntb_dev *ntb, 37 + void (*desc_changed)(void *ctx)) 38 + { 39 + phys_addr_t mw_phys_addr; 40 + resource_size_t mw_size; 41 + size_t struct_size; 42 + int peer_widx; 43 + int peers; 44 + int ret; 45 + int i; 46 + 47 + peers = ntb_peer_port_count(ntb); 48 + if (peers <= 0) 49 + return -EINVAL; 50 + 51 + struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers; 52 + 53 + ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL); 54 + if (!ntb->msi) 55 + return -ENOMEM; 56 + 57 + ntb->msi->desc_changed = desc_changed; 58 + 59 + for (i = 0; i < peers; i++) { 60 + peer_widx = ntb_peer_mw_count(ntb) - 1 - i; 61 + 62 + ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, 63 + &mw_size); 64 + if (ret) 65 + goto unroll; 66 + 67 + ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr, 68 + mw_size); 69 + if (!ntb->msi->peer_mws[i]) { 70 + ret = -EFAULT; 71 + goto unroll; 72 + } 73 + } 74 + 75 + return 0; 76 + 77 + unroll: 78 + for (i = 
0; i < peers; i++) 79 + if (ntb->msi->peer_mws[i]) 80 + devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]); 81 + 82 + devm_kfree(&ntb->dev, ntb->msi); 83 + ntb->msi = NULL; 84 + return ret; 85 + } 86 + EXPORT_SYMBOL(ntb_msi_init); 87 + 88 + /** 89 + * ntb_msi_setup_mws() - Initialize the MSI inbound memory windows 90 + * @ntb: NTB device context 91 + * 92 + * This function sets up the required inbound memory windows. It should be 93 + * called from a work function after a link up event. 94 + * 95 + * Over the entire network, this function will reserves the last N 96 + * inbound memory windows for each peer (where N is the number of peers). 97 + * 98 + * ntb_msi_init() must be called before this function. 99 + * 100 + * Return: Zero on success, otherwise a negative error number. 101 + */ 102 + int ntb_msi_setup_mws(struct ntb_dev *ntb) 103 + { 104 + struct msi_desc *desc; 105 + u64 addr; 106 + int peer, peer_widx; 107 + resource_size_t addr_align, size_align, size_max; 108 + resource_size_t mw_size = SZ_32K; 109 + resource_size_t mw_min_size = mw_size; 110 + int i; 111 + int ret; 112 + 113 + if (!ntb->msi) 114 + return -EINVAL; 115 + 116 + desc = first_msi_entry(&ntb->pdev->dev); 117 + addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32); 118 + 119 + for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) { 120 + peer_widx = ntb_peer_highest_mw_idx(ntb, peer); 121 + if (peer_widx < 0) 122 + return peer_widx; 123 + 124 + ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align, 125 + NULL, NULL); 126 + if (ret) 127 + return ret; 128 + 129 + addr &= ~(addr_align - 1); 130 + } 131 + 132 + for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) { 133 + peer_widx = ntb_peer_highest_mw_idx(ntb, peer); 134 + if (peer_widx < 0) { 135 + ret = peer_widx; 136 + goto error_out; 137 + } 138 + 139 + ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL, 140 + &size_align, &size_max); 141 + if (ret) 142 + goto error_out; 143 + 144 + mw_size = round_up(mw_size, size_align); 
145 + mw_size = max(mw_size, size_max); 146 + if (mw_size < mw_min_size) 147 + mw_min_size = mw_size; 148 + 149 + ret = ntb_mw_set_trans(ntb, peer, peer_widx, 150 + addr, mw_size); 151 + if (ret) 152 + goto error_out; 153 + } 154 + 155 + ntb->msi->base_addr = addr; 156 + ntb->msi->end_addr = addr + mw_min_size; 157 + 158 + return 0; 159 + 160 + error_out: 161 + for (i = 0; i < peer; i++) { 162 + peer_widx = ntb_peer_highest_mw_idx(ntb, peer); 163 + if (peer_widx < 0) 164 + continue; 165 + 166 + ntb_mw_clear_trans(ntb, i, peer_widx); 167 + } 168 + 169 + return ret; 170 + } 171 + EXPORT_SYMBOL(ntb_msi_setup_mws); 172 + 173 + /** 174 + * ntb_msi_clear_mws() - Clear all inbound memory windows 175 + * @ntb: NTB device context 176 + * 177 + * This function tears down the resources used by ntb_msi_setup_mws(). 178 + */ 179 + void ntb_msi_clear_mws(struct ntb_dev *ntb) 180 + { 181 + int peer; 182 + int peer_widx; 183 + 184 + for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) { 185 + peer_widx = ntb_peer_highest_mw_idx(ntb, peer); 186 + if (peer_widx < 0) 187 + continue; 188 + 189 + ntb_mw_clear_trans(ntb, peer, peer_widx); 190 + } 191 + } 192 + EXPORT_SYMBOL(ntb_msi_clear_mws); 193 + 194 + struct ntb_msi_devres { 195 + struct ntb_dev *ntb; 196 + struct msi_desc *entry; 197 + struct ntb_msi_desc *msi_desc; 198 + }; 199 + 200 + static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry, 201 + struct ntb_msi_desc *msi_desc) 202 + { 203 + u64 addr; 204 + 205 + addr = entry->msg.address_lo + 206 + ((uint64_t)entry->msg.address_hi << 32); 207 + 208 + if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) { 209 + dev_warn_once(&ntb->dev, 210 + "IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n", 211 + entry->irq, addr, ntb->msi->base_addr, 212 + ntb->msi->end_addr); 213 + return -EFAULT; 214 + } 215 + 216 + msi_desc->addr_offset = addr - ntb->msi->base_addr; 217 + msi_desc->data = entry->msg.data; 218 + 219 + return 0; 220 + } 221 + 
222 + static void ntb_msi_write_msg(struct msi_desc *entry, void *data) 223 + { 224 + struct ntb_msi_devres *dr = data; 225 + 226 + WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc)); 227 + 228 + if (dr->ntb->msi->desc_changed) 229 + dr->ntb->msi->desc_changed(dr->ntb->ctx); 230 + } 231 + 232 + static void ntbm_msi_callback_release(struct device *dev, void *res) 233 + { 234 + struct ntb_msi_devres *dr = res; 235 + 236 + dr->entry->write_msi_msg = NULL; 237 + dr->entry->write_msi_msg_data = NULL; 238 + } 239 + 240 + static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry, 241 + struct ntb_msi_desc *msi_desc) 242 + { 243 + struct ntb_msi_devres *dr; 244 + 245 + dr = devres_alloc(ntbm_msi_callback_release, 246 + sizeof(struct ntb_msi_devres), GFP_KERNEL); 247 + if (!dr) 248 + return -ENOMEM; 249 + 250 + dr->ntb = ntb; 251 + dr->entry = entry; 252 + dr->msi_desc = msi_desc; 253 + 254 + devres_add(&ntb->dev, dr); 255 + 256 + dr->entry->write_msi_msg = ntb_msi_write_msg; 257 + dr->entry->write_msi_msg_data = dr; 258 + 259 + return 0; 260 + } 261 + 262 + /** 263 + * ntbm_msi_request_threaded_irq() - allocate an MSI interrupt 264 + * @ntb: NTB device context 265 + * @handler: Function to be called when the IRQ occurs 266 + * @thread_fn: Function to be called in a threaded interrupt context. NULL 267 + * for clients which handle everything in @handler 268 + * @devname: An ascii name for the claiming device, dev_name(dev) if NULL 269 + * @dev_id: A cookie passed back to the handler function 270 + * 271 + * This function assigns an interrupt handler to an unused 272 + * MSI interrupt and returns the descriptor used to trigger 273 + * it. The descriptor can then be sent to a peer to trigger 274 + * the interrupt. 275 + * 276 + * The interrupt resource is managed with devres so it will 277 + * be automatically freed when the NTB device is torn down. 
278 + * 279 + * If an IRQ allocated with this function needs to be freed 280 + * separately, ntbm_free_irq() must be used. 281 + * 282 + * Return: IRQ number assigned on success, otherwise a negative error number. 283 + */ 284 + int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, 285 + irq_handler_t thread_fn, 286 + const char *name, void *dev_id, 287 + struct ntb_msi_desc *msi_desc) 288 + { 289 + struct msi_desc *entry; 290 + struct irq_desc *desc; 291 + int ret; 292 + 293 + if (!ntb->msi) 294 + return -EINVAL; 295 + 296 + for_each_pci_msi_entry(entry, ntb->pdev) { 297 + desc = irq_to_desc(entry->irq); 298 + if (desc->action) 299 + continue; 300 + 301 + ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler, 302 + thread_fn, 0, name, dev_id); 303 + if (ret) 304 + continue; 305 + 306 + if (ntb_msi_set_desc(ntb, entry, msi_desc)) { 307 + devm_free_irq(&ntb->dev, entry->irq, dev_id); 308 + continue; 309 + } 310 + 311 + ret = ntbm_msi_setup_callback(ntb, entry, msi_desc); 312 + if (ret) { 313 + devm_free_irq(&ntb->dev, entry->irq, dev_id); 314 + return ret; 315 + } 316 + 317 + 318 + return entry->irq; 319 + } 320 + 321 + return -ENODEV; 322 + } 323 + EXPORT_SYMBOL(ntbm_msi_request_threaded_irq); 324 + 325 + static int ntbm_msi_callback_match(struct device *dev, void *res, void *data) 326 + { 327 + struct ntb_dev *ntb = dev_ntb(dev); 328 + struct ntb_msi_devres *dr = res; 329 + 330 + return dr->ntb == ntb && dr->entry == data; 331 + } 332 + 333 + /** 334 + * ntbm_msi_free_irq() - free an interrupt 335 + * @ntb: NTB device context 336 + * @irq: Interrupt line to free 337 + * @dev_id: Device identity to free 338 + * 339 + * This function should be used to manually free IRQs allocated with 340 + * ntbm_request_[threaded_]irq(). 
341 + */ 342 + void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id) 343 + { 344 + struct msi_desc *entry = irq_get_msi_desc(irq); 345 + 346 + entry->write_msi_msg = NULL; 347 + entry->write_msi_msg_data = NULL; 348 + 349 + WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release, 350 + ntbm_msi_callback_match, entry)); 351 + 352 + devm_free_irq(&ntb->dev, irq, dev_id); 353 + } 354 + EXPORT_SYMBOL(ntbm_msi_free_irq); 355 + 356 + /** 357 + * ntb_msi_peer_trigger() - Trigger an interrupt handler on a peer 358 + * @ntb: NTB device context 359 + * @peer: Peer index 360 + * @desc: MSI descriptor data which triggers the interrupt 361 + * 362 + * This function triggers an interrupt on a peer. It requires 363 + * the descriptor structure to have been passed from that peer 364 + * by some other means. 365 + * 366 + * Return: Zero on success, otherwise a negative error number. 367 + */ 368 + int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, 369 + struct ntb_msi_desc *desc) 370 + { 371 + int idx; 372 + 373 + if (!ntb->msi) 374 + return -EINVAL; 375 + 376 + idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]); 377 + 378 + iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]); 379 + 380 + return 0; 381 + } 382 + EXPORT_SYMBOL(ntb_msi_peer_trigger); 383 + 384 + /** 385 + * ntb_msi_peer_addr() - Get the DMA address to trigger a peer's MSI interrupt 386 + * @ntb: NTB device context 387 + * @peer: Peer index 388 + * @desc: MSI descriptor data which triggers the interrupt 389 + * @msi_addr: Physical address to trigger the interrupt 390 + * 391 + * This function allows using DMA engines to trigger an interrupt 392 + * (for example, trigger an interrupt to process the data after 393 + * sending it). To trigger the interrupt, write @desc.data to the address 394 + * returned in @msi_addr 395 + * 396 + * Return: Zero on success, otherwise a negative error number. 
397 + */ 398 + int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, 399 + struct ntb_msi_desc *desc, 400 + phys_addr_t *msi_addr) 401 + { 402 + int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer; 403 + phys_addr_t mw_phys_addr; 404 + int ret; 405 + 406 + ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL); 407 + if (ret) 408 + return ret; 409 + 410 + if (msi_addr) 411 + *msi_addr = mw_phys_addr + desc->addr_offset; 412 + 413 + return 0; 414 + } 415 + EXPORT_SYMBOL(ntb_msi_peer_addr);
drivers/ntb/ntb.c drivers/ntb/core.c
+169 -1
drivers/ntb/ntb_transport.c
··· 93 93 module_param(use_dma, bool, 0644); 94 94 MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy"); 95 95 96 + static bool use_msi; 97 + #ifdef CONFIG_NTB_MSI 98 + module_param(use_msi, bool, 0644); 99 + MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells"); 100 + #endif 101 + 96 102 static struct dentry *nt_debugfs_dir; 97 103 98 104 /* Only two-ports NTB devices are supported */ ··· 194 188 u64 tx_err_no_buf; 195 189 u64 tx_memcpy; 196 190 u64 tx_async; 191 + 192 + bool use_msi; 193 + int msi_irq; 194 + struct ntb_msi_desc msi_desc; 195 + struct ntb_msi_desc peer_msi_desc; 197 196 }; 198 197 199 198 struct ntb_transport_mw { ··· 231 220 unsigned int qp_count; 232 221 u64 qp_bitmap; 233 222 u64 qp_bitmap_free; 223 + 224 + bool use_msi; 225 + unsigned int msi_spad_offset; 226 + u64 msi_db_mask; 234 227 235 228 bool link_is_up; 236 229 struct delayed_work link_work; ··· 682 667 return 0; 683 668 } 684 669 670 + static irqreturn_t ntb_transport_isr(int irq, void *dev) 671 + { 672 + struct ntb_transport_qp *qp = dev; 673 + 674 + tasklet_schedule(&qp->rxc_db_work); 675 + 676 + return IRQ_HANDLED; 677 + } 678 + 679 + static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt, 680 + unsigned int qp_num) 681 + { 682 + struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; 683 + int spad = qp_num * 2 + nt->msi_spad_offset; 684 + 685 + if (!nt->use_msi) 686 + return; 687 + 688 + if (spad >= ntb_spad_count(nt->ndev)) 689 + return; 690 + 691 + qp->peer_msi_desc.addr_offset = 692 + ntb_peer_spad_read(qp->ndev, PIDX, spad); 693 + qp->peer_msi_desc.data = 694 + ntb_peer_spad_read(qp->ndev, PIDX, spad + 1); 695 + 696 + dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n", 697 + qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data); 698 + 699 + if (qp->peer_msi_desc.addr_offset) { 700 + qp->use_msi = true; 701 + dev_info(&qp->ndev->pdev->dev, 702 + "Using MSI interrupts for QP%d\n", qp_num); 703 + } 704 + } 705 + 
706 + static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt, 707 + unsigned int qp_num) 708 + { 709 + struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; 710 + int spad = qp_num * 2 + nt->msi_spad_offset; 711 + int rc; 712 + 713 + if (!nt->use_msi) 714 + return; 715 + 716 + if (spad >= ntb_spad_count(nt->ndev)) { 717 + dev_warn_once(&qp->ndev->pdev->dev, 718 + "Not enough SPADS to use MSI interrupts\n"); 719 + return; 720 + } 721 + 722 + ntb_spad_write(qp->ndev, spad, 0); 723 + ntb_spad_write(qp->ndev, spad + 1, 0); 724 + 725 + if (!qp->msi_irq) { 726 + qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr, 727 + KBUILD_MODNAME, qp, 728 + &qp->msi_desc); 729 + if (qp->msi_irq < 0) { 730 + dev_warn(&qp->ndev->pdev->dev, 731 + "Unable to allocate MSI interrupt for qp%d\n", 732 + qp_num); 733 + return; 734 + } 735 + } 736 + 737 + rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset); 738 + if (rc) 739 + goto err_free_interrupt; 740 + 741 + rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data); 742 + if (rc) 743 + goto err_free_interrupt; 744 + 745 + dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n", 746 + qp_num, qp->msi_irq, qp->msi_desc.addr_offset, 747 + qp->msi_desc.data); 748 + 749 + return; 750 + 751 + err_free_interrupt: 752 + devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp); 753 + } 754 + 755 + static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt) 756 + { 757 + int i; 758 + 759 + dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed"); 760 + 761 + for (i = 0; i < nt->qp_count; i++) 762 + ntb_transport_setup_qp_peer_msi(nt, i); 763 + } 764 + 765 + static void ntb_transport_msi_desc_changed(void *data) 766 + { 767 + struct ntb_transport_ctx *nt = data; 768 + int i; 769 + 770 + dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed"); 771 + 772 + for (i = 0; i < nt->qp_count; i++) 773 + ntb_transport_setup_qp_msi(nt, i); 774 + 775 + ntb_peer_db_set(nt->ndev, nt->msi_db_mask); 776 + } 777 + 
685 778 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) 686 779 { 687 780 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; ··· 1028 905 int rc = 0, i, spad; 1029 906 1030 907 /* send the local info, in the opposite order of the way we read it */ 908 + 909 + if (nt->use_msi) { 910 + rc = ntb_msi_setup_mws(ndev); 911 + if (rc) { 912 + dev_warn(&pdev->dev, 913 + "Failed to register MSI memory window: %d\n", 914 + rc); 915 + nt->use_msi = false; 916 + } 917 + } 918 + 919 + for (i = 0; i < nt->qp_count; i++) 920 + ntb_transport_setup_qp_msi(nt, i); 921 + 1031 922 for (i = 0; i < nt->mw_count; i++) { 1032 923 size = nt->mw_vec[i].phys_size; 1033 924 ··· 1099 962 struct ntb_transport_qp *qp = &nt->qp_vec[i]; 1100 963 1101 964 ntb_transport_setup_qp_mw(nt, i); 965 + ntb_transport_setup_qp_peer_msi(nt, i); 1102 966 1103 967 if (qp->client_ready) 1104 968 schedule_delayed_work(&qp->link_work, 0); ··· 1273 1135 return -ENOMEM; 1274 1136 1275 1137 nt->ndev = ndev; 1138 + 1139 + /* 1140 + * If we are using MSI, and have at least one extra memory window, 1141 + * we will reserve the last MW for the MSI window. 
1142 + */ 1143 + if (use_msi && mw_count > 1) { 1144 + rc = ntb_msi_init(ndev, ntb_transport_msi_desc_changed); 1145 + if (!rc) { 1146 + mw_count -= 1; 1147 + nt->use_msi = true; 1148 + } 1149 + } 1150 + 1276 1151 spad_count = ntb_spad_count(ndev); 1277 1152 1278 1153 /* Limit the MW's based on the availability of scratchpads */ ··· 1298 1147 1299 1148 max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2; 1300 1149 nt->mw_count = min(mw_count, max_mw_count_for_spads); 1150 + 1151 + nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH; 1301 1152 1302 1153 nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec), 1303 1154 GFP_KERNEL, node); ··· 1331 1178 qp_bitmap = ntb_db_valid_mask(ndev); 1332 1179 1333 1180 qp_count = ilog2(qp_bitmap); 1181 + if (nt->use_msi) { 1182 + qp_count -= 1; 1183 + nt->msi_db_mask = 1 << qp_count; 1184 + ntb_db_clear_mask(ndev, nt->msi_db_mask); 1185 + } 1186 + 1334 1187 if (max_num_clients && max_num_clients < qp_count) 1335 1188 qp_count = max_num_clients; 1336 1189 else if (nt->mw_count < qp_count) ··· 1760 1601 1761 1602 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); 1762 1603 1763 - ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); 1604 + if (qp->use_msi) 1605 + ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc); 1606 + else 1607 + ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); 1764 1608 1765 1609 /* The entry length can only be zero if the packet is intended to be a 1766 1610 * "link down" or similar. 
Since no payload is being sent in these ··· 2031 1869 qp->rx_dma_chan = NULL; 2032 1870 } 2033 1871 1872 + qp->tx_mw_dma_addr = 0; 2034 1873 if (qp->tx_dma_chan) { 2035 1874 qp->tx_mw_dma_addr = 2036 1875 dma_map_resource(qp->tx_dma_chan->device->dev, ··· 2430 2267 struct ntb_transport_qp *qp; 2431 2268 u64 db_bits; 2432 2269 unsigned int qp_num; 2270 + 2271 + if (ntb_db_read(nt->ndev) & nt->msi_db_mask) { 2272 + ntb_transport_msi_peer_desc_changed(nt); 2273 + ntb_db_clear(nt->ndev, nt->msi_db_mask); 2274 + } 2433 2275 2434 2276 db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free & 2435 2277 ntb_db_vector_mask(nt->ndev, vector));
+9
drivers/ntb/test/Kconfig
··· 26 26 to and from the window without additional software interaction. 27 27 28 28 If unsure, say N. 29 + 30 + config NTB_MSI_TEST 31 + tristate "NTB MSI Test Client" 32 + depends on NTB_MSI 33 + help 34 + This tool demonstrates the use of the NTB MSI library to 35 + send MSI interrupts between peers. 36 + 37 + If unsure, say N.
+1
drivers/ntb/test/Makefile
··· 2 2 obj-$(CONFIG_NTB_PINGPONG) += ntb_pingpong.o 3 3 obj-$(CONFIG_NTB_TOOL) += ntb_tool.o 4 4 obj-$(CONFIG_NTB_PERF) += ntb_perf.o 5 + obj-$(CONFIG_NTB_MSI_TEST) += ntb_msi_test.o
+433
drivers/ntb/test/ntb_msi_test.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 2 + 3 + #include <linux/module.h> 4 + #include <linux/debugfs.h> 5 + #include <linux/ntb.h> 6 + #include <linux/pci.h> 7 + #include <linux/radix-tree.h> 8 + #include <linux/workqueue.h> 9 + 10 + MODULE_LICENSE("Dual BSD/GPL"); 11 + MODULE_VERSION("0.1"); 12 + MODULE_AUTHOR("Logan Gunthorpe <logang@deltatee.com>"); 13 + MODULE_DESCRIPTION("Test for sending MSI interrupts over an NTB memory window"); 14 + 15 + static int num_irqs = 4; 16 + module_param(num_irqs, int, 0644); 17 + MODULE_PARM_DESC(num_irqs, "number of irqs to use"); 18 + 19 + struct ntb_msit_ctx { 20 + struct ntb_dev *ntb; 21 + struct dentry *dbgfs_dir; 22 + struct work_struct setup_work; 23 + 24 + struct ntb_msit_isr_ctx { 25 + int irq_idx; 26 + int irq_num; 27 + int occurrences; 28 + struct ntb_msit_ctx *nm; 29 + struct ntb_msi_desc desc; 30 + } *isr_ctx; 31 + 32 + struct ntb_msit_peer { 33 + struct ntb_msit_ctx *nm; 34 + int pidx; 35 + int num_irqs; 36 + struct completion init_comp; 37 + struct ntb_msi_desc *msi_desc; 38 + } peers[]; 39 + }; 40 + 41 + static struct dentry *ntb_msit_dbgfs_topdir; 42 + 43 + static irqreturn_t ntb_msit_isr(int irq, void *dev) 44 + { 45 + struct ntb_msit_isr_ctx *isr_ctx = dev; 46 + struct ntb_msit_ctx *nm = isr_ctx->nm; 47 + 48 + dev_dbg(&nm->ntb->dev, "Interrupt Occurred: %d", 49 + isr_ctx->irq_idx); 50 + 51 + isr_ctx->occurrences++; 52 + 53 + return IRQ_HANDLED; 54 + } 55 + 56 + static void ntb_msit_setup_work(struct work_struct *work) 57 + { 58 + struct ntb_msit_ctx *nm = container_of(work, struct ntb_msit_ctx, 59 + setup_work); 60 + int irq_count = 0; 61 + int irq; 62 + int ret; 63 + uintptr_t i; 64 + 65 + ret = ntb_msi_setup_mws(nm->ntb); 66 + if (ret) { 67 + dev_err(&nm->ntb->dev, "Unable to setup MSI windows: %d\n", 68 + ret); 69 + return; 70 + } 71 + 72 + for (i = 0; i < num_irqs; i++) { 73 + nm->isr_ctx[i].irq_idx = i; 74 + nm->isr_ctx[i].nm = nm; 75 + 76 + if (!nm->isr_ctx[i].irq_num) { 77 + irq = 
ntbm_msi_request_irq(nm->ntb, ntb_msit_isr, 78 + KBUILD_MODNAME, 79 + &nm->isr_ctx[i], 80 + &nm->isr_ctx[i].desc); 81 + if (irq < 0) 82 + break; 83 + 84 + nm->isr_ctx[i].irq_num = irq; 85 + } 86 + 87 + ret = ntb_spad_write(nm->ntb, 2 * i + 1, 88 + nm->isr_ctx[i].desc.addr_offset); 89 + if (ret) 90 + break; 91 + 92 + ret = ntb_spad_write(nm->ntb, 2 * i + 2, 93 + nm->isr_ctx[i].desc.data); 94 + if (ret) 95 + break; 96 + 97 + irq_count++; 98 + } 99 + 100 + ntb_spad_write(nm->ntb, 0, irq_count); 101 + ntb_peer_db_set(nm->ntb, BIT(ntb_port_number(nm->ntb))); 102 + } 103 + 104 + static void ntb_msit_desc_changed(void *ctx) 105 + { 106 + struct ntb_msit_ctx *nm = ctx; 107 + int i; 108 + 109 + dev_dbg(&nm->ntb->dev, "MSI Descriptors Changed\n"); 110 + 111 + for (i = 0; i < num_irqs; i++) { 112 + ntb_spad_write(nm->ntb, 2 * i + 1, 113 + nm->isr_ctx[i].desc.addr_offset); 114 + ntb_spad_write(nm->ntb, 2 * i + 2, 115 + nm->isr_ctx[i].desc.data); 116 + } 117 + 118 + ntb_peer_db_set(nm->ntb, BIT(ntb_port_number(nm->ntb))); 119 + } 120 + 121 + static void ntb_msit_link_event(void *ctx) 122 + { 123 + struct ntb_msit_ctx *nm = ctx; 124 + 125 + if (!ntb_link_is_up(nm->ntb, NULL, NULL)) 126 + return; 127 + 128 + schedule_work(&nm->setup_work); 129 + } 130 + 131 + static void ntb_msit_copy_peer_desc(struct ntb_msit_ctx *nm, int peer) 132 + { 133 + int i; 134 + struct ntb_msi_desc *desc = nm->peers[peer].msi_desc; 135 + int irq_count = nm->peers[peer].num_irqs; 136 + 137 + for (i = 0; i < irq_count; i++) { 138 + desc[i].addr_offset = ntb_peer_spad_read(nm->ntb, peer, 139 + 2 * i + 1); 140 + desc[i].data = ntb_peer_spad_read(nm->ntb, peer, 2 * i + 2); 141 + } 142 + 143 + dev_info(&nm->ntb->dev, "Found %d interrupts on peer %d\n", 144 + irq_count, peer); 145 + 146 + complete_all(&nm->peers[peer].init_comp); 147 + } 148 + 149 + static void ntb_msit_db_event(void *ctx, int vec) 150 + { 151 + struct ntb_msit_ctx *nm = ctx; 152 + struct ntb_msi_desc *desc; 153 + u64 peer_mask = 
ntb_db_read(nm->ntb); 154 + u32 irq_count; 155 + int peer; 156 + 157 + ntb_db_clear(nm->ntb, peer_mask); 158 + 159 + for (peer = 0; peer < sizeof(peer_mask) * 8; peer++) { 160 + if (!(peer_mask & BIT(peer))) 161 + continue; 162 + 163 + irq_count = ntb_peer_spad_read(nm->ntb, peer, 0); 164 + if (irq_count == -1) 165 + continue; 166 + 167 + desc = kcalloc(irq_count, sizeof(*desc), GFP_ATOMIC); 168 + if (!desc) 169 + continue; 170 + 171 + kfree(nm->peers[peer].msi_desc); 172 + nm->peers[peer].msi_desc = desc; 173 + nm->peers[peer].num_irqs = irq_count; 174 + 175 + ntb_msit_copy_peer_desc(nm, peer); 176 + } 177 + } 178 + 179 + static const struct ntb_ctx_ops ntb_msit_ops = { 180 + .link_event = ntb_msit_link_event, 181 + .db_event = ntb_msit_db_event, 182 + }; 183 + 184 + static int ntb_msit_dbgfs_trigger(void *data, u64 idx) 185 + { 186 + struct ntb_msit_peer *peer = data; 187 + 188 + if (idx >= peer->num_irqs) 189 + return -EINVAL; 190 + 191 + dev_dbg(&peer->nm->ntb->dev, "trigger irq %llu on peer %u\n", 192 + idx, peer->pidx); 193 + 194 + return ntb_msi_peer_trigger(peer->nm->ntb, peer->pidx, 195 + &peer->msi_desc[idx]); 196 + } 197 + 198 + DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_trigger_fops, NULL, 199 + ntb_msit_dbgfs_trigger, "%llu\n"); 200 + 201 + static int ntb_msit_dbgfs_port_get(void *data, u64 *port) 202 + { 203 + struct ntb_msit_peer *peer = data; 204 + 205 + *port = ntb_peer_port_number(peer->nm->ntb, peer->pidx); 206 + 207 + return 0; 208 + } 209 + 210 + DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_port_fops, ntb_msit_dbgfs_port_get, 211 + NULL, "%llu\n"); 212 + 213 + static int ntb_msit_dbgfs_count_get(void *data, u64 *count) 214 + { 215 + struct ntb_msit_peer *peer = data; 216 + 217 + *count = peer->num_irqs; 218 + 219 + return 0; 220 + } 221 + 222 + DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_count_fops, ntb_msit_dbgfs_count_get, 223 + NULL, "%llu\n"); 224 + 225 + static int ntb_msit_dbgfs_ready_get(void *data, u64 *ready) 226 + { 227 + struct ntb_msit_peer *peer = data; 228 + 
229 + *ready = try_wait_for_completion(&peer->init_comp); 230 + 231 + return 0; 232 + } 233 + 234 + static int ntb_msit_dbgfs_ready_set(void *data, u64 ready) 235 + { 236 + struct ntb_msit_peer *peer = data; 237 + 238 + return wait_for_completion_interruptible(&peer->init_comp); 239 + } 240 + 241 + DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_ready_fops, ntb_msit_dbgfs_ready_get, 242 + ntb_msit_dbgfs_ready_set, "%llu\n"); 243 + 244 + static int ntb_msit_dbgfs_occurrences_get(void *data, u64 *occurrences) 245 + { 246 + struct ntb_msit_isr_ctx *isr_ctx = data; 247 + 248 + *occurrences = isr_ctx->occurrences; 249 + 250 + return 0; 251 + } 252 + 253 + DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_occurrences_fops, 254 + ntb_msit_dbgfs_occurrences_get, 255 + NULL, "%llu\n"); 256 + 257 + static int ntb_msit_dbgfs_local_port_get(void *data, u64 *port) 258 + { 259 + struct ntb_msit_ctx *nm = data; 260 + 261 + *port = ntb_port_number(nm->ntb); 262 + 263 + return 0; 264 + } 265 + 266 + DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_local_port_fops, 267 + ntb_msit_dbgfs_local_port_get, 268 + NULL, "%llu\n"); 269 + 270 + static void ntb_msit_create_dbgfs(struct ntb_msit_ctx *nm) 271 + { 272 + struct pci_dev *pdev = nm->ntb->pdev; 273 + char buf[32]; 274 + int i; 275 + struct dentry *peer_dir; 276 + 277 + nm->dbgfs_dir = debugfs_create_dir(pci_name(pdev), 278 + ntb_msit_dbgfs_topdir); 279 + debugfs_create_file("port", 0400, nm->dbgfs_dir, nm, 280 + &ntb_msit_local_port_fops); 281 + 282 + for (i = 0; i < ntb_peer_port_count(nm->ntb); i++) { 283 + nm->peers[i].pidx = i; 284 + nm->peers[i].nm = nm; 285 + init_completion(&nm->peers[i].init_comp); 286 + 287 + snprintf(buf, sizeof(buf), "peer%d", i); 288 + peer_dir = debugfs_create_dir(buf, nm->dbgfs_dir); 289 + 290 + debugfs_create_file_unsafe("trigger", 0200, peer_dir, 291 + &nm->peers[i], 292 + &ntb_msit_trigger_fops); 293 + 294 + debugfs_create_file_unsafe("port", 0400, peer_dir, 295 + &nm->peers[i], &ntb_msit_port_fops); 296 + 297 + 
debugfs_create_file_unsafe("count", 0400, peer_dir, 298 + &nm->peers[i], 299 + &ntb_msit_count_fops); 300 + 301 + debugfs_create_file_unsafe("ready", 0600, peer_dir, 302 + &nm->peers[i], 303 + &ntb_msit_ready_fops); 304 + } 305 + 306 + for (i = 0; i < num_irqs; i++) { 307 + snprintf(buf, sizeof(buf), "irq%d_occurrences", i); 308 + debugfs_create_file_unsafe(buf, 0400, nm->dbgfs_dir, 309 + &nm->isr_ctx[i], 310 + &ntb_msit_occurrences_fops); 311 + } 312 + } 313 + 314 + static void ntb_msit_remove_dbgfs(struct ntb_msit_ctx *nm) 315 + { 316 + debugfs_remove_recursive(nm->dbgfs_dir); 317 + } 318 + 319 + static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb) 320 + { 321 + struct ntb_msit_ctx *nm; 322 + size_t struct_size; 323 + int peers; 324 + int ret; 325 + 326 + peers = ntb_peer_port_count(ntb); 327 + if (peers <= 0) 328 + return -EINVAL; 329 + 330 + if (ntb_spad_is_unsafe(ntb) || ntb_spad_count(ntb) < 2 * num_irqs + 1) { 331 + dev_err(&ntb->dev, "NTB MSI test requires at least %d spads for %d irqs\n", 332 + 2 * num_irqs + 1, num_irqs); 333 + return -EFAULT; 334 + } 335 + 336 + ret = ntb_spad_write(ntb, 0, -1); 337 + if (ret) { 338 + dev_err(&ntb->dev, "Unable to write spads: %d\n", ret); 339 + return ret; 340 + } 341 + 342 + ret = ntb_db_clear_mask(ntb, GENMASK(peers - 1, 0)); 343 + if (ret) { 344 + dev_err(&ntb->dev, "Unable to clear doorbell mask: %d\n", ret); 345 + return ret; 346 + } 347 + 348 + ret = ntb_msi_init(ntb, ntb_msit_desc_changed); 349 + if (ret) { 350 + dev_err(&ntb->dev, "Unable to initialize MSI library: %d\n", 351 + ret); 352 + return ret; 353 + } 354 + 355 + struct_size = sizeof(*nm) + sizeof(*nm->peers) * peers; 356 + 357 + nm = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL); 358 + if (!nm) 359 + return -ENOMEM; 360 + 361 + nm->isr_ctx = devm_kcalloc(&ntb->dev, num_irqs, sizeof(*nm->isr_ctx), 362 + GFP_KERNEL); 363 + if (!nm->isr_ctx) 364 + return -ENOMEM; 365 + 366 + INIT_WORK(&nm->setup_work, ntb_msit_setup_work); 367 + 
nm->ntb = ntb; 368 + 369 + ntb_msit_create_dbgfs(nm); 370 + 371 + ret = ntb_set_ctx(ntb, nm, &ntb_msit_ops); 372 + if (ret) 373 + goto remove_dbgfs; 374 + 375 + if (!nm->isr_ctx) 376 + goto remove_dbgfs; 377 + 378 + ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 379 + 380 + return 0; 381 + 382 + remove_dbgfs: 383 + ntb_msit_remove_dbgfs(nm); 384 + devm_kfree(&ntb->dev, nm->isr_ctx); 385 + devm_kfree(&ntb->dev, nm); 386 + return ret; 387 + } 388 + 389 + static void ntb_msit_remove(struct ntb_client *client, struct ntb_dev *ntb) 390 + { 391 + struct ntb_msit_ctx *nm = ntb->ctx; 392 + int i; 393 + 394 + ntb_link_disable(ntb); 395 + ntb_db_set_mask(ntb, ntb_db_valid_mask(ntb)); 396 + ntb_msi_clear_mws(ntb); 397 + 398 + for (i = 0; i < ntb_peer_port_count(ntb); i++) 399 + kfree(nm->peers[i].msi_desc); 400 + 401 + ntb_clear_ctx(ntb); 402 + ntb_msit_remove_dbgfs(nm); 403 + } 404 + 405 + static struct ntb_client ntb_msit_client = { 406 + .ops = { 407 + .probe = ntb_msit_probe, 408 + .remove = ntb_msit_remove 409 + } 410 + }; 411 + 412 + static int __init ntb_msit_init(void) 413 + { 414 + int ret; 415 + 416 + if (debugfs_initialized()) 417 + ntb_msit_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, 418 + NULL); 419 + 420 + ret = ntb_register_client(&ntb_msit_client); 421 + if (ret) 422 + debugfs_remove_recursive(ntb_msit_dbgfs_topdir); 423 + 424 + return ret; 425 + } 426 + module_init(ntb_msit_init); 427 + 428 + static void __exit ntb_msit_exit(void) 429 + { 430 + ntb_unregister_client(&ntb_msit_client); 431 + debugfs_remove_recursive(ntb_msit_dbgfs_topdir); 432 + } 433 + module_exit(ntb_msit_exit);
+11 -3
drivers/ntb/test/ntb_perf.c
··· 100 100 #define DMA_TRIES 100 101 101 #define DMA_MDELAY 10 102 102 103 - #define MSG_TRIES 500 103 + #define MSG_TRIES 1000 104 104 #define MSG_UDELAY_LOW 1000 105 105 #define MSG_UDELAY_HIGH 2000 106 106 ··· 734 734 { 735 735 int pidx; 736 736 737 - ntb_link_disable(perf->ntb); 738 - 739 737 if (perf->cmd_send == perf_msg_cmd_send) { 740 738 u64 inbits; 741 739 ··· 750 752 751 753 for (pidx = 0; pidx < perf->pcnt; pidx++) 752 754 flush_work(&perf->peers[pidx].service); 755 + 756 + for (pidx = 0; pidx < perf->pcnt; pidx++) { 757 + struct perf_peer *peer = &perf->peers[pidx]; 758 + 759 + ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0); 760 + } 761 + 762 + ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx)); 763 + 764 + ntb_link_disable(perf->ntb); 753 765 } 754 766 755 767 /*==============================================================================
+45 -9
drivers/pci/msi.c
··· 192 192 193 193 static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) 194 194 { 195 + if (desc->msi_attrib.is_virtual) 196 + return NULL; 197 + 195 198 return desc->mask_base + 196 199 desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; 197 200 } ··· 209 206 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) 210 207 { 211 208 u32 mask_bits = desc->masked; 209 + void __iomem *desc_addr; 212 210 213 211 if (pci_msi_ignore_mask) 212 + return 0; 213 + desc_addr = pci_msix_desc_addr(desc); 214 + if (!desc_addr) 214 215 return 0; 215 216 216 217 mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; 217 218 if (flag) 218 219 mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; 219 - writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL); 220 + 221 + writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL); 220 222 221 223 return mask_bits; 222 224 } ··· 281 273 if (entry->msi_attrib.is_msix) { 282 274 void __iomem *base = pci_msix_desc_addr(entry); 283 275 276 + if (!base) { 277 + WARN_ON(1); 278 + return; 279 + } 280 + 284 281 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); 285 282 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); 286 283 msg->data = readl(base + PCI_MSIX_ENTRY_DATA); ··· 316 303 } else if (entry->msi_attrib.is_msix) { 317 304 void __iomem *base = pci_msix_desc_addr(entry); 318 305 306 + if (!base) 307 + goto skip; 308 + 319 309 writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); 320 310 writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); 321 311 writel(msg->data, base + PCI_MSIX_ENTRY_DATA); ··· 343 327 msg->data); 344 328 } 345 329 } 330 + 331 + skip: 346 332 entry->msg = *msg; 333 + 334 + if (entry->write_msi_msg) 335 + entry->write_msi_msg(entry, entry->write_msi_msg_data); 336 + 347 337 } 348 338 349 339 void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) ··· 572 550 573 551 entry->msi_attrib.is_msix = 0; 574 552 entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); 553 + 
entry->msi_attrib.is_virtual = 0; 575 554 entry->msi_attrib.entry_nr = 0; 576 555 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); 577 556 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ ··· 697 674 struct irq_affinity_desc *curmsk, *masks = NULL; 698 675 struct msi_desc *entry; 699 676 int ret, i; 677 + int vec_count = pci_msix_vec_count(dev); 700 678 701 679 if (affd) 702 680 masks = irq_create_affinity_masks(nvec, affd); ··· 720 696 entry->msi_attrib.entry_nr = entries[i].entry; 721 697 else 722 698 entry->msi_attrib.entry_nr = i; 699 + 700 + entry->msi_attrib.is_virtual = 701 + entry->msi_attrib.entry_nr >= vec_count; 702 + 723 703 entry->msi_attrib.default_irq = dev->irq; 724 704 entry->mask_base = base; 725 705 ··· 742 714 { 743 715 struct msi_desc *entry; 744 716 int i = 0; 717 + void __iomem *desc_addr; 745 718 746 719 for_each_pci_msi_entry(entry, dev) { 747 720 if (entries) 748 721 entries[i++].vector = entry->irq; 749 - entry->masked = readl(pci_msix_desc_addr(entry) + 750 - PCI_MSIX_ENTRY_VECTOR_CTRL); 722 + 723 + desc_addr = pci_msix_desc_addr(entry); 724 + if (desc_addr) 725 + entry->masked = readl(desc_addr + 726 + PCI_MSIX_ENTRY_VECTOR_CTRL); 727 + else 728 + entry->masked = 0; 729 + 751 730 msix_mask_irq(entry, 1); 752 731 } 753 732 } ··· 967 932 EXPORT_SYMBOL(pci_msix_vec_count); 968 933 969 934 static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, 970 - int nvec, struct irq_affinity *affd) 935 + int nvec, struct irq_affinity *affd, int flags) 971 936 { 972 937 int nr_entries; 973 938 int i, j; ··· 978 943 nr_entries = pci_msix_vec_count(dev); 979 944 if (nr_entries < 0) 980 945 return nr_entries; 981 - if (nvec > nr_entries) 946 + if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL)) 982 947 return nr_entries; 983 948 984 949 if (entries) { ··· 1114 1079 1115 1080 static int __pci_enable_msix_range(struct pci_dev *dev, 1116 1081 struct msix_entry *entries, int minvec, 1117 - int maxvec, 
struct irq_affinity *affd) 1082 + int maxvec, struct irq_affinity *affd, 1083 + int flags) 1118 1084 { 1119 1085 int rc, nvec = maxvec; 1120 1086 ··· 1132 1096 return -ENOSPC; 1133 1097 } 1134 1098 1135 - rc = __pci_enable_msix(dev, entries, nvec, affd); 1099 + rc = __pci_enable_msix(dev, entries, nvec, affd, flags); 1136 1100 if (rc == 0) 1137 1101 return nvec; 1138 1102 ··· 1163 1127 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1164 1128 int minvec, int maxvec) 1165 1129 { 1166 - return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL); 1130 + return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0); 1167 1131 } 1168 1132 EXPORT_SYMBOL(pci_enable_msix_range); 1169 1133 ··· 1203 1167 1204 1168 if (flags & PCI_IRQ_MSIX) { 1205 1169 msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, 1206 - max_vecs, affd); 1170 + max_vecs, affd, flags); 1207 1171 if (msix_vecs > 0) 1208 1172 return msix_vecs; 1209 1173 }
+10 -2
drivers/pci/switch/switchtec.c
··· 30 30 MODULE_PARM_DESC(use_dma_mrpc, 31 31 "Enable the use of the DMA MRPC feature"); 32 32 33 + static int nirqs = 32; 34 + module_param(nirqs, int, 0644); 35 + MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)"); 36 + 33 37 static dev_t switchtec_devt; 34 38 static DEFINE_IDA(switchtec_minor_ida); 35 39 ··· 1267 1263 int dma_mrpc_irq; 1268 1264 int rc; 1269 1265 1270 - nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4, 1271 - PCI_IRQ_MSIX | PCI_IRQ_MSI); 1266 + if (nirqs < 4) 1267 + nirqs = 4; 1268 + 1269 + nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs, 1270 + PCI_IRQ_MSIX | PCI_IRQ_MSI | 1271 + PCI_IRQ_VIRTUAL); 1272 1272 if (nvecs < 0) 1273 1273 return nvecs; 1274 1274
+8
include/linux/msi.h
··· 64 64 * @msg: The last set MSI message cached for reuse 65 65 * @affinity: Optional pointer to a cpu affinity mask for this descriptor 66 66 * 67 + * @write_msi_msg: Callback that may be called when the MSI message 68 + * address or data changes 69 + * @write_msi_msg_data: Data parameter for the callback. 70 + * 67 71 * @masked: [PCI MSI/X] Mask bits 68 72 * @is_msix: [PCI MSI/X] True if MSI-X 69 73 * @multiple: [PCI MSI/X] log2 num of messages allocated ··· 94 90 const void *iommu_cookie; 95 91 #endif 96 92 93 + void (*write_msi_msg)(struct msi_desc *entry, void *data); 94 + void *write_msi_msg_data; 95 + 97 96 union { 98 97 /* PCI MSI/X specific data */ 99 98 struct { ··· 107 100 u8 multi_cap : 3; 108 101 u8 maskbit : 1; 109 102 u8 is_64 : 1; 103 + u8 is_virtual : 1; 110 104 u16 entry_nr; 111 105 unsigned default_irq; 112 106 } msi_attrib;
+197 -3
include/linux/ntb.h
··· 58 58 59 59 #include <linux/completion.h> 60 60 #include <linux/device.h> 61 + #include <linux/interrupt.h> 61 62 62 63 struct ntb_client; 63 64 struct ntb_dev; 65 + struct ntb_msi; 64 66 struct pci_dev; 65 67 66 68 /** ··· 207 205 } 208 206 209 207 /** 210 - * struct ntb_ctx_ops - ntb device operations 208 + * struct ntb_dev_ops - ntb device operations 211 209 * @port_number: See ntb_port_number(). 212 210 * @peer_port_count: See ntb_peer_port_count(). 213 211 * @peer_port_number: See ntb_peer_port_number(). ··· 406 404 #define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv) 407 405 408 406 /** 409 - * struct ntb_device - ntb device 407 + * struct ntb_dev - ntb device 410 408 * @dev: Linux device object. 411 409 * @pdev: PCI device entry of the ntb. 412 410 * @topo: Detected topology of the ntb. ··· 428 426 spinlock_t ctx_lock; 429 427 /* block unregister until device is fully released */ 430 428 struct completion released; 429 + 430 + #ifdef CONFIG_NTB_MSI 431 + struct ntb_msi *msi; 432 + #endif 431 433 }; 432 434 #define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev) 433 435 ··· 622 616 623 617 return ntb->ops->port_number(ntb); 624 618 } 625 - 626 619 /** 627 620 * ntb_peer_port_count() - get the number of peer device ports 628 621 * @ntb: NTB device context. ··· 656 651 return ntb_default_peer_port_number(ntb, pidx); 657 652 658 653 return ntb->ops->peer_port_number(ntb, pidx); 654 + } 655 + 656 + /** 657 + * ntb_logical_port_number() - get the logical port number of the local port 658 + * @ntb: NTB device context. 659 + * 660 + * The Logical Port Number is defined to be a unique number for each 661 + * port starting from zero through to the number of ports minus one. 662 + * This is in contrast to the Port Number where each port can be assigned 663 + * any unique physical number by the hardware. 664 + * 665 + * The logical port number is useful for calculating the resource indexes 666 + * used by peers. 
667 + * 668 + * Return: the logical port number or negative value indicating an error 669 + */ 670 + static inline int ntb_logical_port_number(struct ntb_dev *ntb) 671 + { 672 + int lport = ntb_port_number(ntb); 673 + int pidx; 674 + 675 + if (lport < 0) 676 + return lport; 677 + 678 + for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) 679 + if (lport <= ntb_peer_port_number(ntb, pidx)) 680 + return pidx; 681 + 682 + return pidx; 683 + } 684 + 685 + /** 686 + * ntb_peer_logical_port_number() - get the logical peer port by given index 687 + * @ntb: NTB device context. 688 + * @pidx: Peer port index. 689 + * 690 + * The Logical Port Number is defined to be a unique number for each 691 + * port starting from zero through to the number of ports minus one. 692 + * This is in contrast to the Port Number where each port can be assigned 693 + * any unique physical number by the hardware. 694 + * 695 + * The logical port number is useful for calculating the resource indexes 696 + * used by peers. 697 + * 698 + * Return: the peer's logical port number or negative value indicating an error 699 + */ 700 + static inline int ntb_peer_logical_port_number(struct ntb_dev *ntb, int pidx) 701 + { 702 + if (ntb_peer_port_number(ntb, pidx) < ntb_port_number(ntb)) 703 + return pidx; 704 + else 705 + return pidx + 1; 659 706 } 660 707 661 708 /** ··· 1561 1504 return -EINVAL; 1562 1505 1563 1506 return ntb->ops->peer_msg_write(ntb, pidx, midx, msg); 1507 + } 1508 + 1509 + /** 1510 + * ntb_peer_resource_idx() - get a resource index for a given peer idx 1511 + * @ntb: NTB device context. 1512 + * @pidx: Peer port index. 1513 + * 1514 + * When constructing a graph of peers, each remote peer must use a different 1515 + * resource index (mw, doorbell, etc) to communicate with each other 1516 + * peer. 1517 + * 1518 + * In a two peer system, this function should always return 0 such that 1519 + * resource 0 points to the remote peer on both ports. 
1520 + * 1521 + * In a 5 peer system, this function will return the following matrix 1522 + * 1523 + * pidx \ port 0 1 2 3 4 1524 + * 0 0 0 1 2 3 1525 + * 1 0 1 1 2 3 1526 + * 2 0 1 2 2 3 1527 + * 3 0 1 2 3 3 1528 + * 1529 + * For example, if this function is used to program peer's memory 1530 + * windows, port 0 will program MW 0 on all it's peers to point to itself. 1531 + * port 1 will program MW 0 in port 0 to point to itself and MW 1 on all 1532 + * other ports. etc. 1533 + * 1534 + * For the legacy two host case, ntb_port_number() and ntb_peer_port_number() 1535 + * both return zero and therefore this function will always return zero. 1536 + * So MW 0 on each host would be programmed to point to the other host. 1537 + * 1538 + * Return: the resource index to use for that peer. 1539 + */ 1540 + static inline int ntb_peer_resource_idx(struct ntb_dev *ntb, int pidx) 1541 + { 1542 + int local_port, peer_port; 1543 + 1544 + if (pidx >= ntb_peer_port_count(ntb)) 1545 + return -EINVAL; 1546 + 1547 + local_port = ntb_logical_port_number(ntb); 1548 + peer_port = ntb_peer_logical_port_number(ntb, pidx); 1549 + 1550 + if (peer_port < local_port) 1551 + return local_port - 1; 1552 + else 1553 + return local_port; 1554 + } 1555 + 1556 + /** 1557 + * ntb_peer_highest_mw_idx() - get a memory window index for a given peer idx 1558 + * using the highest index memory windows first 1559 + * 1560 + * @ntb: NTB device context. 1561 + * @pidx: Peer port index. 1562 + * 1563 + * Like ntb_peer_resource_idx(), except it returns indexes starting with 1564 + * last memory window index. 1565 + * 1566 + * Return: the resource index to use for that peer. 
1567 + */ 1568 + static inline int ntb_peer_highest_mw_idx(struct ntb_dev *ntb, int pidx) 1569 + { 1570 + int ret; 1571 + 1572 + ret = ntb_peer_resource_idx(ntb, pidx); 1573 + if (ret < 0) 1574 + return ret; 1575 + 1576 + return ntb_mw_count(ntb, pidx) - ret - 1; 1577 + } 1578 + 1579 + struct ntb_msi_desc { 1580 + u32 addr_offset; 1581 + u32 data; 1582 + }; 1583 + 1584 + #ifdef CONFIG_NTB_MSI 1585 + 1586 + int ntb_msi_init(struct ntb_dev *ntb, void (*desc_changed)(void *ctx)); 1587 + int ntb_msi_setup_mws(struct ntb_dev *ntb); 1588 + void ntb_msi_clear_mws(struct ntb_dev *ntb); 1589 + int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, 1590 + irq_handler_t thread_fn, 1591 + const char *name, void *dev_id, 1592 + struct ntb_msi_desc *msi_desc); 1593 + void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id); 1594 + int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, 1595 + struct ntb_msi_desc *desc); 1596 + int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, 1597 + struct ntb_msi_desc *desc, 1598 + phys_addr_t *msi_addr); 1599 + 1600 + #else /* not CONFIG_NTB_MSI */ 1601 + 1602 + static inline int ntb_msi_init(struct ntb_dev *ntb, 1603 + void (*desc_changed)(void *ctx)) 1604 + { 1605 + return -EOPNOTSUPP; 1606 + } 1607 + static inline int ntb_msi_setup_mws(struct ntb_dev *ntb) 1608 + { 1609 + return -EOPNOTSUPP; 1610 + } 1611 + static inline void ntb_msi_clear_mws(struct ntb_dev *ntb) {} 1612 + static inline int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, 1613 + irq_handler_t handler, 1614 + irq_handler_t thread_fn, 1615 + const char *name, void *dev_id, 1616 + struct ntb_msi_desc *msi_desc) 1617 + { 1618 + return -EOPNOTSUPP; 1619 + } 1620 + static inline void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, 1621 + void *dev_id) {} 1622 + static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, 1623 + struct ntb_msi_desc *desc) 1624 + { 1625 + return -EOPNOTSUPP; 1626 + } 1627 + static 
inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, 1628 + struct ntb_msi_desc *desc, 1629 + phys_addr_t *msi_addr) 1630 + { 1631 + return -EOPNOTSUPP; 1632 + 1633 + } 1634 + 1635 + #endif /* CONFIG_NTB_MSI */ 1636 + 1637 + static inline int ntbm_msi_request_irq(struct ntb_dev *ntb, 1638 + irq_handler_t handler, 1639 + const char *name, void *dev_id, 1640 + struct ntb_msi_desc *msi_desc) 1641 + { 1642 + return ntbm_msi_request_threaded_irq(ntb, handler, NULL, name, 1643 + dev_id, msi_desc); 1564 1644 } 1565 1645 1566 1646 #endif
+9
include/linux/pci.h
··· 1412 1412 #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ 1413 1413 #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ 1414 1414 #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ 1415 + 1416 + /* 1417 + * Virtual interrupts allow for more interrupts to be allocated 1418 + * than the device has interrupts for. These are not programmed 1419 + * into the device's MSI-X table and must be handled by some 1420 + * other driver means. 1421 + */ 1422 + #define PCI_IRQ_VIRTUAL (1 << 4) 1423 + 1415 1424 #define PCI_IRQ_ALL_TYPES \ 1416 1425 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) 1417 1426
+52 -2
tools/testing/selftests/ntb/ntb_test.sh
··· 78 78 79 79 function _modprobe() 80 80 { 81 - modprobe "$@" 81 + modprobe "$@" || return 1 82 82 83 83 if [[ "$REMOTE_HOST" != "" ]]; then 84 - ssh "$REMOTE_HOST" modprobe "$@" 84 + ssh "$REMOTE_HOST" modprobe "$@" || return 1 85 85 fi 86 86 } 87 87 ··· 442 442 echo " Passed" 443 443 } 444 444 445 + function msi_test() 446 + { 447 + LOC=$1 448 + REM=$2 449 + 450 + write_file 1 $LOC/ready 451 + 452 + echo "Running MSI interrupt tests on: $(subdirname $LOC) / $(subdirname $REM)" 453 + 454 + CNT=$(read_file "$LOC/count") 455 + for ((i = 0; i < $CNT; i++)); do 456 + START=$(read_file $REM/../irq${i}_occurrences) 457 + write_file $i $LOC/trigger 458 + END=$(read_file $REM/../irq${i}_occurrences) 459 + 460 + if [[ $(($END - $START)) != 1 ]]; then 461 + echo "MSI did not trigger the interrupt on the remote side!" >&2 462 + exit 1 463 + fi 464 + done 465 + 466 + echo " Passed" 467 + } 468 + 445 469 function perf_test() 446 470 { 447 471 USE_DMA=$1 ··· 544 520 _modprobe -r ntb_pingpong 545 521 } 546 522 523 + function ntb_msi_tests() 524 + { 525 + LOCAL_MSI="$DEBUGFS/ntb_msi_test/$LOCAL_DEV" 526 + REMOTE_MSI="$REMOTE_HOST:$DEBUGFS/ntb_msi_test/$REMOTE_DEV" 527 + 528 + echo "Starting ntb_msi_test tests..." 529 + 530 + if ! _modprobe ntb_msi_test 2> /dev/null; then 531 + echo " Not doing MSI tests seeing the module is not available." 
532 + return 533 + fi 534 + 535 + port_test $LOCAL_MSI $REMOTE_MSI 536 + 537 + LOCAL_PEER="$LOCAL_MSI/peer$LOCAL_PIDX" 538 + REMOTE_PEER="$REMOTE_MSI/peer$REMOTE_PIDX" 539 + 540 + msi_test $LOCAL_PEER $REMOTE_PEER 541 + msi_test $REMOTE_PEER $LOCAL_PEER 542 + 543 + _modprobe -r ntb_msi_test 544 + } 545 + 547 546 function ntb_perf_tests() 548 547 { 549 548 LOCAL_PERF="$DEBUGFS/ntb_perf/$LOCAL_DEV" ··· 588 541 _modprobe -r ntb_perf 2> /dev/null 589 542 _modprobe -r ntb_pingpong 2> /dev/null 590 543 _modprobe -r ntb_transport 2> /dev/null 544 + _modprobe -r ntb_msi_test 2> /dev/null 591 545 set -e 592 546 } 593 547 ··· 624 576 ntb_tool_tests 625 577 echo 626 578 ntb_pingpong_tests 579 + echo 580 + ntb_msi_tests 627 581 echo 628 582 ntb_perf_tests 629 583 echo