Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ntb-4.15' of git://github.com/jonmason/ntb

Pull ntb updates from Jon Mason:
"Support for the switchtec ntb and related changes. Also, a couple of
bug fixes"

[ The timing isn't great. I had asked people to send me pull requests
before my family vacation, and this code has not even been in
linux-next as far as I can tell. But Logan Gunthorpe pleaded for its
inclusion because the Switchtec driver has apparently been around for
a while, just never in linux-next - Linus ]

* tag 'ntb-4.15' of git://github.com/jonmason/ntb:
ntb: intel: remove b2b memory window workaround for Skylake NTB
NTB: make idt_89hpes_cfg const
NTB: switchtec_ntb: Update switchtec documentation with notes for NTB
NTB: switchtec_ntb: Add memory window support
NTB: switchtec_ntb: Implement scratchpad registers
NTB: switchtec_ntb: Implement doorbell registers
NTB: switchtec_ntb: Add link management
NTB: switchtec_ntb: Add skeleton NTB driver
NTB: switchtec_ntb: Initialize hardware for doorbells and messages
NTB: switchtec_ntb: Initialize hardware for memory windows
NTB: switchtec_ntb: Introduce initial NTB driver
NTB: Add check and comment for link up to mw_count() and mw_get_align()
NTB: Ensure ntb_mw_get_align() is only called when the link is up
NTB: switchtec: Add link event notifier callback
NTB: switchtec: Add NTB hardware register definitions
NTB: switchtec: Export class symbol for use in upper layer driver
NTB: switchtec: Move structure definitions into a common header
ntb: update maintainer list for Intel NTB driver

+1715 -367
+12
Documentation/switchtec.txt
··· 78 78 between PCI Function Framework number (used by the event system) 79 79 and Switchtec Logic Port ID and Partition number (which is more 80 80 user friendly). 81 + 82 + 83 + Non-Transparent Bridge (NTB) Driver 84 + =================================== 85 + 86 + An NTB driver is provided for the switchtec hardware in switchtec_ntb. 87 + Currently, it only supports switches configured with exactly 2 88 + partitions. It also requires the following configuration settings: 89 + 90 + * Both partitions must be able to access each other's GAS spaces. 91 + Thus, the bits in the GAS Access Vector under Management Settings 92 + must be set to support this.
+4 -3
MAINTAINERS
··· 9726 9726 F: drivers/ntb/hw/idt/ 9727 9727 9728 9728 NTB INTEL DRIVER 9729 - M: Jon Mason <jdmason@kudzu.us> 9730 9729 M: Dave Jiang <dave.jiang@intel.com> 9731 9730 L: linux-ntb@googlegroups.com 9732 9731 S: Supported 9733 - W: https://github.com/jonmason/ntb/wiki 9734 - T: git git://github.com/jonmason/ntb.git 9732 + W: https://github.com/davejiang/linux/wiki 9733 + T: git https://github.com/davejiang/linux.git 9735 9734 F: drivers/ntb/hw/intel/ 9736 9735 9737 9736 NTFS FILESYSTEM ··· 10442 10443 F: Documentation/ABI/testing/sysfs-class-switchtec 10443 10444 F: drivers/pci/switch/switchtec* 10444 10445 F: include/uapi/linux/switchtec_ioctl.h 10446 + F: include/linux/switchtec.h 10447 + F: drivers/ntb/hw/mscc/ 10445 10448 10446 10449 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) 10447 10450 M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+1
drivers/ntb/hw/Kconfig
··· 1 1 source "drivers/ntb/hw/amd/Kconfig" 2 2 source "drivers/ntb/hw/idt/Kconfig" 3 3 source "drivers/ntb/hw/intel/Kconfig" 4 + source "drivers/ntb/hw/mscc/Kconfig"
+1
drivers/ntb/hw/Makefile
··· 1 1 obj-$(CONFIG_NTB_AMD) += amd/ 2 2 obj-$(CONFIG_NTB_IDT) += idt/ 3 3 obj-$(CONFIG_NTB_INTEL) += intel/ 4 + obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
+8 -8
drivers/ntb/hw/idt/ntb_hw_idt.c
··· 2628 2628 /* 2629 2629 * IDT PCIe-switch models ports configuration structures 2630 2630 */ 2631 - static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = { 2631 + static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = { 2632 2632 .name = "89HPES24NT6AG2", 2633 2633 .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12} 2634 2634 }; 2635 - static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = { 2635 + static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = { 2636 2636 .name = "89HPES32NT8AG2", 2637 2637 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2638 2638 }; 2639 - static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = { 2639 + static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = { 2640 2640 .name = "89HPES32NT8BG2", 2641 2641 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2642 2642 }; 2643 - static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = { 2643 + static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = { 2644 2644 .name = "89HPES12NT12G2", 2645 2645 .port_cnt = 3, .ports = {0, 8, 16} 2646 2646 }; 2647 - static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = { 2647 + static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = { 2648 2648 .name = "89HPES16NT16G2", 2649 2649 .port_cnt = 4, .ports = {0, 8, 12, 16} 2650 2650 }; 2651 - static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = { 2651 + static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = { 2652 2652 .name = "89HPES24NT24G2", 2653 2653 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2654 2654 }; 2655 - static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = { 2655 + static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = { 2656 2656 .name = "89HPES32NT24AG2", 2657 2657 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2658 2658 }; 2659 - static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = { 2659 + static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = { 2660 2660 .name = "89HPES32NT24BG2", 2661 2661 
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2662 2662 };
+2 -73
drivers/ntb/hw/intel/ntb_hw_intel.c
··· 1742 1742 { 1743 1743 struct pci_dev *pdev; 1744 1744 void __iomem *mmio; 1745 - resource_size_t bar_size; 1746 1745 phys_addr_t bar_addr; 1747 - int b2b_bar; 1748 - u8 bar_sz; 1749 1746 1750 1747 pdev = ndev->ntb.pdev; 1751 1748 mmio = ndev->self_mmio; 1752 1749 1753 - if (ndev->b2b_idx == UINT_MAX) { 1754 - dev_dbg(&pdev->dev, "not using b2b mw\n"); 1755 - b2b_bar = 0; 1756 - ndev->b2b_off = 0; 1757 - } else { 1758 - b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx); 1759 - if (b2b_bar < 0) 1760 - return -EIO; 1761 - 1762 - dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar); 1763 - 1764 - bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); 1765 - 1766 - dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size); 1767 - 1768 - if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) { 1769 - dev_dbg(&pdev->dev, "b2b using first half of bar\n"); 1770 - ndev->b2b_off = bar_size >> 1; 1771 - } else if (bar_size >= XEON_B2B_MIN_SIZE) { 1772 - dev_dbg(&pdev->dev, "b2b using whole bar\n"); 1773 - ndev->b2b_off = 0; 1774 - --ndev->mw_count; 1775 - } else { 1776 - dev_dbg(&pdev->dev, "b2b bar size is too small\n"); 1777 - return -EIO; 1778 - } 1779 - } 1780 - 1781 - /* 1782 - * Reset the secondary bar sizes to match the primary bar sizes, 1783 - * except disable or halve the size of the b2b secondary bar. 
1784 - */ 1785 - pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz); 1786 - dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz); 1787 - if (b2b_bar == 1) { 1788 - if (ndev->b2b_off) 1789 - bar_sz -= 1; 1790 - else 1791 - bar_sz = 0; 1792 - } 1793 - 1794 - pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz); 1795 - pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz); 1796 - dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz); 1797 - 1798 - pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz); 1799 - dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz); 1800 - if (b2b_bar == 2) { 1801 - if (ndev->b2b_off) 1802 - bar_sz -= 1; 1803 - else 1804 - bar_sz = 0; 1805 - } 1806 - 1807 - pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz); 1808 - pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz); 1809 - dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz); 1810 - 1811 - /* SBAR01 hit by first part of the b2b bar */ 1812 - if (b2b_bar == 0) 1813 - bar_addr = addr->bar0_addr; 1814 - else if (b2b_bar == 1) 1815 - bar_addr = addr->bar2_addr64; 1816 - else if (b2b_bar == 2) 1817 - bar_addr = addr->bar4_addr64; 1818 - else 1819 - return -EIO; 1820 - 1821 1750 /* setup incoming bar limits == base addrs (zero length windows) */ 1822 - bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); 1751 + bar_addr = addr->bar2_addr64; 1823 1752 iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); 1824 1753 bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); 1825 1754 dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); 1826 1755 1827 - bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); 1756 + bar_addr = addr->bar4_addr64; 1828 1757 iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); 1829 1758 bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); 1830 1759 dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
+9
drivers/ntb/hw/mscc/Kconfig
··· 1 + config NTB_SWITCHTEC 2 + tristate "MicroSemi Switchtec Non-Transparent Bridge Support" 3 + select PCI_SW_SWITCHTEC 4 + help 5 + Enables NTB support for Switchtec PCI switches. This also 6 + selects the Switchtec management driver as they share the same 7 + hardware interface. 8 + 9 + If unsure, say N.
+1
drivers/ntb/hw/mscc/Makefile
··· 1 + obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o
+1216
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
··· 1 + /* 2 + * Microsemi Switchtec(tm) PCIe Management Driver 3 + * Copyright (c) 2017, Microsemi Corporation 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + */ 15 + 16 + #include <linux/switchtec.h> 17 + #include <linux/module.h> 18 + #include <linux/delay.h> 19 + #include <linux/kthread.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/ntb.h> 22 + 23 + MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver"); 24 + MODULE_VERSION("0.1"); 25 + MODULE_LICENSE("GPL"); 26 + MODULE_AUTHOR("Microsemi Corporation"); 27 + 28 + static ulong max_mw_size = SZ_2M; 29 + module_param(max_mw_size, ulong, 0644); 30 + MODULE_PARM_DESC(max_mw_size, 31 + "Max memory window size reported to the upper layer"); 32 + 33 + static bool use_lut_mws; 34 + module_param(use_lut_mws, bool, 0644); 35 + MODULE_PARM_DESC(use_lut_mws, 36 + "Enable the use of the LUT based memory windows"); 37 + 38 + #ifndef ioread64 39 + #ifdef readq 40 + #define ioread64 readq 41 + #else 42 + #define ioread64 _ioread64 43 + static inline u64 _ioread64(void __iomem *mmio) 44 + { 45 + u64 low, high; 46 + 47 + low = ioread32(mmio); 48 + high = ioread32(mmio + sizeof(u32)); 49 + return low | (high << 32); 50 + } 51 + #endif 52 + #endif 53 + 54 + #ifndef iowrite64 55 + #ifdef writeq 56 + #define iowrite64 writeq 57 + #else 58 + #define iowrite64 _iowrite64 59 + static inline void _iowrite64(u64 val, void __iomem *mmio) 60 + { 61 + iowrite32(val, mmio); 62 + iowrite32(val >> 32, mmio + sizeof(u32)); 63 + } 64 + #endif 65 + #endif 66 + 67 + #define SWITCHTEC_NTB_MAGIC 
0x45CC0001 68 + #define MAX_MWS 128 69 + 70 + struct shared_mw { 71 + u32 magic; 72 + u32 link_sta; 73 + u32 partition_id; 74 + u64 mw_sizes[MAX_MWS]; 75 + u32 spad[128]; 76 + }; 77 + 78 + #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry) 79 + #define LUT_SIZE SZ_64K 80 + 81 + struct switchtec_ntb { 82 + struct ntb_dev ntb; 83 + struct switchtec_dev *stdev; 84 + 85 + int self_partition; 86 + int peer_partition; 87 + 88 + int doorbell_irq; 89 + int message_irq; 90 + 91 + struct ntb_info_regs __iomem *mmio_ntb; 92 + struct ntb_ctrl_regs __iomem *mmio_ctrl; 93 + struct ntb_dbmsg_regs __iomem *mmio_dbmsg; 94 + struct ntb_ctrl_regs __iomem *mmio_self_ctrl; 95 + struct ntb_ctrl_regs __iomem *mmio_peer_ctrl; 96 + struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg; 97 + 98 + struct shared_mw *self_shared; 99 + struct shared_mw __iomem *peer_shared; 100 + dma_addr_t self_shared_dma; 101 + 102 + u64 db_mask; 103 + u64 db_valid_mask; 104 + int db_shift; 105 + int db_peer_shift; 106 + 107 + /* synchronize rmw access of db_mask and hw reg */ 108 + spinlock_t db_mask_lock; 109 + 110 + int nr_direct_mw; 111 + int nr_lut_mw; 112 + int direct_mw_to_bar[MAX_DIRECT_MW]; 113 + 114 + int peer_nr_direct_mw; 115 + int peer_nr_lut_mw; 116 + int peer_direct_mw_to_bar[MAX_DIRECT_MW]; 117 + 118 + bool link_is_up; 119 + enum ntb_speed link_speed; 120 + enum ntb_width link_width; 121 + }; 122 + 123 + static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb) 124 + { 125 + return container_of(ntb, struct switchtec_ntb, ntb); 126 + } 127 + 128 + static int switchtec_ntb_part_op(struct switchtec_ntb *sndev, 129 + struct ntb_ctrl_regs __iomem *ctl, 130 + u32 op, int wait_status) 131 + { 132 + static const char * const op_text[] = { 133 + [NTB_CTRL_PART_OP_LOCK] = "lock", 134 + [NTB_CTRL_PART_OP_CFG] = "configure", 135 + [NTB_CTRL_PART_OP_RESET] = "reset", 136 + }; 137 + 138 + int i; 139 + u32 ps; 140 + int status; 141 + 142 + switch (op) { 143 + case NTB_CTRL_PART_OP_LOCK: 
144 + status = NTB_CTRL_PART_STATUS_LOCKING; 145 + break; 146 + case NTB_CTRL_PART_OP_CFG: 147 + status = NTB_CTRL_PART_STATUS_CONFIGURING; 148 + break; 149 + case NTB_CTRL_PART_OP_RESET: 150 + status = NTB_CTRL_PART_STATUS_RESETTING; 151 + break; 152 + default: 153 + return -EINVAL; 154 + } 155 + 156 + iowrite32(op, &ctl->partition_op); 157 + 158 + for (i = 0; i < 1000; i++) { 159 + if (msleep_interruptible(50) != 0) { 160 + iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op); 161 + return -EINTR; 162 + } 163 + 164 + ps = ioread32(&ctl->partition_status) & 0xFFFF; 165 + 166 + if (ps != status) 167 + break; 168 + } 169 + 170 + if (ps == wait_status) 171 + return 0; 172 + 173 + if (ps == status) { 174 + dev_err(&sndev->stdev->dev, 175 + "Timed out while peforming %s (%d). (%08x)", 176 + op_text[op], op, 177 + ioread32(&ctl->partition_status)); 178 + 179 + return -ETIMEDOUT; 180 + } 181 + 182 + return -EIO; 183 + } 184 + 185 + static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx, 186 + u32 val) 187 + { 188 + if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg)) 189 + return -EINVAL; 190 + 191 + iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg); 192 + 193 + return 0; 194 + } 195 + 196 + static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx) 197 + { 198 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 199 + int nr_direct_mw = sndev->peer_nr_direct_mw; 200 + int nr_lut_mw = sndev->peer_nr_lut_mw - 1; 201 + 202 + if (pidx != NTB_DEF_PEER_IDX) 203 + return -EINVAL; 204 + 205 + if (!use_lut_mws) 206 + nr_lut_mw = 0; 207 + 208 + return nr_direct_mw + nr_lut_mw; 209 + } 210 + 211 + static int lut_index(struct switchtec_ntb *sndev, int mw_idx) 212 + { 213 + return mw_idx - sndev->nr_direct_mw + 1; 214 + } 215 + 216 + static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx) 217 + { 218 + return mw_idx - sndev->peer_nr_direct_mw + 1; 219 + } 220 + 221 + static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, 222 
+ int widx, resource_size_t *addr_align, 223 + resource_size_t *size_align, 224 + resource_size_t *size_max) 225 + { 226 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 227 + int lut; 228 + resource_size_t size; 229 + 230 + if (pidx != NTB_DEF_PEER_IDX) 231 + return -EINVAL; 232 + 233 + lut = widx >= sndev->peer_nr_direct_mw; 234 + size = ioread64(&sndev->peer_shared->mw_sizes[widx]); 235 + 236 + if (size == 0) 237 + return -EINVAL; 238 + 239 + if (addr_align) 240 + *addr_align = lut ? size : SZ_4K; 241 + 242 + if (size_align) 243 + *size_align = lut ? size : SZ_4K; 244 + 245 + if (size_max) 246 + *size_max = size; 247 + 248 + return 0; 249 + } 250 + 251 + static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx) 252 + { 253 + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; 254 + int bar = sndev->peer_direct_mw_to_bar[idx]; 255 + u32 ctl_val; 256 + 257 + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); 258 + ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN; 259 + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); 260 + iowrite32(0, &ctl->bar_entry[bar].win_size); 261 + iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr); 262 + } 263 + 264 + static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx) 265 + { 266 + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; 267 + 268 + iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]); 269 + } 270 + 271 + static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx, 272 + dma_addr_t addr, resource_size_t size) 273 + { 274 + int xlate_pos = ilog2(size); 275 + int bar = sndev->peer_direct_mw_to_bar[idx]; 276 + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; 277 + u32 ctl_val; 278 + 279 + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); 280 + ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN; 281 + 282 + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); 283 + iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size); 284 + iowrite64(sndev->self_partition | addr, 285 + 
&ctl->bar_entry[bar].xlate_addr); 286 + } 287 + 288 + static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx, 289 + dma_addr_t addr, resource_size_t size) 290 + { 291 + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; 292 + 293 + iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr), 294 + &ctl->lut_entry[peer_lut_index(sndev, idx)]); 295 + } 296 + 297 + static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, 298 + dma_addr_t addr, resource_size_t size) 299 + { 300 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 301 + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; 302 + int xlate_pos = ilog2(size); 303 + int nr_direct_mw = sndev->peer_nr_direct_mw; 304 + int rc; 305 + 306 + if (pidx != NTB_DEF_PEER_IDX) 307 + return -EINVAL; 308 + 309 + dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap", 310 + widx, pidx, &addr, &size); 311 + 312 + if (widx >= switchtec_ntb_mw_count(ntb, pidx)) 313 + return -EINVAL; 314 + 315 + if (xlate_pos < 12) 316 + return -EINVAL; 317 + 318 + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, 319 + NTB_CTRL_PART_STATUS_LOCKED); 320 + if (rc) 321 + return rc; 322 + 323 + if (addr == 0 || size == 0) { 324 + if (widx < nr_direct_mw) 325 + switchtec_ntb_mw_clr_direct(sndev, widx); 326 + else 327 + switchtec_ntb_mw_clr_lut(sndev, widx); 328 + } else { 329 + if (widx < nr_direct_mw) 330 + switchtec_ntb_mw_set_direct(sndev, widx, addr, size); 331 + else 332 + switchtec_ntb_mw_set_lut(sndev, widx, addr, size); 333 + } 334 + 335 + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, 336 + NTB_CTRL_PART_STATUS_NORMAL); 337 + 338 + if (rc == -EIO) { 339 + dev_err(&sndev->stdev->dev, 340 + "Hardware reported an error configuring mw %d: %08x", 341 + widx, ioread32(&ctl->bar_error)); 342 + 343 + if (widx < nr_direct_mw) 344 + switchtec_ntb_mw_clr_direct(sndev, widx); 345 + else 346 + switchtec_ntb_mw_clr_lut(sndev, widx); 347 + 348 + 
switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, 349 + NTB_CTRL_PART_STATUS_NORMAL); 350 + } 351 + 352 + return rc; 353 + } 354 + 355 + static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb) 356 + { 357 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 358 + 359 + return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0); 360 + } 361 + 362 + static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev, 363 + int idx, phys_addr_t *base, 364 + resource_size_t *size) 365 + { 366 + int bar = sndev->direct_mw_to_bar[idx]; 367 + size_t offset = 0; 368 + 369 + if (bar < 0) 370 + return -EINVAL; 371 + 372 + if (idx == 0) { 373 + /* 374 + * This is the direct BAR shared with the LUTs 375 + * which means the actual window will be offset 376 + * by the size of all the LUT entries. 377 + */ 378 + 379 + offset = LUT_SIZE * sndev->nr_lut_mw; 380 + } 381 + 382 + if (base) 383 + *base = pci_resource_start(sndev->ntb.pdev, bar) + offset; 384 + 385 + if (size) { 386 + *size = pci_resource_len(sndev->ntb.pdev, bar) - offset; 387 + if (offset && *size > offset) 388 + *size = offset; 389 + 390 + if (*size > max_mw_size) 391 + *size = max_mw_size; 392 + } 393 + 394 + return 0; 395 + } 396 + 397 + static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev, 398 + int idx, phys_addr_t *base, 399 + resource_size_t *size) 400 + { 401 + int bar = sndev->direct_mw_to_bar[0]; 402 + int offset; 403 + 404 + offset = LUT_SIZE * lut_index(sndev, idx); 405 + 406 + if (base) 407 + *base = pci_resource_start(sndev->ntb.pdev, bar) + offset; 408 + 409 + if (size) 410 + *size = LUT_SIZE; 411 + 412 + return 0; 413 + } 414 + 415 + static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, 416 + phys_addr_t *base, 417 + resource_size_t *size) 418 + { 419 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 420 + 421 + if (idx < sndev->nr_direct_mw) 422 + return switchtec_ntb_direct_get_addr(sndev, idx, base, size); 423 + else if (idx < 
switchtec_ntb_peer_mw_count(ntb)) 424 + return switchtec_ntb_lut_get_addr(sndev, idx, base, size); 425 + else 426 + return -EINVAL; 427 + } 428 + 429 + static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev, 430 + int partition, 431 + enum ntb_speed *speed, 432 + enum ntb_width *width) 433 + { 434 + struct switchtec_dev *stdev = sndev->stdev; 435 + 436 + u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id); 437 + u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]); 438 + 439 + if (speed) 440 + *speed = (linksta >> 16) & 0xF; 441 + 442 + if (width) 443 + *width = (linksta >> 20) & 0x3F; 444 + } 445 + 446 + static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev) 447 + { 448 + enum ntb_speed self_speed, peer_speed; 449 + enum ntb_width self_width, peer_width; 450 + 451 + if (!sndev->link_is_up) { 452 + sndev->link_speed = NTB_SPEED_NONE; 453 + sndev->link_width = NTB_WIDTH_NONE; 454 + return; 455 + } 456 + 457 + switchtec_ntb_part_link_speed(sndev, sndev->self_partition, 458 + &self_speed, &self_width); 459 + switchtec_ntb_part_link_speed(sndev, sndev->peer_partition, 460 + &peer_speed, &peer_width); 461 + 462 + sndev->link_speed = min(self_speed, peer_speed); 463 + sndev->link_width = min(self_width, peer_width); 464 + } 465 + 466 + enum { 467 + LINK_MESSAGE = 0, 468 + MSG_LINK_UP = 1, 469 + MSG_LINK_DOWN = 2, 470 + MSG_CHECK_LINK = 3, 471 + }; 472 + 473 + static void switchtec_ntb_check_link(struct switchtec_ntb *sndev) 474 + { 475 + int link_sta; 476 + int old = sndev->link_is_up; 477 + 478 + link_sta = sndev->self_shared->link_sta; 479 + if (link_sta) { 480 + u64 peer = ioread64(&sndev->peer_shared->magic); 481 + 482 + if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC) 483 + link_sta = peer >> 32; 484 + else 485 + link_sta = 0; 486 + } 487 + 488 + sndev->link_is_up = link_sta; 489 + switchtec_ntb_set_link_speed(sndev); 490 + 491 + if (link_sta != old) { 492 + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, 
MSG_CHECK_LINK); 493 + ntb_link_event(&sndev->ntb); 494 + dev_info(&sndev->stdev->dev, "ntb link %s", 495 + link_sta ? "up" : "down"); 496 + } 497 + } 498 + 499 + static void switchtec_ntb_link_notification(struct switchtec_dev *stdev) 500 + { 501 + struct switchtec_ntb *sndev = stdev->sndev; 502 + 503 + switchtec_ntb_check_link(sndev); 504 + } 505 + 506 + static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb, 507 + enum ntb_speed *speed, 508 + enum ntb_width *width) 509 + { 510 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 511 + 512 + if (speed) 513 + *speed = sndev->link_speed; 514 + if (width) 515 + *width = sndev->link_width; 516 + 517 + return sndev->link_is_up; 518 + } 519 + 520 + static int switchtec_ntb_link_enable(struct ntb_dev *ntb, 521 + enum ntb_speed max_speed, 522 + enum ntb_width max_width) 523 + { 524 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 525 + 526 + dev_dbg(&sndev->stdev->dev, "enabling link"); 527 + 528 + sndev->self_shared->link_sta = 1; 529 + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP); 530 + 531 + switchtec_ntb_check_link(sndev); 532 + 533 + return 0; 534 + } 535 + 536 + static int switchtec_ntb_link_disable(struct ntb_dev *ntb) 537 + { 538 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 539 + 540 + dev_dbg(&sndev->stdev->dev, "disabling link"); 541 + 542 + sndev->self_shared->link_sta = 0; 543 + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP); 544 + 545 + switchtec_ntb_check_link(sndev); 546 + 547 + return 0; 548 + } 549 + 550 + static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb) 551 + { 552 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 553 + 554 + return sndev->db_valid_mask; 555 + } 556 + 557 + static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb) 558 + { 559 + return 1; 560 + } 561 + 562 + static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) 563 + { 564 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 565 + 566 + if (db_vector < 0 || db_vector > 1) 567 + return 0; 
568 + 569 + return sndev->db_valid_mask; 570 + } 571 + 572 + static u64 switchtec_ntb_db_read(struct ntb_dev *ntb) 573 + { 574 + u64 ret; 575 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 576 + 577 + ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift; 578 + 579 + return ret & sndev->db_valid_mask; 580 + } 581 + 582 + static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) 583 + { 584 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 585 + 586 + iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb); 587 + 588 + return 0; 589 + } 590 + 591 + static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) 592 + { 593 + unsigned long irqflags; 594 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 595 + 596 + if (db_bits & ~sndev->db_valid_mask) 597 + return -EINVAL; 598 + 599 + spin_lock_irqsave(&sndev->db_mask_lock, irqflags); 600 + 601 + sndev->db_mask |= db_bits << sndev->db_shift; 602 + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); 603 + 604 + spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); 605 + 606 + return 0; 607 + } 608 + 609 + static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) 610 + { 611 + unsigned long irqflags; 612 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 613 + 614 + if (db_bits & ~sndev->db_valid_mask) 615 + return -EINVAL; 616 + 617 + spin_lock_irqsave(&sndev->db_mask_lock, irqflags); 618 + 619 + sndev->db_mask &= ~(db_bits << sndev->db_shift); 620 + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); 621 + 622 + spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); 623 + 624 + return 0; 625 + } 626 + 627 + static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb) 628 + { 629 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 630 + 631 + return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask; 632 + } 633 + 634 + static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, 635 + phys_addr_t *db_addr, 636 + resource_size_t *db_size) 637 + { 
638 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 639 + unsigned long offset; 640 + 641 + offset = (unsigned long)sndev->mmio_self_dbmsg->odb - 642 + (unsigned long)sndev->stdev->mmio; 643 + 644 + offset += sndev->db_shift / 8; 645 + 646 + if (db_addr) 647 + *db_addr = pci_resource_start(ntb->pdev, 0) + offset; 648 + if (db_size) 649 + *db_size = sizeof(u32); 650 + 651 + return 0; 652 + } 653 + 654 + static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) 655 + { 656 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 657 + 658 + iowrite64(db_bits << sndev->db_peer_shift, 659 + &sndev->mmio_self_dbmsg->odb); 660 + 661 + return 0; 662 + } 663 + 664 + static int switchtec_ntb_spad_count(struct ntb_dev *ntb) 665 + { 666 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 667 + 668 + return ARRAY_SIZE(sndev->self_shared->spad); 669 + } 670 + 671 + static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx) 672 + { 673 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 674 + 675 + if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) 676 + return 0; 677 + 678 + if (!sndev->self_shared) 679 + return 0; 680 + 681 + return sndev->self_shared->spad[idx]; 682 + } 683 + 684 + static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) 685 + { 686 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 687 + 688 + if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) 689 + return -EINVAL; 690 + 691 + if (!sndev->self_shared) 692 + return -EIO; 693 + 694 + sndev->self_shared->spad[idx] = val; 695 + 696 + return 0; 697 + } 698 + 699 + static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, 700 + int sidx) 701 + { 702 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 703 + 704 + if (pidx != NTB_DEF_PEER_IDX) 705 + return -EINVAL; 706 + 707 + if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) 708 + return 0; 709 + 710 + if (!sndev->peer_shared) 711 + return 0; 712 + 713 + return ioread32(&sndev->peer_shared->spad[sidx]); 714 + 
} 715 + 716 + static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, 717 + int sidx, u32 val) 718 + { 719 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 720 + 721 + if (pidx != NTB_DEF_PEER_IDX) 722 + return -EINVAL; 723 + 724 + if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) 725 + return -EINVAL; 726 + 727 + if (!sndev->peer_shared) 728 + return -EIO; 729 + 730 + iowrite32(val, &sndev->peer_shared->spad[sidx]); 731 + 732 + return 0; 733 + } 734 + 735 + static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, 736 + int sidx, phys_addr_t *spad_addr) 737 + { 738 + struct switchtec_ntb *sndev = ntb_sndev(ntb); 739 + unsigned long offset; 740 + 741 + if (pidx != NTB_DEF_PEER_IDX) 742 + return -EINVAL; 743 + 744 + offset = (unsigned long)&sndev->peer_shared->spad[sidx] - 745 + (unsigned long)sndev->stdev->mmio; 746 + 747 + if (spad_addr) 748 + *spad_addr = pci_resource_start(ntb->pdev, 0) + offset; 749 + 750 + return 0; 751 + } 752 + 753 + static const struct ntb_dev_ops switchtec_ntb_ops = { 754 + .mw_count = switchtec_ntb_mw_count, 755 + .mw_get_align = switchtec_ntb_mw_get_align, 756 + .mw_set_trans = switchtec_ntb_mw_set_trans, 757 + .peer_mw_count = switchtec_ntb_peer_mw_count, 758 + .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr, 759 + .link_is_up = switchtec_ntb_link_is_up, 760 + .link_enable = switchtec_ntb_link_enable, 761 + .link_disable = switchtec_ntb_link_disable, 762 + .db_valid_mask = switchtec_ntb_db_valid_mask, 763 + .db_vector_count = switchtec_ntb_db_vector_count, 764 + .db_vector_mask = switchtec_ntb_db_vector_mask, 765 + .db_read = switchtec_ntb_db_read, 766 + .db_clear = switchtec_ntb_db_clear, 767 + .db_set_mask = switchtec_ntb_db_set_mask, 768 + .db_clear_mask = switchtec_ntb_db_clear_mask, 769 + .db_read_mask = switchtec_ntb_db_read_mask, 770 + .peer_db_addr = switchtec_ntb_peer_db_addr, 771 + .peer_db_set = switchtec_ntb_peer_db_set, 772 + .spad_count = switchtec_ntb_spad_count, 773 + .spad_read 
= switchtec_ntb_spad_read, 774 + .spad_write = switchtec_ntb_spad_write, 775 + .peer_spad_read = switchtec_ntb_peer_spad_read, 776 + .peer_spad_write = switchtec_ntb_peer_spad_write, 777 + .peer_spad_addr = switchtec_ntb_peer_spad_addr, 778 + }; 779 + 780 + static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) 781 + { 782 + u64 part_map; 783 + 784 + sndev->ntb.pdev = sndev->stdev->pdev; 785 + sndev->ntb.topo = NTB_TOPO_SWITCH; 786 + sndev->ntb.ops = &switchtec_ntb_ops; 787 + 788 + sndev->self_partition = sndev->stdev->partition; 789 + 790 + sndev->mmio_ntb = sndev->stdev->mmio_ntb; 791 + part_map = ioread64(&sndev->mmio_ntb->ep_map); 792 + part_map &= ~(1 << sndev->self_partition); 793 + sndev->peer_partition = ffs(part_map) - 1; 794 + 795 + dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)", 796 + sndev->self_partition, sndev->stdev->partition_count, 797 + part_map); 798 + 799 + sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb + 800 + SWITCHTEC_NTB_REG_CTRL_OFFSET; 801 + sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb + 802 + SWITCHTEC_NTB_REG_DBMSG_OFFSET; 803 + 804 + sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition]; 805 + sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition]; 806 + sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition]; 807 + } 808 + 809 + static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl) 810 + { 811 + int i; 812 + int cnt = 0; 813 + 814 + for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) { 815 + u32 r = ioread32(&ctrl->bar_entry[i].ctl); 816 + 817 + if (r & NTB_CTRL_BAR_VALID) 818 + map[cnt++] = i; 819 + } 820 + 821 + return cnt; 822 + } 823 + 824 + static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev) 825 + { 826 + sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar, 827 + sndev->mmio_self_ctrl); 828 + 829 + sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries); 830 + sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw); 831 + 832 
+ dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut", 833 + sndev->nr_direct_mw, sndev->nr_lut_mw); 834 + 835 + sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar, 836 + sndev->mmio_peer_ctrl); 837 + 838 + sndev->peer_nr_lut_mw = 839 + ioread16(&sndev->mmio_peer_ctrl->lut_table_entries); 840 + sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw); 841 + 842 + dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut", 843 + sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw); 844 + 845 + } 846 + 847 + /* 848 + * There are 64 doorbells in the switch hardware but this is 849 + * shared among all partitions. So we must split them in half 850 + * (32 for each partition). However, the message interrupts are 851 + * also shared with the top 4 doorbells so we just limit this to 852 + * 28 doorbells per partition 853 + */ 854 + static void switchtec_ntb_init_db(struct switchtec_ntb *sndev) 855 + { 856 + sndev->db_valid_mask = 0x0FFFFFFF; 857 + 858 + if (sndev->self_partition < sndev->peer_partition) { 859 + sndev->db_shift = 0; 860 + sndev->db_peer_shift = 32; 861 + } else { 862 + sndev->db_shift = 32; 863 + sndev->db_peer_shift = 0; 864 + } 865 + 866 + sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL; 867 + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); 868 + iowrite64(sndev->db_valid_mask << sndev->db_peer_shift, 869 + &sndev->mmio_self_dbmsg->odb_mask); 870 + } 871 + 872 + static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev) 873 + { 874 + int i; 875 + u32 msg_map = 0; 876 + 877 + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) { 878 + int m = i | sndev->peer_partition << 2; 879 + 880 + msg_map |= m << i * 8; 881 + } 882 + 883 + iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map); 884 + 885 + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) 886 + iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK, 887 + &sndev->mmio_self_dbmsg->imsg[i]); 888 + } 889 + 890 + static int 
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev) 891 + { 892 + int rc = 0; 893 + u16 req_id; 894 + u32 error; 895 + 896 + req_id = ioread16(&sndev->mmio_ntb->requester_id); 897 + 898 + if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) { 899 + dev_err(&sndev->stdev->dev, 900 + "Not enough requester IDs available."); 901 + return -EFAULT; 902 + } 903 + 904 + rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl, 905 + NTB_CTRL_PART_OP_LOCK, 906 + NTB_CTRL_PART_STATUS_LOCKED); 907 + if (rc) 908 + return rc; 909 + 910 + iowrite32(NTB_PART_CTRL_ID_PROT_DIS, 911 + &sndev->mmio_self_ctrl->partition_ctrl); 912 + 913 + /* 914 + * Root Complex Requester ID (which is 0:00.0) 915 + */ 916 + iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN, 917 + &sndev->mmio_self_ctrl->req_id_table[0]); 918 + 919 + /* 920 + * Host Bridge Requester ID (as read from the mmap address) 921 + */ 922 + iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN, 923 + &sndev->mmio_self_ctrl->req_id_table[1]); 924 + 925 + rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl, 926 + NTB_CTRL_PART_OP_CFG, 927 + NTB_CTRL_PART_STATUS_NORMAL); 928 + if (rc == -EIO) { 929 + error = ioread32(&sndev->mmio_self_ctrl->req_id_error); 930 + dev_err(&sndev->stdev->dev, 931 + "Error setting up the requester ID table: %08x", 932 + error); 933 + } 934 + 935 + return rc; 936 + } 937 + 938 + static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev) 939 + { 940 + int i; 941 + 942 + memset(sndev->self_shared, 0, LUT_SIZE); 943 + sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC; 944 + sndev->self_shared->partition_id = sndev->stdev->partition; 945 + 946 + for (i = 0; i < sndev->nr_direct_mw; i++) { 947 + int bar = sndev->direct_mw_to_bar[i]; 948 + resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar); 949 + 950 + if (i == 0) 951 + sz = min_t(resource_size_t, sz, 952 + LUT_SIZE * sndev->nr_lut_mw); 953 + 954 + sndev->self_shared->mw_sizes[i] = sz; 955 + } 956 + 957 + for (i = 0; i < sndev->nr_lut_mw; 
i++) { 958 + int idx = sndev->nr_direct_mw + i; 959 + 960 + sndev->self_shared->mw_sizes[idx] = LUT_SIZE; 961 + } 962 + } 963 + 964 + static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev) 965 + { 966 + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; 967 + int bar = sndev->direct_mw_to_bar[0]; 968 + u32 ctl_val; 969 + int rc; 970 + 971 + sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, 972 + LUT_SIZE, 973 + &sndev->self_shared_dma, 974 + GFP_KERNEL); 975 + if (!sndev->self_shared) { 976 + dev_err(&sndev->stdev->dev, 977 + "unable to allocate memory for shared mw"); 978 + return -ENOMEM; 979 + } 980 + 981 + switchtec_ntb_init_shared(sndev); 982 + 983 + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, 984 + NTB_CTRL_PART_STATUS_LOCKED); 985 + if (rc) 986 + goto unalloc_and_exit; 987 + 988 + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); 989 + ctl_val &= 0xFF; 990 + ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN; 991 + ctl_val |= ilog2(LUT_SIZE) << 8; 992 + ctl_val |= (sndev->nr_lut_mw - 1) << 14; 993 + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); 994 + 995 + iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | 996 + sndev->self_shared_dma), 997 + &ctl->lut_entry[0]); 998 + 999 + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, 1000 + NTB_CTRL_PART_STATUS_NORMAL); 1001 + if (rc) { 1002 + u32 bar_error, lut_error; 1003 + 1004 + bar_error = ioread32(&ctl->bar_error); 1005 + lut_error = ioread32(&ctl->lut_error); 1006 + dev_err(&sndev->stdev->dev, 1007 + "Error setting up shared MW: %08x / %08x", 1008 + bar_error, lut_error); 1009 + goto unalloc_and_exit; 1010 + } 1011 + 1012 + sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE); 1013 + if (!sndev->peer_shared) { 1014 + rc = -ENOMEM; 1015 + goto unalloc_and_exit; 1016 + } 1017 + 1018 + dev_dbg(&sndev->stdev->dev, "Shared MW Ready"); 1019 + return 0; 1020 + 1021 + unalloc_and_exit: 1022 + dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE, 1023 
+ sndev->self_shared, sndev->self_shared_dma); 1024 + 1025 + return rc; 1026 + } 1027 + 1028 + static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev) 1029 + { 1030 + if (sndev->peer_shared) 1031 + pci_iounmap(sndev->stdev->pdev, sndev->peer_shared); 1032 + 1033 + if (sndev->self_shared) 1034 + dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE, 1035 + sndev->self_shared, 1036 + sndev->self_shared_dma); 1037 + } 1038 + 1039 + static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev) 1040 + { 1041 + struct switchtec_ntb *sndev = dev; 1042 + 1043 + dev_dbg(&sndev->stdev->dev, "doorbell\n"); 1044 + 1045 + ntb_db_event(&sndev->ntb, 0); 1046 + 1047 + return IRQ_HANDLED; 1048 + } 1049 + 1050 + static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev) 1051 + { 1052 + int i; 1053 + struct switchtec_ntb *sndev = dev; 1054 + 1055 + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) { 1056 + u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]); 1057 + 1058 + if (msg & NTB_DBMSG_IMSG_STATUS) { 1059 + dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i, 1060 + (u32)msg); 1061 + iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status); 1062 + 1063 + if (i == LINK_MESSAGE) 1064 + switchtec_ntb_check_link(sndev); 1065 + } 1066 + } 1067 + 1068 + return IRQ_HANDLED; 1069 + } 1070 + 1071 + static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev) 1072 + { 1073 + int i; 1074 + int rc; 1075 + int doorbell_irq = 0; 1076 + int message_irq = 0; 1077 + int event_irq; 1078 + int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map); 1079 + 1080 + event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number); 1081 + 1082 + while (doorbell_irq == event_irq) 1083 + doorbell_irq++; 1084 + while (message_irq == doorbell_irq || 1085 + message_irq == event_irq) 1086 + message_irq++; 1087 + 1088 + dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d", 1089 + event_irq, doorbell_irq, message_irq); 1090 + 1091 + for (i = 0; i < 
idb_vecs - 4; i++) 1092 + iowrite8(doorbell_irq, 1093 + &sndev->mmio_self_dbmsg->idb_vec_map[i]); 1094 + 1095 + for (; i < idb_vecs; i++) 1096 + iowrite8(message_irq, 1097 + &sndev->mmio_self_dbmsg->idb_vec_map[i]); 1098 + 1099 + sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq); 1100 + sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq); 1101 + 1102 + rc = request_irq(sndev->doorbell_irq, 1103 + switchtec_ntb_doorbell_isr, 0, 1104 + "switchtec_ntb_doorbell", sndev); 1105 + if (rc) 1106 + return rc; 1107 + 1108 + rc = request_irq(sndev->message_irq, 1109 + switchtec_ntb_message_isr, 0, 1110 + "switchtec_ntb_message", sndev); 1111 + if (rc) { 1112 + free_irq(sndev->doorbell_irq, sndev); 1113 + return rc; 1114 + } 1115 + 1116 + return 0; 1117 + } 1118 + 1119 + static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev) 1120 + { 1121 + free_irq(sndev->doorbell_irq, sndev); 1122 + free_irq(sndev->message_irq, sndev); 1123 + } 1124 + 1125 + static int switchtec_ntb_add(struct device *dev, 1126 + struct class_interface *class_intf) 1127 + { 1128 + struct switchtec_dev *stdev = to_stdev(dev); 1129 + struct switchtec_ntb *sndev; 1130 + int rc; 1131 + 1132 + stdev->sndev = NULL; 1133 + 1134 + if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE) 1135 + return -ENODEV; 1136 + 1137 + if (stdev->partition_count != 2) 1138 + dev_warn(dev, "ntb driver only supports 2 partitions"); 1139 + 1140 + sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev)); 1141 + if (!sndev) 1142 + return -ENOMEM; 1143 + 1144 + sndev->stdev = stdev; 1145 + switchtec_ntb_init_sndev(sndev); 1146 + switchtec_ntb_init_mw(sndev); 1147 + switchtec_ntb_init_db(sndev); 1148 + switchtec_ntb_init_msgs(sndev); 1149 + 1150 + rc = switchtec_ntb_init_req_id_table(sndev); 1151 + if (rc) 1152 + goto free_and_exit; 1153 + 1154 + rc = switchtec_ntb_init_shared_mw(sndev); 1155 + if (rc) 1156 + goto free_and_exit; 1157 + 1158 + rc = 
switchtec_ntb_init_db_msg_irq(sndev); 1159 + if (rc) 1160 + goto deinit_shared_and_exit; 1161 + 1162 + rc = ntb_register_device(&sndev->ntb); 1163 + if (rc) 1164 + goto deinit_and_exit; 1165 + 1166 + stdev->sndev = sndev; 1167 + stdev->link_notifier = switchtec_ntb_link_notification; 1168 + dev_info(dev, "NTB device registered"); 1169 + 1170 + return 0; 1171 + 1172 + deinit_and_exit: 1173 + switchtec_ntb_deinit_db_msg_irq(sndev); 1174 + deinit_shared_and_exit: 1175 + switchtec_ntb_deinit_shared_mw(sndev); 1176 + free_and_exit: 1177 + kfree(sndev); 1178 + dev_err(dev, "failed to register ntb device: %d", rc); 1179 + return rc; 1180 + } 1181 + 1182 + void switchtec_ntb_remove(struct device *dev, 1183 + struct class_interface *class_intf) 1184 + { 1185 + struct switchtec_dev *stdev = to_stdev(dev); 1186 + struct switchtec_ntb *sndev = stdev->sndev; 1187 + 1188 + if (!sndev) 1189 + return; 1190 + 1191 + stdev->link_notifier = NULL; 1192 + stdev->sndev = NULL; 1193 + ntb_unregister_device(&sndev->ntb); 1194 + switchtec_ntb_deinit_db_msg_irq(sndev); 1195 + switchtec_ntb_deinit_shared_mw(sndev); 1196 + kfree(sndev); 1197 + dev_info(dev, "ntb device unregistered"); 1198 + } 1199 + 1200 + static struct class_interface switchtec_interface = { 1201 + .add_dev = switchtec_ntb_add, 1202 + .remove_dev = switchtec_ntb_remove, 1203 + }; 1204 + 1205 + static int __init switchtec_ntb_init(void) 1206 + { 1207 + switchtec_interface.class = switchtec_class; 1208 + return class_interface_register(&switchtec_interface); 1209 + } 1210 + module_init(switchtec_ntb_init); 1211 + 1212 + static void __exit switchtec_ntb_exit(void) 1213 + { 1214 + class_interface_unregister(&switchtec_interface); 1215 + } 1216 + module_exit(switchtec_ntb_exit);
+10 -10
drivers/ntb/ntb_transport.c
··· 191 191 struct ntb_transport_mw { 192 192 phys_addr_t phys_addr; 193 193 resource_size_t phys_size; 194 - resource_size_t xlat_align; 195 - resource_size_t xlat_align_size; 196 194 void __iomem *vbase; 197 195 size_t xlat_size; 198 196 size_t buff_size; ··· 685 687 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 686 688 struct pci_dev *pdev = nt->ndev->pdev; 687 689 size_t xlat_size, buff_size; 690 + resource_size_t xlat_align; 691 + resource_size_t xlat_align_size; 688 692 int rc; 689 693 690 694 if (!size) 691 695 return -EINVAL; 692 696 693 - xlat_size = round_up(size, mw->xlat_align_size); 694 - buff_size = round_up(size, mw->xlat_align); 697 + rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align, 698 + &xlat_align_size, NULL); 699 + if (rc) 700 + return rc; 701 + 702 + xlat_size = round_up(size, xlat_align_size); 703 + buff_size = round_up(size, xlat_align); 695 704 696 705 /* No need to re-setup */ 697 706 if (mw->xlat_size == xlat_size) ··· 727 722 * is a requirement of the hardware. It is recommended to setup CMA 728 723 * for BAR sizes equal or greater than 4MB. 729 724 */ 730 - if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) { 725 + if (!IS_ALIGNED(mw->dma_addr, xlat_align)) { 731 726 dev_err(&pdev->dev, "DMA memory %pad is not aligned\n", 732 727 &mw->dma_addr); 733 728 ntb_free_mw(nt, num_mw); ··· 1108 1103 1109 1104 for (i = 0; i < mw_count; i++) { 1110 1105 mw = &nt->mw_vec[i]; 1111 - 1112 - rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align, 1113 - &mw->xlat_align_size, NULL); 1114 - if (rc) 1115 - goto err1; 1116 1106 1117 1107 rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, 1118 1108 &mw->phys_size);
+9 -9
drivers/ntb/test/ntb_perf.c
··· 108 108 struct perf_mw { 109 109 phys_addr_t phys_addr; 110 110 resource_size_t phys_size; 111 - resource_size_t xlat_align; 112 - resource_size_t xlat_align_size; 113 111 void __iomem *vbase; 114 112 size_t xlat_size; 115 113 size_t buf_size; ··· 470 472 { 471 473 struct perf_mw *mw = &perf->mw; 472 474 size_t xlat_size, buf_size; 475 + resource_size_t xlat_align; 476 + resource_size_t xlat_align_size; 473 477 int rc; 474 478 475 479 if (!size) 476 480 return -EINVAL; 477 481 478 - xlat_size = round_up(size, mw->xlat_align_size); 479 - buf_size = round_up(size, mw->xlat_align); 482 + rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align, 483 + &xlat_align_size, NULL); 484 + if (rc) 485 + return rc; 486 + 487 + xlat_size = round_up(size, xlat_align_size); 488 + buf_size = round_up(size, xlat_align); 480 489 481 490 if (mw->xlat_size == xlat_size) 482 491 return 0; ··· 571 566 int rc; 572 567 573 568 mw = &perf->mw; 574 - 575 - rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align, 576 - &mw->xlat_align_size, NULL); 577 - if (rc) 578 - return rc; 579 569 580 570 rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size); 581 571 if (rc)
+3 -3
drivers/ntb/test/ntb_tool.c
··· 753 753 754 754 phys_addr_t base; 755 755 resource_size_t mw_size; 756 - resource_size_t align_addr; 757 - resource_size_t align_size; 758 - resource_size_t max_size; 756 + resource_size_t align_addr = 0; 757 + resource_size_t align_size = 0; 758 + resource_size_t max_size = 0; 759 759 760 760 buf_size = min_t(size_t, size, 512); 761 761
+57 -259
drivers/pci/switch/switchtec.c
··· 13 13 * 14 14 */ 15 15 16 + #include <linux/switchtec.h> 16 17 #include <linux/switchtec_ioctl.h> 17 18 18 19 #include <linux/interrupt.h> ··· 21 20 #include <linux/fs.h> 22 21 #include <linux/uaccess.h> 23 22 #include <linux/poll.h> 24 - #include <linux/pci.h> 25 - #include <linux/cdev.h> 26 23 #include <linux/wait.h> 27 24 28 25 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); ··· 33 34 MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); 34 35 35 36 static dev_t switchtec_devt; 36 - static struct class *switchtec_class; 37 37 static DEFINE_IDA(switchtec_minor_ida); 38 38 39 - #define MICROSEMI_VENDOR_ID 0x11f8 40 - #define MICROSEMI_NTB_CLASSCODE 0x068000 41 - #define MICROSEMI_MGMT_CLASSCODE 0x058000 42 - 43 - #define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 44 - #define SWITCHTEC_MAX_PFF_CSR 48 45 - 46 - #define SWITCHTEC_EVENT_OCCURRED BIT(0) 47 - #define SWITCHTEC_EVENT_CLEAR BIT(0) 48 - #define SWITCHTEC_EVENT_EN_LOG BIT(1) 49 - #define SWITCHTEC_EVENT_EN_CLI BIT(2) 50 - #define SWITCHTEC_EVENT_EN_IRQ BIT(3) 51 - #define SWITCHTEC_EVENT_FATAL BIT(4) 52 - 53 - enum { 54 - SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, 55 - SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, 56 - SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800, 57 - SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000, 58 - SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200, 59 - SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000, 60 - SWITCHTEC_GAS_NTB_OFFSET = 0x10000, 61 - SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000, 62 - }; 63 - 64 - struct mrpc_regs { 65 - u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; 66 - u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; 67 - u32 cmd; 68 - u32 status; 69 - u32 ret_value; 70 - } __packed; 71 - 72 - enum mrpc_status { 73 - SWITCHTEC_MRPC_STATUS_INPROGRESS = 1, 74 - SWITCHTEC_MRPC_STATUS_DONE = 2, 75 - SWITCHTEC_MRPC_STATUS_ERROR = 0xFF, 76 - SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100, 77 - }; 78 - 79 - struct sw_event_regs { 80 - u64 event_report_ctrl; 81 - u64 reserved1; 82 - u64 
part_event_bitmap; 83 - u64 reserved2; 84 - u32 global_summary; 85 - u32 reserved3[3]; 86 - u32 stack_error_event_hdr; 87 - u32 stack_error_event_data; 88 - u32 reserved4[4]; 89 - u32 ppu_error_event_hdr; 90 - u32 ppu_error_event_data; 91 - u32 reserved5[4]; 92 - u32 isp_error_event_hdr; 93 - u32 isp_error_event_data; 94 - u32 reserved6[4]; 95 - u32 sys_reset_event_hdr; 96 - u32 reserved7[5]; 97 - u32 fw_exception_hdr; 98 - u32 reserved8[5]; 99 - u32 fw_nmi_hdr; 100 - u32 reserved9[5]; 101 - u32 fw_non_fatal_hdr; 102 - u32 reserved10[5]; 103 - u32 fw_fatal_hdr; 104 - u32 reserved11[5]; 105 - u32 twi_mrpc_comp_hdr; 106 - u32 twi_mrpc_comp_data; 107 - u32 reserved12[4]; 108 - u32 twi_mrpc_comp_async_hdr; 109 - u32 twi_mrpc_comp_async_data; 110 - u32 reserved13[4]; 111 - u32 cli_mrpc_comp_hdr; 112 - u32 cli_mrpc_comp_data; 113 - u32 reserved14[4]; 114 - u32 cli_mrpc_comp_async_hdr; 115 - u32 cli_mrpc_comp_async_data; 116 - u32 reserved15[4]; 117 - u32 gpio_interrupt_hdr; 118 - u32 gpio_interrupt_data; 119 - u32 reserved16[4]; 120 - } __packed; 121 - 122 - enum { 123 - SWITCHTEC_CFG0_RUNNING = 0x04, 124 - SWITCHTEC_CFG1_RUNNING = 0x05, 125 - SWITCHTEC_IMG0_RUNNING = 0x03, 126 - SWITCHTEC_IMG1_RUNNING = 0x07, 127 - }; 128 - 129 - struct sys_info_regs { 130 - u32 device_id; 131 - u32 device_version; 132 - u32 firmware_version; 133 - u32 reserved1; 134 - u32 vendor_table_revision; 135 - u32 table_format_version; 136 - u32 partition_id; 137 - u32 cfg_file_fmt_version; 138 - u16 cfg_running; 139 - u16 img_running; 140 - u32 reserved2[57]; 141 - char vendor_id[8]; 142 - char product_id[16]; 143 - char product_revision[4]; 144 - char component_vendor[8]; 145 - u16 component_id; 146 - u8 component_revision; 147 - } __packed; 148 - 149 - struct flash_info_regs { 150 - u32 flash_part_map_upd_idx; 151 - 152 - struct active_partition_info { 153 - u32 address; 154 - u32 build_version; 155 - u32 build_string; 156 - } active_img; 157 - 158 - struct active_partition_info active_cfg; 
159 - struct active_partition_info inactive_img; 160 - struct active_partition_info inactive_cfg; 161 - 162 - u32 flash_length; 163 - 164 - struct partition_info { 165 - u32 address; 166 - u32 length; 167 - } cfg0; 168 - 169 - struct partition_info cfg1; 170 - struct partition_info img0; 171 - struct partition_info img1; 172 - struct partition_info nvlog; 173 - struct partition_info vendor[8]; 174 - }; 175 - 176 - struct ntb_info_regs { 177 - u8 partition_count; 178 - u8 partition_id; 179 - u16 reserved1; 180 - u64 ep_map; 181 - u16 requester_id; 182 - } __packed; 183 - 184 - struct part_cfg_regs { 185 - u32 status; 186 - u32 state; 187 - u32 port_cnt; 188 - u32 usp_port_mode; 189 - u32 usp_pff_inst_id; 190 - u32 vep_pff_inst_id; 191 - u32 dsp_pff_inst_id[47]; 192 - u32 reserved1[11]; 193 - u16 vep_vector_number; 194 - u16 usp_vector_number; 195 - u32 port_event_bitmap; 196 - u32 reserved2[3]; 197 - u32 part_event_summary; 198 - u32 reserved3[3]; 199 - u32 part_reset_hdr; 200 - u32 part_reset_data[5]; 201 - u32 mrpc_comp_hdr; 202 - u32 mrpc_comp_data[5]; 203 - u32 mrpc_comp_async_hdr; 204 - u32 mrpc_comp_async_data[5]; 205 - u32 dyn_binding_hdr; 206 - u32 dyn_binding_data[5]; 207 - u32 reserved4[159]; 208 - } __packed; 209 - 210 - enum { 211 - SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0, 212 - SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1, 213 - SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2, 214 - SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3, 215 - }; 216 - 217 - struct pff_csr_regs { 218 - u16 vendor_id; 219 - u16 device_id; 220 - u32 pci_cfg_header[15]; 221 - u32 pci_cap_region[48]; 222 - u32 pcie_cap_region[448]; 223 - u32 indirect_gas_window[128]; 224 - u32 indirect_gas_window_off; 225 - u32 reserved[127]; 226 - u32 pff_event_summary; 227 - u32 reserved2[3]; 228 - u32 aer_in_p2p_hdr; 229 - u32 aer_in_p2p_data[5]; 230 - u32 aer_in_vep_hdr; 231 - u32 aer_in_vep_data[5]; 232 - u32 dpc_hdr; 233 - u32 dpc_data[5]; 234 - u32 cts_hdr; 235 - u32 cts_data[5]; 236 - u32 
reserved3[6]; 237 - u32 hotplug_hdr; 238 - u32 hotplug_data[5]; 239 - u32 ier_hdr; 240 - u32 ier_data[5]; 241 - u32 threshold_hdr; 242 - u32 threshold_data[5]; 243 - u32 power_mgmt_hdr; 244 - u32 power_mgmt_data[5]; 245 - u32 tlp_throttling_hdr; 246 - u32 tlp_throttling_data[5]; 247 - u32 force_speed_hdr; 248 - u32 force_speed_data[5]; 249 - u32 credit_timeout_hdr; 250 - u32 credit_timeout_data[5]; 251 - u32 link_state_hdr; 252 - u32 link_state_data[5]; 253 - u32 reserved4[174]; 254 - } __packed; 255 - 256 - struct switchtec_dev { 257 - struct pci_dev *pdev; 258 - struct device dev; 259 - struct cdev cdev; 260 - 261 - int partition; 262 - int partition_count; 263 - int pff_csr_count; 264 - char pff_local[SWITCHTEC_MAX_PFF_CSR]; 265 - 266 - void __iomem *mmio; 267 - struct mrpc_regs __iomem *mmio_mrpc; 268 - struct sw_event_regs __iomem *mmio_sw_event; 269 - struct sys_info_regs __iomem *mmio_sys_info; 270 - struct flash_info_regs __iomem *mmio_flash_info; 271 - struct ntb_info_regs __iomem *mmio_ntb; 272 - struct part_cfg_regs __iomem *mmio_part_cfg; 273 - struct part_cfg_regs __iomem *mmio_part_cfg_all; 274 - struct pff_csr_regs __iomem *mmio_pff_csr; 275 - 276 - /* 277 - * The mrpc mutex must be held when accessing the other 278 - * mrpc_ fields, alive flag and stuser->state field 279 - */ 280 - struct mutex mrpc_mutex; 281 - struct list_head mrpc_queue; 282 - int mrpc_busy; 283 - struct work_struct mrpc_work; 284 - struct delayed_work mrpc_timeout; 285 - bool alive; 286 - 287 - wait_queue_head_t event_wq; 288 - atomic_t event_cnt; 289 - }; 290 - 291 - static struct switchtec_dev *to_stdev(struct device *dev) 292 - { 293 - return container_of(dev, struct switchtec_dev, dev); 294 - } 39 + struct class *switchtec_class; 40 + EXPORT_SYMBOL_GPL(switchtec_class); 295 41 296 42 enum mrpc_state { 297 43 MRPC_IDLE = 0, ··· 978 1234 .compat_ioctl = switchtec_dev_ioctl, 979 1235 }; 980 1236 1237 + static void link_event_work(struct work_struct *work) 1238 + { 1239 + struct 
switchtec_dev *stdev; 1240 + 1241 + stdev = container_of(work, struct switchtec_dev, link_event_work); 1242 + 1243 + if (stdev->link_notifier) 1244 + stdev->link_notifier(stdev); 1245 + } 1246 + 1247 + static void check_link_state_events(struct switchtec_dev *stdev) 1248 + { 1249 + int idx; 1250 + u32 reg; 1251 + int count; 1252 + int occurred = 0; 1253 + 1254 + for (idx = 0; idx < stdev->pff_csr_count; idx++) { 1255 + reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr); 1256 + dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg); 1257 + count = (reg >> 5) & 0xFF; 1258 + 1259 + if (count != stdev->link_event_count[idx]) { 1260 + occurred = 1; 1261 + stdev->link_event_count[idx] = count; 1262 + } 1263 + } 1264 + 1265 + if (occurred) 1266 + schedule_work(&stdev->link_event_work); 1267 + } 1268 + 1269 + static void enable_link_state_events(struct switchtec_dev *stdev) 1270 + { 1271 + int idx; 1272 + 1273 + for (idx = 0; idx < stdev->pff_csr_count; idx++) { 1274 + iowrite32(SWITCHTEC_EVENT_CLEAR | 1275 + SWITCHTEC_EVENT_EN_IRQ, 1276 + &stdev->mmio_pff_csr[idx].link_state_hdr); 1277 + } 1278 + } 1279 + 981 1280 static void stdev_release(struct device *dev) 982 1281 { 983 1282 struct switchtec_dev *stdev = to_stdev(dev); ··· 1073 1286 stdev->mrpc_busy = 0; 1074 1287 INIT_WORK(&stdev->mrpc_work, mrpc_event_work); 1075 1288 INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work); 1289 + INIT_WORK(&stdev->link_event_work, link_event_work); 1076 1290 init_waitqueue_head(&stdev->event_wq); 1077 1291 atomic_set(&stdev->event_cnt, 0); 1078 1292 ··· 1117 1329 if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) 1118 1330 return 0; 1119 1331 1332 + if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE) 1333 + return 0; 1334 + 1120 1335 dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); 1121 1336 hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED); 1122 1337 iowrite32(hdr, hdr_reg); ··· 1139 1348 for (idx = 0; idx < stdev->pff_csr_count; 
idx++) { 1140 1349 if (!stdev->pff_local[idx]) 1141 1350 continue; 1351 + 1142 1352 count += mask_event(stdev, eid, idx); 1143 1353 } 1144 1354 } else { ··· 1163 1371 schedule_work(&stdev->mrpc_work); 1164 1372 iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr); 1165 1373 } 1374 + 1375 + check_link_state_events(stdev); 1166 1376 1167 1377 for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) 1168 1378 event_count += mask_all_events(stdev, eid); ··· 1275 1481 struct switchtec_dev *stdev; 1276 1482 int rc; 1277 1483 1484 + if (pdev->class == MICROSEMI_NTB_CLASSCODE) 1485 + request_module_nowait("ntb_hw_switchtec"); 1486 + 1278 1487 stdev = stdev_create(pdev); 1279 1488 if (IS_ERR(stdev)) 1280 1489 return PTR_ERR(stdev); ··· 1295 1498 iowrite32(SWITCHTEC_EVENT_CLEAR | 1296 1499 SWITCHTEC_EVENT_EN_IRQ, 1297 1500 &stdev->mmio_part_cfg->mrpc_comp_hdr); 1501 + enable_link_state_events(stdev); 1298 1502 1299 1503 rc = cdev_device_add(&stdev->cdev, &stdev->dev); 1300 1504 if (rc)
+9 -2
include/linux/ntb.h
··· 70 70 * @NTB_TOPO_SEC: On secondary side of remote ntb. 71 71 * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. 72 72 * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. 73 + * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. 73 74 */ 74 75 enum ntb_topo { 75 76 NTB_TOPO_NONE = -1, ··· 78 77 NTB_TOPO_SEC, 79 78 NTB_TOPO_B2B_USD, 80 79 NTB_TOPO_B2B_DSD, 80 + NTB_TOPO_SWITCH, 81 81 }; 82 82 83 83 static inline int ntb_topo_is_b2b(enum ntb_topo topo) ··· 99 97 case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; 100 98 case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; 101 99 case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; 100 + case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; 102 101 } 103 102 return "NTB_TOPO_INVALID"; 104 103 } ··· 733 730 * Hardware and topology may support a different number of memory windows. 734 731 * Moreover different peer devices can support different number of memory 735 732 * windows. Simply speaking this method returns the number of possible inbound 736 - * memory windows to share with specified peer device. 733 + * memory windows to share with specified peer device. Note: this may return 734 + * zero if the link is not up yet. 737 735 * 738 736 * Return: the number of memory windows. 739 737 */ ··· 755 751 * Get the alignments of an inbound memory window with specified index. 756 752 * NULL may be given for any output parameter if the value is not needed. 757 753 * The alignment and size parameters may be used for allocation of proper 758 - * shared memory. 754 + * shared memory. Note: this must only be called when the link is up. 759 755 * 760 756 * Return: Zero on success, otherwise a negative error number. 
761 757 */ ··· 764 760 resource_size_t *size_align, 765 761 resource_size_t *size_max) 766 762 { 763 + if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx))) 764 + return -ENOTCONN; 765 + 767 766 return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, 768 767 size_max); 769 768 }
+373
include/linux/switchtec.h
/*
 * Microsemi Switchtec PCIe Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#ifndef _SWITCHTEC_H
#define _SWITCHTEC_H

#include <linux/pci.h>
#include <linux/cdev.h>

/* PCI vendor/class IDs used to match the switch's endpoint functions */
#define MICROSEMI_VENDOR_ID         0x11f8
#define MICROSEMI_NTB_CLASSCODE     0x068000
#define MICROSEMI_MGMT_CLASSCODE    0x058000

/* Size, in bytes, of the MRPC command input/output buffers below */
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
/* Maximum number of PFF CSR register sets (see struct pff_csr_regs) */
#define SWITCHTEC_MAX_PFF_CSR       48

/*
 * Bits shared by the various *_hdr event registers declared below.
 * Bit 0 doubles as the "occurred" status (on read) and the "clear"
 * control (on write); the EN_* bits presumably select where the event
 * is reported (log/CLI/IRQ) — confirm against the hardware manual.
 */
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR    BIT(0)
#define SWITCHTEC_EVENT_EN_LOG   BIT(1)
#define SWITCHTEC_EVENT_EN_CLI   BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ   BIT(3)
#define SWITCHTEC_EVENT_FATAL    BIT(4)

/*
 * Byte offsets of each register region within the device's
 * memory-mapped GAS space (mapped through switchtec_dev.mmio and the
 * per-region mmio_* pointers below).
 */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET       = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET    = 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET   = 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET   = 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET   = 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET        = 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET    = 0x134000,
};

/*
 * MRPC command mailbox: a command code plus fixed-size input/output
 * payload buffers, with completion reported through status/ret_value.
 */
struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u32 cmd;
	u32 status;
	u32 ret_value;
} __packed;

/* Values of mrpc_regs.status while a command executes/completes */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
	SWITCHTEC_MRPC_STATUS_DONE = 2,
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};

/*
 * Switch-global event registers.  Events come in hdr/data pairs; the
 * hdr word carries the SWITCHTEC_EVENT_* bits defined above.
 */
struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;

/* Values reported in sys_info_regs.cfg_running / .img_running */
enum {
	SWITCHTEC_CFG0_RUNNING = 0x04,
	SWITCHTEC_CFG1_RUNNING = 0x05,
	SWITCHTEC_IMG0_RUNNING = 0x03,
	SWITCHTEC_IMG1_RUNNING = 0x07,
};

/* Static device/firmware identification registers */
struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u16 cfg_running;
	u16 img_running;
	u32 reserved2[57];
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;

/*
 * Flash partition map registers.
 *
 * NOTE(review): unlike the other register structs in this header this
 * one is not marked __packed.  Every member is a naturally aligned
 * 32-bit quantity so the layout comes out the same either way — confirm
 * the omission is intentional.
 */
struct flash_info_regs {
	u32 flash_part_map_upd_idx;

	struct active_partition_info {
		u32 address;
		u32 build_version;
		u32 build_string;
	} active_img;

	struct active_partition_info active_cfg;
	struct active_partition_info inactive_img;
	struct active_partition_info inactive_cfg;

	u32 flash_length;

	struct partition_info {
		u32 address;
		u32 length;
	} cfg0;

	struct partition_info cfg1;
	struct partition_info img0;
	struct partition_info img1;
	struct partition_info nvlog;
	struct partition_info vendor[8];
};

/*
 * Byte offsets of the NTB sub-regions relative to
 * SWITCHTEC_GAS_NTB_OFFSET.
 */
enum {
	SWITCHTEC_NTB_REG_INFO_OFFSET  = 0x0000,
	SWITCHTEC_NTB_REG_CTRL_OFFSET  = 0x4000,
	SWITCHTEC_NTB_REG_DBMSG_OFFSET = 0x64000,
};

/* Read-only NTB topology information (mapped at mmio_ntb) */
struct ntb_info_regs {
	u8  partition_count;
	u8  partition_id;
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;

/*
 * Per-partition configuration and event registers (mapped at
 * mmio_part_cfg for the local partition, mmio_part_cfg_all for the
 * full array).
 */
struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;
	u32 vep_pff_inst_id;
	u32 dsp_pff_inst_id[47];
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;

/* Opcodes, status codes and control bits for struct ntb_ctrl_regs */
enum {
	/* partition_op commands */
	NTB_CTRL_PART_OP_LOCK = 0x1,
	NTB_CTRL_PART_OP_CFG = 0x2,
	NTB_CTRL_PART_OP_RESET = 0x3,

	/* partition_status values */
	NTB_CTRL_PART_STATUS_NORMAL = 0x1,
	NTB_CTRL_PART_STATUS_LOCKED = 0x2,
	NTB_CTRL_PART_STATUS_LOCKING = 0x3,
	NTB_CTRL_PART_STATUS_CONFIGURING = 0x4,
	NTB_CTRL_PART_STATUS_RESETTING = 0x5,

	/* bar_entry[].ctl bits */
	NTB_CTRL_BAR_VALID = 1 << 0,
	NTB_CTRL_BAR_DIR_WIN_EN = 1 << 4,
	NTB_CTRL_BAR_LUT_WIN_EN = 1 << 5,

	/* req_id_table entry bit */
	NTB_CTRL_REQ_ID_EN = 1 << 0,

	/* lut_entry bit */
	NTB_CTRL_LUT_EN = 1 << 0,

	/* partition_ctrl bit */
	NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
};

/*
 * NTB control registers: BAR/window setup, the requester-ID
 * translation table and the LUT window entries.
 */
struct ntb_ctrl_regs {
	u32 partition_status;
	u32 partition_op;
	u32 partition_ctrl;
	u32 bar_setup;
	u32 bar_error;
	u16 lut_table_entries;
	u16 lut_table_offset;
	u32 lut_error;
	u16 req_id_table_size;
	u16 req_id_table_offset;
	u32 req_id_error;
	u32 reserved1[7];
	struct {
		u32 ctl;
		u32 win_size;
		u64 xlate_addr;
	} bar_entry[6];
	u32 reserved2[216];
	u32 req_id_table[256];
	u32 reserved3[512];
	u64 lut_entry[512];
} __packed;

/* Bits of the 64-bit incoming-message (imsg) registers */
#define NTB_DBMSG_IMSG_STATUS BIT_ULL(32)
#define NTB_DBMSG_IMSG_MASK   BIT_ULL(40)

/* NTB doorbell and message registers, plus the MSI-X table/PBA */
struct ntb_dbmsg_regs {
	u32 reserved1[1024];
	u64 odb;	/* outgoing doorbells */
	u64 odb_mask;
	u64 idb;	/* incoming doorbells */
	u64 idb_mask;
	u8  idb_vec_map[64];
	u32 msg_map;
	u32 reserved2;
	struct {
		u32 msg;
		u32 status;
	} omsg[4];	/* outgoing messages */

	struct {
		u32 msg;
		u8  status;
		u8  mask;
		u8  src;
		u8  reserved;
	} imsg[4];	/* incoming messages */

	u8 reserved3[3928];
	u8 msix_table[1024];
	u8 reserved4[3072];
	u8 pba[24];
	u8 reserved5[4072];
} __packed;

/* Bits of part_cfg_regs.part_event_summary */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};

/*
 * Per-PFF (PCI function framework instance) CSRs: a shadow of the
 * function's config space followed by per-function event registers.
 */
struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;
	u32 reserved2[3];
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;

struct switchtec_ntb;

/*
 * Per-device state shared between the switchtec management driver and
 * the switchtec_ntb upper-layer driver.
 */
struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;

	int partition;
	int partition_count;
	int pff_csr_count;
	char pff_local[SWITCHTEC_MAX_PFF_CSR];

	/* Base GAS mapping and the register regions carved out of it */
	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;
	struct part_cfg_regs __iomem *mmio_part_cfg_all;
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc mutex must be held when accessing the other
	 * mrpc_ fields, alive flag and stuser->state field
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;
	int mrpc_busy;
	struct work_struct mrpc_work;
	struct delayed_work mrpc_timeout;
	bool alive;

	wait_queue_head_t event_wq;
	atomic_t event_cnt;

	/* Link-event dispatch; link_notifier is set by switchtec_ntb */
	struct work_struct link_event_work;
	void (*link_notifier)(struct switchtec_dev *stdev);
	u8 link_event_count[SWITCHTEC_MAX_PFF_CSR];

	struct switchtec_ntb *sndev;
};

/* Map an embedded struct device back to its switchtec_dev container */
static inline struct switchtec_dev *to_stdev(struct device *dev)
{
	return container_of(dev, struct switchtec_dev, dev);
}

/* Device class exported by the management driver for upper layers */
extern struct class *switchtec_class;

#endif