Merge tag 'ntb-3.15' of git://github.com/jonmason/ntb

Pull PCIe non-transparent bridge fixes and features from Jon Mason:
"NTB driver bug fixes to address issues in list traversal, skb leak in
ntb_netdev, a typo, and a leak of msix entries in the error path.
Clean ups of the event handling logic, as well as an overall style
cleanup. Finally, the driver was converted to use the new
pci_enable_msix_range logic (and the refactoring to go along with it)"

* tag 'ntb-3.15' of git://github.com/jonmason/ntb:
ntb: Use pci_enable_msix_range() instead of pci_enable_msix()
ntb: Split ntb_setup_msix() into separate BWD/SNB routines
ntb: Use pci_msix_vec_count() to obtain number of MSI-Xs
NTB: Code Style Clean-up
NTB: client event cleanup
ntb: Fix leakage of ntb_device::msix_entries[] array
NTB: Fix typo in setting one translation register
ntb_netdev: Fix skb free issue in open
ntb_netdev: Fix list_for_each_entry exit issue

+150 -116
+20 -7
drivers/net/ntb_netdev.c
··· 78 netdev_dbg(ndev, "Event %x, Link %x\n", status, 79 ntb_transport_link_query(dev->qp)); 80 81 - /* Currently, only link status event is supported */ 82 - if (status) 83 - netif_carrier_on(ndev); 84 - else 85 netif_carrier_off(ndev); 86 } 87 88 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, ··· 190 191 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 192 ndev->mtu + ETH_HLEN); 193 - if (rc == -EINVAL) 194 goto err; 195 } 196 197 netif_carrier_off(ndev); ··· 377 { 378 struct net_device *ndev; 379 struct ntb_netdev *dev; 380 381 list_for_each_entry(dev, &dev_list, list) { 382 - if (dev->pdev == pdev) 383 break; 384 } 385 - if (dev == NULL) 386 return; 387 388 list_del(&dev->list);
··· 78 netdev_dbg(ndev, "Event %x, Link %x\n", status, 79 ntb_transport_link_query(dev->qp)); 80 81 + switch (status) { 82 + case NTB_LINK_DOWN: 83 netif_carrier_off(ndev); 84 + break; 85 + case NTB_LINK_UP: 86 + if (!ntb_transport_link_query(dev->qp)) 87 + return; 88 + 89 + netif_carrier_on(ndev); 90 + break; 91 + default: 92 + netdev_warn(ndev, "Unsupported event type %d\n", status); 93 + } 94 } 95 96 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, ··· 182 183 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 184 ndev->mtu + ETH_HLEN); 185 + if (rc == -EINVAL) { 186 + dev_kfree_skb(skb); 187 goto err; 188 + } 189 } 190 191 netif_carrier_off(ndev); ··· 367 { 368 struct net_device *ndev; 369 struct ntb_netdev *dev; 370 + bool found = false; 371 372 list_for_each_entry(dev, &dev_list, list) { 373 + if (dev->pdev == pdev) { 374 + found = true; 375 break; 376 + } 377 } 378 + if (!found) 379 return; 380 381 list_del(&dev->list);
+107 -85
drivers/ntb/ntb_hw.c
··· 91 /* Translate memory window 0,1 to BAR 2,4 */ 92 #define MW_TO_BAR(mw) (mw * NTB_MAX_NUM_MW + 2) 93 94 - static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = { 95 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)}, 96 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)}, 97 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)}, ··· 120 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 121 */ 122 int ntb_register_event_callback(struct ntb_device *ndev, 123 - void (*func)(void *handle, enum ntb_hw_event event)) 124 { 125 if (ndev->event_cb) 126 return -EINVAL; ··· 716 SNB_PBAR4LMT_OFFSET); 717 /* HW errata on the Limit registers. They can only be 718 * written when the base register is 4GB aligned and 719 - * < 32bit. This should already be the case based on the 720 - * driver defaults, but write the Limit registers first 721 - * just in case. 722 */ 723 } else { 724 ndev->limits.max_mw = SNB_MAX_MW; ··· 740 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET); 741 /* HW errata on the Limit registers. They can only be 742 * written when the base register is 4GB aligned and 743 - * < 32bit. This should already be the case based on the 744 - * driver defaults, but write the Limit registers first 745 - * just in case. 746 */ 747 } 748 ··· 786 /* B2B_XLAT_OFFSET is a 64bit register, but can 787 * only take 32bit writes 788 */ 789 - writel(SNB_MBAR01_DSD_ADDR & 0xffffffff, 790 ndev->reg_base + SNB_B2B_XLAT_OFFSETL); 791 writel(SNB_MBAR01_USD_ADDR >> 32, 792 ndev->reg_base + SNB_B2B_XLAT_OFFSETU); ··· 804 ndev->conn_type = NTB_CONN_RP; 805 806 if (xeon_errata_workaround) { 807 - dev_err(&ndev->pdev->dev, 808 "NTB-RP disabled due to hardware errata. 
To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n"); 809 return -EINVAL; 810 } ··· 1080 return IRQ_HANDLED; 1081 } 1082 1083 - static int ntb_setup_msix(struct ntb_device *ndev) 1084 { 1085 struct pci_dev *pdev = ndev->pdev; 1086 struct msix_entry *msix; 1087 - int msix_entries; 1088 int rc, i; 1089 - u16 val; 1090 1091 - if (!pdev->msix_cap) { 1092 - rc = -EIO; 1093 - goto err; 1094 } 1095 1096 - rc = pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &val); 1097 - if (rc) 1098 - goto err; 1099 1100 - msix_entries = msix_table_size(val); 1101 - if (msix_entries > ndev->limits.msix_cnt) { 1102 rc = -EINVAL; 1103 goto err; 1104 } ··· 1192 for (i = 0; i < msix_entries; i++) 1193 ndev->msix_entries[i].entry = i; 1194 1195 - rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries); 1196 - if (rc < 0) 1197 - goto err1; 1198 - if (rc > 0) { 1199 - /* On SNB, the link interrupt is always tied to 4th vector. If 1200 - * we can't get all 4, then we can't use MSI-X. 1201 - */ 1202 - if (ndev->hw_type != BWD_HW) { 1203 - rc = -EIO; 1204 - goto err1; 1205 - } 1206 - 1207 - dev_warn(&pdev->dev, 1208 - "Only %d MSI-X vectors. 
Limiting the number of queues to that number.\n", 1209 - rc); 1210 - msix_entries = rc; 1211 - 1212 - rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries); 1213 - if (rc) 1214 - goto err1; 1215 - } 1216 - 1217 - for (i = 0; i < msix_entries; i++) { 1218 - msix = &ndev->msix_entries[i]; 1219 - WARN_ON(!msix->vector); 1220 - 1221 - /* Use the last MSI-X vector for Link status */ 1222 - if (ndev->hw_type == BWD_HW) { 1223 - rc = request_irq(msix->vector, bwd_callback_msix_irq, 0, 1224 - "ntb-callback-msix", &ndev->db_cb[i]); 1225 - if (rc) 1226 - goto err2; 1227 - } else { 1228 - if (i == msix_entries - 1) { 1229 - rc = request_irq(msix->vector, 1230 - xeon_event_msix_irq, 0, 1231 - "ntb-event-msix", ndev); 1232 - if (rc) 1233 - goto err2; 1234 - } else { 1235 - rc = request_irq(msix->vector, 1236 - xeon_callback_msix_irq, 0, 1237 - "ntb-callback-msix", 1238 - &ndev->db_cb[i]); 1239 - if (rc) 1240 - goto err2; 1241 - } 1242 - } 1243 - } 1244 - 1245 - ndev->num_msix = msix_entries; 1246 if (ndev->hw_type == BWD_HW) 1247 - ndev->max_cbs = msix_entries; 1248 else 1249 - ndev->max_cbs = msix_entries - 1; 1250 1251 return 0; 1252 1253 - err2: 1254 - while (--i >= 0) { 1255 - msix = &ndev->msix_entries[i]; 1256 - if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1) 1257 - free_irq(msix->vector, ndev); 1258 - else 1259 - free_irq(msix->vector, &ndev->db_cb[i]); 1260 - } 1261 - pci_disable_msix(pdev); 1262 err1: 1263 kfree(ndev->msix_entries); 1264 - dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n"); 1265 err: 1266 - ndev->num_msix = 0; 1267 return rc; 1268 } 1269 ··· 1302 free_irq(msix->vector, &ndev->db_cb[i]); 1303 } 1304 pci_disable_msix(pdev); 1305 } else { 1306 free_irq(pdev->irq, ndev); 1307
··· 91 /* Translate memory window 0,1 to BAR 2,4 */ 92 #define MW_TO_BAR(mw) (mw * NTB_MAX_NUM_MW + 2) 93 94 + static const struct pci_device_id ntb_pci_tbl[] = { 95 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)}, 96 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)}, 97 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)}, ··· 120 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 121 */ 122 int ntb_register_event_callback(struct ntb_device *ndev, 123 + void (*func)(void *handle, 124 + enum ntb_hw_event event)) 125 { 126 if (ndev->event_cb) 127 return -EINVAL; ··· 715 SNB_PBAR4LMT_OFFSET); 716 /* HW errata on the Limit registers. They can only be 717 * written when the base register is 4GB aligned and 718 + * < 32bit. This should already be the case based on 719 + * the driver defaults, but write the Limit registers 720 + * first just in case. 721 */ 722 } else { 723 ndev->limits.max_mw = SNB_MAX_MW; ··· 739 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET); 740 /* HW errata on the Limit registers. They can only be 741 * written when the base register is 4GB aligned and 742 + * < 32bit. This should already be the case based on 743 + * the driver defaults, but write the Limit registers 744 + * first just in case. 745 */ 746 } 747 ··· 785 /* B2B_XLAT_OFFSET is a 64bit register, but can 786 * only take 32bit writes 787 */ 788 + writel(SNB_MBAR01_USD_ADDR & 0xffffffff, 789 ndev->reg_base + SNB_B2B_XLAT_OFFSETL); 790 writel(SNB_MBAR01_USD_ADDR >> 32, 791 ndev->reg_base + SNB_B2B_XLAT_OFFSETU); ··· 803 ndev->conn_type = NTB_CONN_RP; 804 805 if (xeon_errata_workaround) { 806 + dev_err(&ndev->pdev->dev, 807 "NTB-RP disabled due to hardware errata. 
To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n"); 808 return -EINVAL; 809 } ··· 1079 return IRQ_HANDLED; 1080 } 1081 1082 + static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries) 1083 { 1084 struct pci_dev *pdev = ndev->pdev; 1085 struct msix_entry *msix; 1086 int rc, i; 1087 1088 + if (msix_entries < ndev->limits.msix_cnt) 1089 + return -ENOSPC; 1090 + 1091 + rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries); 1092 + if (rc < 0) 1093 + return rc; 1094 + 1095 + for (i = 0; i < msix_entries; i++) { 1096 + msix = &ndev->msix_entries[i]; 1097 + WARN_ON(!msix->vector); 1098 + 1099 + if (i == msix_entries - 1) { 1100 + rc = request_irq(msix->vector, 1101 + xeon_event_msix_irq, 0, 1102 + "ntb-event-msix", ndev); 1103 + if (rc) 1104 + goto err; 1105 + } else { 1106 + rc = request_irq(msix->vector, 1107 + xeon_callback_msix_irq, 0, 1108 + "ntb-callback-msix", 1109 + &ndev->db_cb[i]); 1110 + if (rc) 1111 + goto err; 1112 + } 1113 } 1114 1115 + ndev->num_msix = msix_entries; 1116 + ndev->max_cbs = msix_entries - 1; 1117 1118 + return 0; 1119 + 1120 + err: 1121 + while (--i >= 0) { 1122 + /* Code never reaches here for entry nr 'ndev->num_msix - 1' */ 1123 + msix = &ndev->msix_entries[i]; 1124 + free_irq(msix->vector, &ndev->db_cb[i]); 1125 + } 1126 + 1127 + pci_disable_msix(pdev); 1128 + ndev->num_msix = 0; 1129 + 1130 + return rc; 1131 + } 1132 + 1133 + static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries) 1134 + { 1135 + struct pci_dev *pdev = ndev->pdev; 1136 + struct msix_entry *msix; 1137 + int rc, i; 1138 + 1139 + msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries, 1140 + 1, msix_entries); 1141 + if (msix_entries < 0) 1142 + return msix_entries; 1143 + 1144 + for (i = 0; i < msix_entries; i++) { 1145 + msix = &ndev->msix_entries[i]; 1146 + WARN_ON(!msix->vector); 1147 + 1148 + rc = request_irq(msix->vector, bwd_callback_msix_irq, 0, 1149 + 
"ntb-callback-msix", &ndev->db_cb[i]); 1150 + if (rc) 1151 + goto err; 1152 + } 1153 + 1154 + ndev->num_msix = msix_entries; 1155 + ndev->max_cbs = msix_entries; 1156 + 1157 + return 0; 1158 + 1159 + err: 1160 + while (--i >= 0) 1161 + free_irq(msix->vector, &ndev->db_cb[i]); 1162 + 1163 + pci_disable_msix(pdev); 1164 + ndev->num_msix = 0; 1165 + 1166 + return rc; 1167 + } 1168 + 1169 + static int ntb_setup_msix(struct ntb_device *ndev) 1170 + { 1171 + struct pci_dev *pdev = ndev->pdev; 1172 + int msix_entries; 1173 + int rc, i; 1174 + 1175 + msix_entries = pci_msix_vec_count(pdev); 1176 + if (msix_entries < 0) { 1177 + rc = msix_entries; 1178 + goto err; 1179 + } else if (msix_entries > ndev->limits.msix_cnt) { 1180 rc = -EINVAL; 1181 goto err; 1182 } ··· 1112 for (i = 0; i < msix_entries; i++) 1113 ndev->msix_entries[i].entry = i; 1114 1115 if (ndev->hw_type == BWD_HW) 1116 + rc = ntb_setup_bwd_msix(ndev, msix_entries); 1117 else 1118 + rc = ntb_setup_snb_msix(ndev, msix_entries); 1119 + if (rc) 1120 + goto err1; 1121 1122 return 0; 1123 1124 err1: 1125 kfree(ndev->msix_entries); 1126 err: 1127 + dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n"); 1128 return rc; 1129 } 1130 ··· 1281 free_irq(msix->vector, &ndev->db_cb[i]); 1282 } 1283 pci_disable_msix(pdev); 1284 + kfree(ndev->msix_entries); 1285 } else { 1286 free_irq(pdev->irq, ndev); 1287
+2 -6
drivers/ntb/ntb_hw.h
··· 45 * Contact Information: 46 * Jon Mason <jon.mason@intel.com> 47 */ 48 49 #define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725 50 #define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726 ··· 60 #define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E 61 #define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F 62 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E 63 - 64 - #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) 65 66 #ifndef readq 67 static inline u64 readq(void __iomem *addr) ··· 81 #define NTB_BAR_45 4 82 #define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\ 83 (1 << NTB_BAR_45)) 84 - 85 - #define NTB_LINK_DOWN 0 86 - #define NTB_LINK_UP 1 87 88 #define NTB_HB_TIMEOUT msecs_to_jiffies(1000) 89 ··· 229 int db_num)); 230 void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx); 231 int ntb_register_event_callback(struct ntb_device *ndev, 232 - void (*event_cb_func) (void *handle, 233 enum ntb_hw_event event)); 234 void ntb_unregister_event_callback(struct ntb_device *ndev); 235 int ntb_get_max_spads(struct ntb_device *ndev);
··· 45 * Contact Information: 46 * Jon Mason <jon.mason@intel.com> 47 */ 48 + #include <linux/ntb.h> 49 50 #define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725 51 #define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726 ··· 59 #define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E 60 #define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F 61 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E 62 63 #ifndef readq 64 static inline u64 readq(void __iomem *addr) ··· 82 #define NTB_BAR_45 4 83 #define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\ 84 (1 << NTB_BAR_45)) 85 86 #define NTB_HB_TIMEOUT msecs_to_jiffies(1000) 87 ··· 233 int db_num)); 234 void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx); 235 int ntb_register_event_callback(struct ntb_device *ndev, 236 + void (*event_cb_func)(void *handle, 237 enum ntb_hw_event event)); 238 void ntb_unregister_event_callback(struct ntb_device *ndev); 239 int ntb_get_max_spads(struct ntb_device *ndev);
+9 -11
drivers/ntb/ntb_transport.c
··· 56 #include <linux/pci.h> 57 #include <linux/slab.h> 58 #include <linux/types.h> 59 - #include <linux/ntb.h> 60 #include "ntb_hw.h" 61 62 #define NTB_TRANSPORT_VERSION 3 ··· 106 struct ntb_rx_info __iomem *rx_info; 107 struct ntb_rx_info *remote_rx_info; 108 109 - void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 110 - void *data, int len); 111 struct list_head tx_free_q; 112 spinlock_t ntb_tx_free_q_lock; 113 void __iomem *tx_mw; ··· 116 unsigned int tx_max_entry; 117 unsigned int tx_max_frame; 118 119 - void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 120 - void *data, int len); 121 struct list_head rx_pend_q; 122 struct list_head rx_free_q; 123 spinlock_t ntb_rx_pend_q_lock; ··· 128 unsigned int rx_max_frame; 129 dma_cookie_t last_cookie; 130 131 - void (*event_handler) (void *data, int status); 132 struct delayed_work link_work; 133 struct work_struct link_cleanup; 134 ··· 479 } 480 481 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, 482 - struct list_head *list) 483 { 484 struct ntb_queue_entry *entry; 485 unsigned long flags; ··· 838 } 839 840 static int ntb_transport_init_queue(struct ntb_transport *nt, 841 - unsigned int qp_num) 842 { 843 struct ntb_transport_qp *qp; 844 unsigned int num_qps_mw, tx_size; ··· 1054 if (!chan) 1055 goto err; 1056 1057 - if (len < copy_bytes) 1058 goto err_wait; 1059 1060 device = chan->device; ··· 1189 return 0; 1190 1191 err: 1192 - ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, 1193 - &qp->rx_pend_q); 1194 /* Ensure that the data is fully copied out before clearing the flag */ 1195 wmb(); 1196 hdr->flags = 0;
··· 56 #include <linux/pci.h> 57 #include <linux/slab.h> 58 #include <linux/types.h> 59 #include "ntb_hw.h" 60 61 #define NTB_TRANSPORT_VERSION 3 ··· 107 struct ntb_rx_info __iomem *rx_info; 108 struct ntb_rx_info *remote_rx_info; 109 110 + void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 111 + void *data, int len); 112 struct list_head tx_free_q; 113 spinlock_t ntb_tx_free_q_lock; 114 void __iomem *tx_mw; ··· 117 unsigned int tx_max_entry; 118 unsigned int tx_max_frame; 119 120 + void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 121 + void *data, int len); 122 struct list_head rx_pend_q; 123 struct list_head rx_free_q; 124 spinlock_t ntb_rx_pend_q_lock; ··· 129 unsigned int rx_max_frame; 130 dma_cookie_t last_cookie; 131 132 + void (*event_handler)(void *data, int status); 133 struct delayed_work link_work; 134 struct work_struct link_cleanup; 135 ··· 480 } 481 482 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, 483 + struct list_head *list) 484 { 485 struct ntb_queue_entry *entry; 486 unsigned long flags; ··· 839 } 840 841 static int ntb_transport_init_queue(struct ntb_transport *nt, 842 + unsigned int qp_num) 843 { 844 struct ntb_transport_qp *qp; 845 unsigned int num_qps_mw, tx_size; ··· 1055 if (!chan) 1056 goto err; 1057 1058 + if (len < copy_bytes) 1059 goto err_wait; 1060 1061 device = chan->device; ··· 1190 return 0; 1191 1192 err: 1193 + ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); 1194 /* Ensure that the data is fully copied out before clearing the flag */ 1195 wmb(); 1196 hdr->flags = 0;
+12 -7
include/linux/ntb.h
··· 50 51 struct ntb_client { 52 struct device_driver driver; 53 - int (*probe) (struct pci_dev *pdev); 54 - void (*remove) (struct pci_dev *pdev); 55 }; 56 57 int ntb_register_client(struct ntb_client *drvr); ··· 65 void ntb_unregister_client_dev(char *device_name); 66 67 struct ntb_queue_handlers { 68 - void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 69 - void *data, int len); 70 - void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 71 - void *data, int len); 72 - void (*event_handler) (void *data, int status); 73 }; 74 75 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
··· 50 51 struct ntb_client { 52 struct device_driver driver; 53 + int (*probe)(struct pci_dev *pdev); 54 + void (*remove)(struct pci_dev *pdev); 55 + }; 56 + 57 + enum { 58 + NTB_LINK_DOWN = 0, 59 + NTB_LINK_UP, 60 }; 61 62 int ntb_register_client(struct ntb_client *drvr); ··· 60 void ntb_unregister_client_dev(char *device_name); 61 62 struct ntb_queue_handlers { 63 + void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 64 + void *data, int len); 65 + void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 66 + void *data, int len); 67 + void (*event_handler)(void *data, int status); 68 }; 69 70 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);