Merge tag 'ntb-3.15' of git://github.com/jonmason/ntb

Pull PCIe non-transparent bridge fixes and features from Jon Mason:
"NTB driver bug fixes to address issues in list traversal, skb leak in
ntb_netdev, a typo, and a leak of msix entries in the error path.
Clean ups of the event handling logic, as well as a overall style
cleanup. Finally, the driver was converted to use the new
pci_enable_msix_range logic (and the refactoring to go along with it)"
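
For context, the pci_enable_msix_range() conversion mentioned above replaces
the old pattern of reading the MSI-X table size out of config space and then
looping on pci_enable_msix().  Below is a minimal sketch of the new-style
allocation flow; it is not code from this series, and example_alloc_msix is a
placeholder name:

#include <linux/pci.h>
#include <linux/slab.h>

/* Placeholder helper: enable between 1 and "all supported" MSI-X vectors. */
static int example_alloc_msix(struct pci_dev *pdev, struct msix_entry **out)
{
        struct msix_entry *entries;
        int nvec, i;

        /* Number of vectors the device advertises, or a negative errno */
        nvec = pci_msix_vec_count(pdev);
        if (nvec < 0)
                return nvec;

        entries = kcalloc(nvec, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < nvec; i++)
                entries[i].entry = i;

        /* Grants anywhere from 1 to nvec vectors, or fails with -errno;
         * pci_enable_msix_exact() would instead demand exactly nvec.
         */
        nvec = pci_enable_msix_range(pdev, entries, 1, nvec);
        if (nvec < 0) {
                kfree(entries);
                return nvec;
        }

        *out = entries;
        return nvec;    /* number of vectors actually enabled */
}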

* tag 'ntb-3.15' of git://github.com/jonmason/ntb:
ntb: Use pci_enable_msix_range() instead of pci_enable_msix()
ntb: Split ntb_setup_msix() into separate BWD/SNB routines
ntb: Use pci_msix_vec_count() to obtain number of MSI-Xs
NTB: Code Style Clean-up
NTB: client event cleanup
ntb: Fix leakage of ntb_device::msix_entries[] array
NTB: Fix typo in setting one translation register
ntb_netdev: Fix skb free issue in open
ntb_netdev: Fix list_for_each_entry exit issue

5 files changed, 150 insertions(+), 116 deletions(-)

drivers/net/ntb_netdev.c (+20 -7)

···
         netdev_dbg(ndev, "Event %x, Link %x\n", status,
                    ntb_transport_link_query(dev->qp));
 
-        /* Currently, only link status event is supported */
-        if (status)
-                netif_carrier_on(ndev);
-        else
+        switch (status) {
+        case NTB_LINK_DOWN:
                 netif_carrier_off(ndev);
+                break;
+        case NTB_LINK_UP:
+                if (!ntb_transport_link_query(dev->qp))
+                        return;
+
+                netif_carrier_on(ndev);
+                break;
+        default:
+                netdev_warn(ndev, "Unsupported event type %d\n", status);
+        }
 }
 
 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
···
 
                 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
                                               ndev->mtu + ETH_HLEN);
-                if (rc == -EINVAL)
+                if (rc == -EINVAL) {
+                        dev_kfree_skb(skb);
                         goto err;
+                }
         }
 
         netif_carrier_off(ndev);
···
 {
         struct net_device *ndev;
         struct ntb_netdev *dev;
+        bool found = false;
 
         list_for_each_entry(dev, &dev_list, list) {
-                if (dev->pdev == pdev)
+                if (dev->pdev == pdev) {
+                        found = true;
                         break;
+                }
         }
-        if (dev == NULL)
+        if (!found)
                 return;
 
         list_del(&dev->list);
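
The last hunk above fixes a classic list_for_each_entry() pitfall: if the loop
runs to completion without hitting the break, the cursor is left pointing at an
offset of the list head rather than at NULL, so the old "if (dev == NULL)" test
could never fire.  A minimal sketch of the found-flag idiom, using a
hypothetical element type (my_node is not from this driver):

#include <linux/list.h>
#include <linux/types.h>

struct my_node {
        struct list_head list;
        int id;
};

/* Return true if a node with the given id is on the list. */
static bool my_list_contains(struct list_head *head, int id)
{
        struct my_node *node;
        bool found = false;

        list_for_each_entry(node, head, list) {
                if (node->id == id) {
                        found = true;
                        break;
                }
        }

        /* Checking "node == NULL" here would be wrong: after a full pass,
         * "node" aliases the list head, not a real element and never NULL.
         */
        return found;
}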

drivers/ntb/ntb_hw.c (+107 -85)

···
 /* Translate memory window 0,1 to BAR 2,4 */
 #define MW_TO_BAR(mw)        (mw * NTB_MAX_NUM_MW + 2)
 
-static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+static const struct pci_device_id ntb_pci_tbl[] = {
         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
···
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
 int ntb_register_event_callback(struct ntb_device *ndev,
-                                void (*func)(void *handle, enum ntb_hw_event event))
+                                void (*func)(void *handle,
+                                             enum ntb_hw_event event))
 {
         if (ndev->event_cb)
                 return -EINVAL;
···
                        SNB_PBAR4LMT_OFFSET);
                 /* HW errata on the Limit registers. They can only be
                  * written when the base register is 4GB aligned and
-                 * < 32bit. This should already be the case based on the
-                 * driver defaults, but write the Limit registers first
-                 * just in case.
+                 * < 32bit. This should already be the case based on
+                 * the driver defaults, but write the Limit registers
+                 * first just in case.
                  */
         } else {
                 ndev->limits.max_mw = SNB_MAX_MW;
···
                 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
                 /* HW errata on the Limit registers. They can only be
                  * written when the base register is 4GB aligned and
-                 * < 32bit. This should already be the case based on the
-                 * driver defaults, but write the Limit registers first
-                 * just in case.
+                 * < 32bit. This should already be the case based on
+                 * the driver defaults, but write the Limit registers
+                 * first just in case.
                  */
         }
 
···
                 /* B2B_XLAT_OFFSET is a 64bit register, but can
                  * only take 32bit writes
                  */
-                writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
+                writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
                        ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
                 writel(SNB_MBAR01_USD_ADDR >> 32,
                        ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
···
                 ndev->conn_type = NTB_CONN_RP;
 
                 if (xeon_errata_workaround) {
-                        dev_err(&ndev->pdev->dev, 
+                        dev_err(&ndev->pdev->dev,
                                 "NTB-RP disabled due to hardware errata. To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n");
                         return -EINVAL;
                 }
···
         return IRQ_HANDLED;
 }
 
-static int ntb_setup_msix(struct ntb_device *ndev)
+static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
 {
         struct pci_dev *pdev = ndev->pdev;
         struct msix_entry *msix;
-        int msix_entries;
         int rc, i;
-        u16 val;
 
-        if (!pdev->msix_cap) {
-                rc = -EIO;
-                goto err;
+        if (msix_entries < ndev->limits.msix_cnt)
+                return -ENOSPC;
+
+        rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
+        if (rc < 0)
+                return rc;
+
+        for (i = 0; i < msix_entries; i++) {
+                msix = &ndev->msix_entries[i];
+                WARN_ON(!msix->vector);
+
+                if (i == msix_entries - 1) {
+                        rc = request_irq(msix->vector,
+                                         xeon_event_msix_irq, 0,
+                                         "ntb-event-msix", ndev);
+                        if (rc)
+                                goto err;
+                } else {
+                        rc = request_irq(msix->vector,
+                                         xeon_callback_msix_irq, 0,
+                                         "ntb-callback-msix",
+                                         &ndev->db_cb[i]);
+                        if (rc)
+                                goto err;
+                }
         }
 
-        rc = pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &val);
-        if (rc)
-                goto err;
+        ndev->num_msix = msix_entries;
+        ndev->max_cbs = msix_entries - 1;
 
-        msix_entries = msix_table_size(val);
-        if (msix_entries > ndev->limits.msix_cnt) {
+        return 0;
+
+err:
+        while (--i >= 0) {
+                /* Code never reaches here for entry nr 'ndev->num_msix - 1' */
+                msix = &ndev->msix_entries[i];
+                free_irq(msix->vector, &ndev->db_cb[i]);
+        }
+
+        pci_disable_msix(pdev);
+        ndev->num_msix = 0;
+
+        return rc;
+}
+
+static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
+{
+        struct pci_dev *pdev = ndev->pdev;
+        struct msix_entry *msix;
+        int rc, i;
+
+        msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
+                                             1, msix_entries);
+        if (msix_entries < 0)
+                return msix_entries;
+
+        for (i = 0; i < msix_entries; i++) {
+                msix = &ndev->msix_entries[i];
+                WARN_ON(!msix->vector);
+
+                rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+                                 "ntb-callback-msix", &ndev->db_cb[i]);
+                if (rc)
+                        goto err;
+        }
+
+        ndev->num_msix = msix_entries;
+        ndev->max_cbs = msix_entries;
+
+        return 0;
+
+err:
+        while (--i >= 0)
+                free_irq(msix->vector, &ndev->db_cb[i]);
+
+        pci_disable_msix(pdev);
+        ndev->num_msix = 0;
+
+        return rc;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+        struct pci_dev *pdev = ndev->pdev;
+        int msix_entries;
+        int rc, i;
+
+        msix_entries = pci_msix_vec_count(pdev);
+        if (msix_entries < 0) {
+                rc = msix_entries;
+                goto err;
+        } else if (msix_entries > ndev->limits.msix_cnt) {
                 rc = -EINVAL;
                 goto err;
         }
···
         for (i = 0; i < msix_entries; i++)
                 ndev->msix_entries[i].entry = i;
 
-        rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
-        if (rc < 0)
-                goto err1;
-        if (rc > 0) {
-                /* On SNB, the link interrupt is always tied to 4th vector. If
-                 * we can't get all 4, then we can't use MSI-X.
-                 */
-                if (ndev->hw_type != BWD_HW) {
-                        rc = -EIO;
-                        goto err1;
-                }
-
-                dev_warn(&pdev->dev,
-                         "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
-                         rc);
-                msix_entries = rc;
-
-                rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
-                if (rc)
-                        goto err1;
-        }
-
-        for (i = 0; i < msix_entries; i++) {
-                msix = &ndev->msix_entries[i];
-                WARN_ON(!msix->vector);
-
-                /* Use the last MSI-X vector for Link status */
-                if (ndev->hw_type == BWD_HW) {
-                        rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
-                                         "ntb-callback-msix", &ndev->db_cb[i]);
-                        if (rc)
-                                goto err2;
-                } else {
-                        if (i == msix_entries - 1) {
-                                rc = request_irq(msix->vector,
-                                                 xeon_event_msix_irq, 0,
-                                                 "ntb-event-msix", ndev);
-                                if (rc)
-                                        goto err2;
-                        } else {
-                                rc = request_irq(msix->vector,
-                                                 xeon_callback_msix_irq, 0,
-                                                 "ntb-callback-msix",
-                                                 &ndev->db_cb[i]);
-                                if (rc)
-                                        goto err2;
-                        }
-                }
-        }
-
-        ndev->num_msix = msix_entries;
         if (ndev->hw_type == BWD_HW)
-                ndev->max_cbs = msix_entries;
+                rc = ntb_setup_bwd_msix(ndev, msix_entries);
         else
-                ndev->max_cbs = msix_entries - 1;
+                rc = ntb_setup_snb_msix(ndev, msix_entries);
+        if (rc)
+                goto err1;
 
         return 0;
 
-err2:
-        while (--i >= 0) {
-                msix = &ndev->msix_entries[i];
-                if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
-                        free_irq(msix->vector, ndev);
-                else
-                        free_irq(msix->vector, &ndev->db_cb[i]);
-        }
-        pci_disable_msix(pdev);
 err1:
         kfree(ndev->msix_entries);
-        dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
 err:
-        ndev->num_msix = 0;
+        dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
         return rc;
 }
 
···
                         free_irq(msix->vector, &ndev->db_cb[i]);
                 }
                 pci_disable_msix(pdev);
+                kfree(ndev->msix_entries);
         } else {
                 free_irq(pdev->irq, ndev);
 

drivers/ntb/ntb_hw.h (+2 -6)

···
  * Contact Information:
  * Jon Mason <jon.mason@intel.com>
  */
+#include <linux/ntb.h>
 
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF                0x3725
 #define PCI_DEVICE_ID_INTEL_NTB_PS_JSF                0x3726
···
 #define PCI_DEVICE_ID_INTEL_NTB_PS_HSX                0x2F0E
 #define PCI_DEVICE_ID_INTEL_NTB_SS_HSX                0x2F0F
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD                0x0C4E
-
-#define msix_table_size(control)        ((control & PCI_MSIX_FLAGS_QSIZE)+1)
 
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
···
 #define NTB_BAR_45                4
 #define NTB_BAR_MASK                ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
                                  (1 << NTB_BAR_45))
-
-#define NTB_LINK_DOWN                0
-#define NTB_LINK_UP                1
 
 #define NTB_HB_TIMEOUT                msecs_to_jiffies(1000)
···
                                           int db_num));
 void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
 int ntb_register_event_callback(struct ntb_device *ndev,
-                                void (*event_cb_func) (void *handle,
+                                void (*event_cb_func)(void *handle,
                                                       enum ntb_hw_event event));
 void ntb_unregister_event_callback(struct ntb_device *ndev);
 int ntb_get_max_spads(struct ntb_device *ndev);

drivers/ntb/ntb_transport.c (+9 -11)

···
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/ntb.h>
 #include "ntb_hw.h"
 
 #define NTB_TRANSPORT_VERSION        3
···
         struct ntb_rx_info __iomem *rx_info;
         struct ntb_rx_info *remote_rx_info;
 
-        void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                            void *data, int len);
+        void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
         struct list_head tx_free_q;
         spinlock_t ntb_tx_free_q_lock;
         void __iomem *tx_mw;
···
         unsigned int tx_max_entry;
         unsigned int tx_max_frame;
 
-        void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                            void *data, int len);
+        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
         struct list_head rx_pend_q;
         struct list_head rx_free_q;
         spinlock_t ntb_rx_pend_q_lock;
···
         unsigned int rx_max_frame;
         dma_cookie_t last_cookie;
 
-        void (*event_handler) (void *data, int status);
+        void (*event_handler)(void *data, int status);
         struct delayed_work link_work;
         struct work_struct link_cleanup;
 
···
 }
 
 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
-                                                struct list_head *list)
+                                           struct list_head *list)
 {
         struct ntb_queue_entry *entry;
         unsigned long flags;
···
 }
 
 static int ntb_transport_init_queue(struct ntb_transport *nt,
-                                     unsigned int qp_num)
+                                    unsigned int qp_num)
 {
         struct ntb_transport_qp *qp;
         unsigned int num_qps_mw, tx_size;
···
         if (!chan)
                 goto err;
 
-        if (len < copy_bytes) 
+        if (len < copy_bytes)
                 goto err_wait;
 
         device = chan->device;
···
         return 0;
 
 err:
-        ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-                     &qp->rx_pend_q);
+        ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
         /* Ensure that the data is fully copied out before clearing the flag */
         wmb();
         hdr->flags = 0;

include/linux/ntb.h (+12 -7)

···
 
 struct ntb_client {
         struct device_driver driver;
-        int (*probe) (struct pci_dev *pdev);
-        void (*remove) (struct pci_dev *pdev);
+        int (*probe)(struct pci_dev *pdev);
+        void (*remove)(struct pci_dev *pdev);
+};
+
+enum {
+        NTB_LINK_DOWN = 0,
+        NTB_LINK_UP,
 };
 
 int ntb_register_client(struct ntb_client *drvr);
···
 void ntb_unregister_client_dev(char *device_name);
 
 struct ntb_queue_handlers {
-        void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                            void *data, int len);
-        void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                            void *data, int len);
-        void (*event_handler) (void *data, int status);
+        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
+        void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                           void *data, int len);
+        void (*event_handler)(void *data, int status);
 };
 
 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
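
With NTB_LINK_DOWN and NTB_LINK_UP now published in include/linux/ntb.h, a
transport client's event_handler (the void (*)(void *data, int status) hook in
struct ntb_queue_handlers above) can switch on the link state the same way
ntb_netdev does.  A minimal sketch with a hypothetical client structure
(my_client is not part of this series):

#include <linux/ntb.h>
#include <linux/printk.h>
#include <linux/types.h>

struct my_client {
        bool link_is_up;        /* hypothetical per-client state */
};

static void my_client_event_handler(void *data, int status)
{
        struct my_client *client = data;

        switch (status) {
        case NTB_LINK_DOWN:
                client->link_is_up = false;
                break;
        case NTB_LINK_UP:
                client->link_is_up = true;
                break;
        default:
                pr_warn("unsupported NTB event %d\n", status);
        }
}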