Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mthca: FMR ioremap fix
  IPoIB: Free child interfaces properly
  IB/mthca: Fix race in reference counting
  IB/srp: Fix tracking of pending requests during error handling
  IB: Fix display of 4-bit port counters in sysfs

+202 -137
+1 -1
drivers/infiniband/core/sysfs.c
···
         switch (width) {
         case 4:
                 ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
-                                            (offset % 4)) & 0xf);
+                                            (4 - (offset % 8))) & 0xf);
                 break;
         case 8:
                 ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
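The arithmetic behind this one-liner: the 4-bit PMA counters are packed two per byte in the big-endian MAD payload, so the counter at the lower bit offset sits in the high nibble of its byte. The old shift, offset % 4, is always zero for nibble-aligned offsets and therefore always returned the low nibble. Below is a standalone sketch of the corrected extraction; the helper name and sample values are invented for illustration.

/* Illustrative sketch (not driver code): pulling a 4-bit PMA counter at a
 * given bit offset out of the big-endian MAD payload. */
#include <stdio.h>

static unsigned int nibble_counter(const unsigned char *data, int offset)
{
        /* offset is in bits; counters start at byte 40 of the MAD data.
         * The nibble with the lower bit offset is the high-order nibble
         * of its byte, so the shift is 4 - (offset % 8), not offset % 4
         * (which is always 0 for nibble-aligned offsets and would read
         * the wrong half of the byte). */
        return (data[40 + offset / 8] >> (4 - (offset % 8))) & 0xf;
}

int main(void)
{
        unsigned char data[64] = { 0 };

        data[40 + 28 / 8] = 0xA5;       /* two 4-bit counters packed in one byte */

        /* bit offset 24 -> high nibble (0xA), bit offset 28 -> low nibble (0x5) */
        printf("%u %u\n", nibble_counter(data, 24), nibble_counter(data, 28));
        return 0;
}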
+21 -20
drivers/infiniband/hw/mthca/mthca_cq.c
···
         spin_lock(&dev->cq_table.lock);
 
         cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
         if (cq)
-                atomic_inc(&cq->refcount);
+                ++cq->refcount;
+
         spin_unlock(&dev->cq_table.lock);
 
         if (!cq) {
···
         if (cq->ibcq.event_handler)
                 cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
 
-        if (atomic_dec_and_test(&cq->refcount))
+        spin_lock(&dev->cq_table.lock);
+        if (!--cq->refcount)
                 wake_up(&cq->wait);
+        spin_unlock(&dev->cq_table.lock);
 }
 
 static inline int is_recv_cqe(struct mthca_cqe *cqe)
···
         return !(cqe->is_send & 0x80);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
                     struct mthca_srq *srq)
 {
-        struct mthca_cq *cq;
         struct mthca_cqe *cqe;
         u32 prod_index;
         int nfreed = 0;
-
-        spin_lock_irq(&dev->cq_table.lock);
-        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-        if (cq)
-                atomic_inc(&cq->refcount);
-        spin_unlock_irq(&dev->cq_table.lock);
-
-        if (!cq)
-                return;
 
         spin_lock_irq(&cq->lock);
 
···
 
         if (0)
                 mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-                          qpn, cqn, cq->cons_index, prod_index);
+                          qpn, cq->cqn, cq->cons_index, prod_index);
 
         /*
          * Now sweep backwards through the CQ, removing CQ entries
···
         }
 
         spin_unlock_irq(&cq->lock);
-        if (atomic_dec_and_test(&cq->refcount))
-                wake_up(&cq->wait);
 }
 
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
···
         }
 
         spin_lock_init(&cq->lock);
-        atomic_set(&cq->refcount, 1);
+        cq->refcount = 1;
         init_waitqueue_head(&cq->wait);
 
         memset(cq_context, 0, sizeof *cq_context);
···
         return err;
 }
 
+static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+        int c;
+
+        spin_lock_irq(&dev->cq_table.lock);
+        c = cq->refcount;
+        spin_unlock_irq(&dev->cq_table.lock);
+
+        return c;
+}
+
 void mthca_free_cq(struct mthca_dev *dev,
                    struct mthca_cq *cq)
 {
···
         spin_lock_irq(&dev->cq_table.lock);
         mthca_array_clear(&dev->cq_table.cq,
                           cq->cqn & (dev->limits.num_cqs - 1));
+        --cq->refcount;
         spin_unlock_irq(&dev->cq_table.lock);
 
         if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
···
         else
                 synchronize_irq(dev->pdev->irq);
 
-        atomic_dec(&cq->refcount);
-        wait_event(cq->wait, !atomic_read(&cq->refcount));
+        wait_event(cq->wait, !get_cq_refcount(dev, cq));
 
         if (cq->is_kernel) {
                 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+1 -1
drivers/infiniband/hw/mthca/mthca_dev.h
···
 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
                     enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
                     struct mthca_srq *srq);
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
+11 -4
drivers/infiniband/hw/mthca/mthca_mr.c
···
 
 int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 {
+        unsigned long addr;
         int err, i;
 
         err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
···
                 goto err_fmr_mpt;
         }
 
+        addr = pci_resource_start(dev->pdev, 4) +
+                ((pci_resource_len(dev->pdev, 4) - 1) &
+                 dev->mr_table.mpt_base);
+
         dev->mr_table.tavor_fmr.mpt_base =
-                ioremap(dev->mr_table.mpt_base,
-                        (1 << i) * sizeof (struct mthca_mpt_entry));
+                ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
 
         if (!dev->mr_table.tavor_fmr.mpt_base) {
                 mthca_warn(dev, "MPT ioremap for FMR failed.\n");
···
                 goto err_fmr_mpt;
         }
 
+        addr = pci_resource_start(dev->pdev, 4) +
+                ((pci_resource_len(dev->pdev, 4) - 1) &
+                 dev->mr_table.mtt_base);
+
         dev->mr_table.tavor_fmr.mtt_base =
-                ioremap(dev->mr_table.mtt_base,
-                        (1 << i) * MTHCA_MTT_SEG_SIZE);
+                ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);
         if (!dev->mr_table.tavor_fmr.mtt_base) {
                 mthca_warn(dev, "MTT ioremap for FMR failed.\n");
                 err = -ENOMEM;
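As far as the diff shows, the point of this fix is that mpt_base and mtt_base are device-side addresses reported by firmware, so they cannot be handed to ioremap() directly; the CPU-visible address is the corresponding offset inside BAR 4. Because a BAR's length is a power of two, masking with pci_resource_len() - 1 keeps exactly that offset. A small sketch of the address math follows; the numeric values are made up for illustration.

/* Sketch of the BAR-offset computation above, with hypothetical values. */
#include <stdio.h>

int main(void)
{
        unsigned long bar_start = 0xd0000000UL; /* pci_resource_start(pdev, 4), hypothetical */
        unsigned long bar_len   = 0x00800000UL; /* pci_resource_len(pdev, 4), a power of two */
        unsigned long mpt_base  = 0x10120000UL; /* device-side base reported by firmware */

        /* (bar_len - 1) is a mask that keeps only the offset within the BAR;
         * adding the BAR start yields an address that is safe to ioremap(). */
        unsigned long addr = bar_start + ((bar_len - 1) & mpt_base);

        printf("ioremap address: %#lx\n", addr);        /* prints 0xd0120000 */
        return 0;
}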
+12 -10
drivers/infiniband/hw/mthca/mthca_provider.h
···
  * a qp may be locked, with the send cq locked first.  No other
  * nesting should be done.
  *
- * Each struct mthca_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
+ * Each struct mthca_cq/qp also has an ref count, protected by the
+ * corresponding table lock.  The pointer from the cq/qp_table to the
+ * struct counts as one reference.  This reference also is good for
+ * access through the consumer API, so modifying the CQ/QP etc doesn't
+ * need to take another reference.  Access to a QP because of a
+ * completion being polled does not need a reference either.
  *
  * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
  * destroy function to sleep on.
···
  * - decrement ref count; if zero, wake up waiters
  *
  * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
+ * - lock cq/qp_table
+ * - remove pointer and decrement ref count
+ * - unlock cq/qp_table lock
  * - wait_event until ref count is zero
  *
  * It is the consumer's responsibilty to make sure that no QP
···
 struct mthca_cq {
         struct ib_cq ibcq;
         spinlock_t lock;
-        atomic_t refcount;
+        int refcount;
         int cqn;
         u32 cons_index;
         struct mthca_cq_buf buf;
···
 struct mthca_srq {
         struct ib_srq ibsrq;
         spinlock_t lock;
-        atomic_t refcount;
+        int refcount;
         int srqn;
         int max;
         int max_gs;
···
 
 struct mthca_qp {
         struct ib_qp ibqp;
-        atomic_t refcount;
+        int refcount;
         u32 qpn;
         int is_direct;
         u8 port; /* for SQP and memfree use only */
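The comment block above describes the destroy protocol that replaces the atomic_t: references are taken and dropped under the table lock, and the final wake-up happens while that lock is still held, so the destroyer cannot free the object while a waker is still touching it. Below is a minimal user-space sketch of the same protocol, assuming pthreads stand in for the kernel's spinlock and wait queue; it is not the driver code.

/* Sketch of a lock-protected ref count with a sleeping destroyer. */
#include <pthread.h>

struct obj {
        pthread_mutex_t table_lock;     /* stands in for dev->cq_table.lock */
        pthread_cond_t  wait;           /* stands in for cq->wait */
        int             refcount;
};

/* event path: drop a reference, waking the destroyer if it was the last one;
 * the wake-up happens while the lock is held, so the object is still valid */
static void obj_put(struct obj *o)
{
        pthread_mutex_lock(&o->table_lock);
        if (!--o->refcount)
                pthread_cond_signal(&o->wait);
        pthread_mutex_unlock(&o->table_lock);
}

/* destroy path: remove the table's pointer and drop its reference under the
 * lock, then sleep until every outstanding reference is gone */
static void obj_destroy_wait(struct obj *o)
{
        pthread_mutex_lock(&o->table_lock);
        /* ...remove the pointer from the table here... */
        --o->refcount;
        while (o->refcount)
                pthread_cond_wait(&o->wait, &o->table_lock);
        pthread_mutex_unlock(&o->table_lock);
        /* now safe to free the object */
}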
+22 -9
drivers/infiniband/hw/mthca/mthca_qp.c
···
         spin_lock(&dev->qp_table.lock);
         qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
         if (qp)
-                atomic_inc(&qp->refcount);
+                ++qp->refcount;
         spin_unlock(&dev->qp_table.lock);
 
         if (!qp) {
···
         if (qp->ibqp.event_handler)
                 qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
 
-        if (atomic_dec_and_test(&qp->refcount))
+        spin_lock(&dev->qp_table.lock);
+        if (!--qp->refcount)
                 wake_up(&qp->wait);
+        spin_unlock(&dev->qp_table.lock);
 }
 
 static int to_mthca_state(enum ib_qp_state ib_state)
···
          * entries and reinitialize the QP.
          */
         if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
-                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                                qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                        qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
                 mthca_wq_init(&qp->sq);
···
         int ret;
         int i;
 
-        atomic_set(&qp->refcount, 1);
+        qp->refcount = 1;
         init_waitqueue_head(&qp->wait);
         qp->state = IB_QPS_RESET;
         qp->atomic_rd_en = 0;
···
         return err;
 }
 
+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+        int c;
+
+        spin_lock_irq(&dev->qp_table.lock);
+        c = qp->refcount;
+        spin_unlock_irq(&dev->qp_table.lock);
+
+        return c;
+}
+
 void mthca_free_qp(struct mthca_dev *dev,
                    struct mthca_qp *qp)
 {
···
         spin_lock(&dev->qp_table.lock);
         mthca_array_clear(&dev->qp_table.qp,
                           qp->qpn & (dev->limits.num_qps - 1));
+        --qp->refcount;
         spin_unlock(&dev->qp_table.lock);
 
         if (send_cq != recv_cq)
                 spin_unlock(&recv_cq->lock);
         spin_unlock_irq(&send_cq->lock);
 
-        atomic_dec(&qp->refcount);
-        wait_event(qp->wait, !atomic_read(&qp->refcount));
+        wait_event(qp->wait, !get_qp_refcount(dev, qp));
 
         if (qp->state != IB_QPS_RESET)
                 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
···
          * unref the mem-free tables and free the QPN in our table.
          */
         if (!qp->ibqp.uobject) {
-                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                                qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                        qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
                 mthca_free_memfree(dev, qp);
+18 -5
drivers/infiniband/hw/mthca/mthca_srq.c
···
                 goto err_out_mailbox;
 
         spin_lock_init(&srq->lock);
-        atomic_set(&srq->refcount, 1);
+        srq->refcount = 1;
         init_waitqueue_head(&srq->wait);
 
         if (mthca_is_memfree(dev))
···
         return err;
 }
 
+static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+        int c;
+
+        spin_lock_irq(&dev->srq_table.lock);
+        c = srq->refcount;
+        spin_unlock_irq(&dev->srq_table.lock);
+
+        return c;
+}
+
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 {
         struct mthca_mailbox *mailbox;
···
         spin_lock_irq(&dev->srq_table.lock);
         mthca_array_clear(&dev->srq_table.srq,
                           srq->srqn & (dev->limits.num_srqs - 1));
+        --srq->refcount;
         spin_unlock_irq(&dev->srq_table.lock);
 
-        atomic_dec(&srq->refcount);
-        wait_event(srq->wait, !atomic_read(&srq->refcount));
+        wait_event(srq->wait, !get_srq_refcount(dev, srq));
 
         if (!srq->ibsrq.uobject) {
                 mthca_free_srq_buf(dev, srq);
···
         spin_lock(&dev->srq_table.lock);
         srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
         if (srq)
-                atomic_inc(&srq->refcount);
+                ++srq->refcount;
         spin_unlock(&dev->srq_table.lock);
 
         if (!srq) {
···
                 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
 
 out:
-        if (atomic_dec_and_test(&srq->refcount))
+        spin_lock(&dev->srq_table.lock);
+        if (!--srq->refcount)
                 wake_up(&srq->wait);
+        spin_unlock(&dev->srq_table.lock);
 }
 
 /*
+1 -3
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
···
                 if (priv->pkey == pkey) {
                         unregister_netdev(priv->dev);
                         ipoib_dev_cleanup(priv->dev);
-
                         list_del(&priv->list);
-
-                        kfree(priv);
+                        free_netdev(priv->dev);
 
                         ret = 0;
                         break;
+113 -82
drivers/infiniband/ulp/srp/ib_srp.c
···
         }
 }
 
+static void srp_unmap_data(struct scsi_cmnd *scmnd,
+                           struct srp_target_port *target,
+                           struct srp_request *req)
+{
+        struct scatterlist *scat;
+        int nents;
+
+        if (!scmnd->request_buffer ||
+            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+             scmnd->sc_data_direction != DMA_FROM_DEVICE))
+                return;
+
+        /*
+         * This handling of non-SG commands can be killed when the
+         * SCSI midlayer no longer generates non-SG commands.
+         */
+        if (likely(scmnd->use_sg)) {
+                nents = scmnd->use_sg;
+                scat  = scmnd->request_buffer;
+        } else {
+                nents = 1;
+                scat  = &req->fake_sg;
+        }
+
+        dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+                     scmnd->sc_data_direction);
+}
+
 static int srp_reconnect_target(struct srp_target_port *target)
 {
         struct ib_cm_id *new_cm_id;
···
         list_for_each_entry(req, &target->req_queue, list) {
                 req->scmnd->result = DID_RESET << 16;
                 req->scmnd->scsi_done(req->scmnd);
+                srp_unmap_data(req->scmnd, target, req);
         }
 
         target->rx_head  = 0;
         target->tx_head  = 0;
         target->tx_tail  = 0;
-        target->req_head = 0;
-        for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
-                target->req_ring[i].next = i + 1;
-        target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+        INIT_LIST_HEAD(&target->free_reqs);
         INIT_LIST_HEAD(&target->req_queue);
+        for (i = 0; i < SRP_SQ_SIZE; ++i)
+                list_add_tail(&target->req_ring[i].list, &target->free_reqs);
 
         ret = srp_connect_target(target);
         if (ret)
···
         return len;
 }
 
-static void srp_unmap_data(struct scsi_cmnd *scmnd,
-                           struct srp_target_port *target,
-                           struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
 {
-        struct scatterlist *scat;
-        int nents;
-
-        if (!scmnd->request_buffer ||
-            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
-             scmnd->sc_data_direction != DMA_FROM_DEVICE))
-                return;
-
-        /*
-         * This handling of non-SG commands can be killed when the
-         * SCSI midlayer no longer generates non-SG commands.
-         */
-        if (likely(scmnd->use_sg)) {
-                nents = scmnd->use_sg;
-                scat  = scmnd->request_buffer;
-        } else {
-                nents = 1;
-                scat  = &req->fake_sg;
-        }
-
-        dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
-                     scmnd->sc_data_direction);
-}
-
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req,
-                           int index)
-{
-        list_del(&req->list);
-        req->next = target->req_head;
-        target->req_head = index;
+        srp_unmap_data(req->scmnd, target, req);
+        list_move_tail(&req->list, &target->free_reqs);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
···
                 req->tsk_status = rsp->data[3];
                 complete(&req->done);
         } else {
-                scmnd   = req->scmnd;
+                scmnd = req->scmnd;
                 if (!scmnd)
                         printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
                                (unsigned long long) rsp->tag);
···
                 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
                         scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
 
-                srp_unmap_data(scmnd, target, req);
-
                 if (!req->tsk_mgmt) {
-                        req->scmnd = NULL;
                         scmnd->host_scribble = (void *) -1L;
                         scmnd->scsi_done(scmnd);
 
-                        srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT);
+                        srp_remove_req(target, req);
                 } else
                         req->cmd_done = 1;
         }
···
         struct srp_request *req;
         struct srp_iu *iu;
         struct srp_cmd *cmd;
-        long req_index;
         int len;
 
         if (target->state == SRP_TARGET_CONNECTING)
···
         dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
                                 SRP_MAX_IU_LEN, DMA_TO_DEVICE);
 
-        req_index = target->req_head;
+        req = list_entry(target->free_reqs.next, struct srp_request, list);
 
         scmnd->scsi_done = done;
         scmnd->result = 0;
-        scmnd->host_scribble = (void *) req_index;
+        scmnd->host_scribble = (void *) (long) req->index;
 
         cmd = iu->buf;
         memset(cmd, 0, sizeof *cmd);
 
         cmd->opcode = SRP_CMD;
         cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
-        cmd->tag = req_index;
+        cmd->tag = req->index;
         memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
-
-        req = &target->req_ring[req_index];
 
         req->scmnd = scmnd;
         req->cmd = iu;
···
                 goto err_unmap;
         }
 
-        target->req_head = req->next;
-        list_add_tail(&req->list, &target->req_queue);
+        list_move_tail(&req->list, &target->req_queue);
 
         return 0;
 
···
         return 0;
 }
 
-static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
+static int srp_send_tsk_mgmt(struct srp_target_port *target,
+                             struct srp_request *req, u8 func)
 {
-        struct srp_target_port *target = host_to_target(scmnd->device->host);
-        struct srp_request *req;
         struct srp_iu *iu;
         struct srp_tsk_mgmt *tsk_mgmt;
-        int req_index;
-        int ret = FAILED;
 
         spin_lock_irq(target->scsi_host->host_lock);
 
         if (target->state == SRP_TARGET_DEAD ||
             target->state == SRP_TARGET_REMOVED) {
-                scmnd->result = DID_BAD_TARGET << 16;
+                req->scmnd->result = DID_BAD_TARGET << 16;
                 goto out;
         }
 
-        if (scmnd->host_scribble == (void *) -1L)
-                goto out;
-
-        req_index = (long) scmnd->host_scribble;
-        printk(KERN_ERR "Abort for req_index %d\n", req_index);
-
-        req = &target->req_ring[req_index];
         init_completion(&req->done);
 
         iu = __srp_get_tx_iu(target);
···
         memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
         tsk_mgmt->opcode = SRP_TSK_MGMT;
-        tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
-        tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
+        tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
+        tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
         tsk_mgmt->tsk_mgmt_func = func;
-        tsk_mgmt->task_tag = req_index;
+        tsk_mgmt->task_tag = req->index;
 
         if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
                 goto out;
···
         req->tsk_mgmt = iu;
 
         spin_unlock_irq(target->scsi_host->host_lock);
+
         if (!wait_for_completion_timeout(&req->done,
                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
-                return FAILED;
-        spin_lock_irq(target->scsi_host->host_lock);
+                return -1;
 
-        if (req->cmd_done) {
-                srp_remove_req(target, req, req_index);
-                scmnd->scsi_done(scmnd);
-        } else if (!req->tsk_status) {
-                srp_remove_req(target, req, req_index);
-                scmnd->result = DID_ABORT << 16;
-                ret = SUCCESS;
-        }
+        return 0;
 
 out:
         spin_unlock_irq(target->scsi_host->host_lock);
-        return ret;
+        return -1;
+}
+
+static int srp_find_req(struct srp_target_port *target,
+                        struct scsi_cmnd *scmnd,
+                        struct srp_request **req)
+{
+        if (scmnd->host_scribble == (void *) -1L)
+                return -1;
+
+        *req = &target->req_ring[(long) scmnd->host_scribble];
+
+        return 0;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
+        struct srp_target_port *target = host_to_target(scmnd->device->host);
+        struct srp_request *req;
+        int ret = SUCCESS;
+
         printk(KERN_ERR "SRP abort called\n");
 
-        return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
+        if (srp_find_req(target, scmnd, &req))
+                return FAILED;
+        if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+                return FAILED;
+
+        spin_lock_irq(target->scsi_host->host_lock);
+
+        if (req->cmd_done) {
+                srp_remove_req(target, req);
+                scmnd->scsi_done(scmnd);
+        } else if (!req->tsk_status) {
+                srp_remove_req(target, req);
+                scmnd->result = DID_ABORT << 16;
+        } else
+                ret = FAILED;
+
+        spin_unlock_irq(target->scsi_host->host_lock);
+
+        return ret;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
+        struct srp_target_port *target = host_to_target(scmnd->device->host);
+        struct srp_request *req, *tmp;
+
         printk(KERN_ERR "SRP reset_device called\n");
 
-        return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
+        if (srp_find_req(target, scmnd, &req))
+                return FAILED;
+        if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+                return FAILED;
+        if (req->tsk_status)
+                return FAILED;
+
+        spin_lock_irq(target->scsi_host->host_lock);
+
+        list_for_each_entry_safe(req, tmp, &target->req_queue, list)
+                if (req->scmnd->device == scmnd->device) {
+                        req->scmnd->result = DID_RESET << 16;
+                        scmnd->scsi_done(scmnd);
+                        srp_remove_req(target, req);
+                }
+
+        spin_unlock_irq(target->scsi_host->host_lock);
+
+        return SUCCESS;
 }
 
 static int srp_reset_host(struct scsi_cmnd *scmnd)
···
 
         INIT_WORK(&target->work, srp_reconnect_work, target);
 
-        for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
-                target->req_ring[i].next = i + 1;
-        target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+        INIT_LIST_HEAD(&target->free_reqs);
         INIT_LIST_HEAD(&target->req_queue);
+        for (i = 0; i < SRP_SQ_SIZE; ++i) {
+                target->req_ring[i].index = i;
+                list_add_tail(&target->req_ring[i].list, &target->free_reqs);
+        }
 
         ret = srp_parse_options(buf, target);
         if (ret)
+2 -2
drivers/infiniband/ulp/srp/ib_srp.h
···
          */
         struct scatterlist fake_sg;
         struct completion done;
-        short next;
+        short index;
         u8 cmd_done;
         u8 tsk_status;
 };
···
         unsigned tx_tail;
         struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
 
-        int req_head;
+        struct list_head free_reqs;
         struct list_head req_queue;
         struct srp_request req_ring[SRP_SQ_SIZE];
 
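Taken together, the ib_srp changes replace the index-chained free list (req->next, req_head) with two list_head lists, free_reqs and req_queue, so every request is always on exactly one list and the error handlers can walk req_queue to find what is still pending. Below is a self-contained sketch of that pattern using the kernel's <linux/list.h> helpers; the demo_* names are invented and this is not the driver code.

/* Sketch of the two-list request tracking pattern introduced above. */
#include <linux/list.h>

struct demo_req {
        struct list_head list;
        short            index;
};

struct demo_target {
        struct list_head free_reqs;     /* requests available for new commands */
        struct list_head req_queue;     /* requests posted and awaiting a response */
        struct demo_req  ring[16];
};

static void demo_init(struct demo_target *t)
{
        int i;

        INIT_LIST_HEAD(&t->free_reqs);
        INIT_LIST_HEAD(&t->req_queue);
        for (i = 0; i < 16; ++i) {
                t->ring[i].index = i;
                list_add_tail(&t->ring[i].list, &t->free_reqs);
        }
}

static struct demo_req *demo_get(struct demo_target *t)
{
        struct demo_req *req;

        if (list_empty(&t->free_reqs))
                return NULL;
        req = list_entry(t->free_reqs.next, struct demo_req, list);
        list_move_tail(&req->list, &t->req_queue);      /* now pending */
        return req;
}

static void demo_put(struct demo_target *t, struct demo_req *req)
{
        list_move_tail(&req->list, &t->free_reqs);      /* back to the free pool */
}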