Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/iwcm: Don't access a cm_id after dropping reference
  IB/iser: Handle iser_device allocation error gracefully
  IB/iser: Fix list iteration bug
  RDMA/cxgb3: Fix iwch_create_cq() off-by-one error
  RDMA/cxgb3: Return correct max_inline_data when creating a QP
  IB/fmr_pool: Flush all dirty FMRs from ib_fmr_pool_flush()
  Revert "IB/fmr_pool: ib_fmr_pool_flush() should flush all dirty FMRs"
  IB/cm: Flush workqueue when removing device
  MAINTAINERS: update ipath owner

+56 -44
+1 -1
MAINTAINERS
@@ -2156,7 +2156,7 @@
 S:      Maintained

 IPATH DRIVER:
-P:      Arthur Jones
+P:      Ralph Campbell
 M:      infinipath@qlogic.com
 L:      general@lists.openfabrics.org
 T:      git git://git.qlogic.com/ipath-linux-2.6
+2 -1
drivers/infiniband/core/cm.c
@@ -3759,6 +3759,7 @@
                port = cm_dev->port[i-1];
                ib_modify_port(device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
+               flush_workqueue(cm.wq);
                cm_remove_port_fs(port);
        }
        kobject_put(&cm_dev->dev_obj);
@@ -3813,6 +3814,7 @@
                cancel_delayed_work(&timewait_info->work.work);
        spin_unlock_irq(&cm.lock);

+       ib_unregister_client(&cm_client);
        destroy_workqueue(cm.wq);

        list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
@@ -3820,7 +3822,6 @@
                kfree(timewait_info);
        }

-       ib_unregister_client(&cm_client);
        class_unregister(&cm_class);
        idr_destroy(&cm.local_id_table);
 }
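Both cm.c hunks are ordering fixes: flush_workqueue() makes sure queued port work has finished before the port is torn down, and moving ib_unregister_client() ahead of destroy_workqueue() stops new work from being submitted to a workqueue that is about to go away. Below is a minimal userspace sketch of the same teardown rule, using pthreads instead of kernel workqueues; all names in it are illustrative, not from the kernel tree.

        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct work {
                struct work *next;
                void (*fn)(void *arg);
                void *arg;
        };

        static struct work *head;
        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
        static int stopping;

        static void queue_work(void (*fn)(void *), void *arg)
        {
                struct work *w = malloc(sizeof(*w));

                if (!w)
                        abort();
                w->fn = fn;
                w->arg = arg;
                pthread_mutex_lock(&lock);
                w->next = head;         /* LIFO is fine for the demo */
                head = w;
                pthread_cond_signal(&cond);
                pthread_mutex_unlock(&lock);
        }

        static void *worker(void *arg)
        {
                (void)arg;
                pthread_mutex_lock(&lock);
                while (!stopping || head) {
                        while (head) {
                                struct work *w = head;

                                head = w->next;
                                pthread_mutex_unlock(&lock);
                                w->fn(w->arg);  /* run without the lock held */
                                free(w);
                                pthread_mutex_lock(&lock);
                        }
                        if (stopping)
                                break;
                        pthread_cond_wait(&cond, &lock);
                }
                pthread_mutex_unlock(&lock);
                return NULL;
        }

        static void port_work(void *arg)
        {
                printf("handling event for port %d\n", *(int *)arg);
        }

        int main(void)
        {
                pthread_t thr;
                int *port = malloc(sizeof(*port));

                if (!port)
                        abort();
                *port = 1;
                pthread_create(&thr, NULL, worker, NULL);
                queue_work(port_work, port);

                /*
                 * Teardown in the same order the fixed cm.c enforces:
                 * first stop new submissions (the ib_unregister_client()
                 * analogue), then drain the queue (the flush_workqueue()/
                 * destroy_workqueue() analogue), and only then free state
                 * the queued work dereferences.  Freeing 'port' before the
                 * join would be exactly the use-after-free being fixed.
                 */
                pthread_mutex_lock(&lock);
                stopping = 1;
                pthread_cond_signal(&cond);
                pthread_mutex_unlock(&lock);
                pthread_join(thr, NULL);
                free(port);
                return 0;
        }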
+22 -16
drivers/infiniband/core/fmr_pool.c
@@ -139,7 +139,7 @@
 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 {
        int ret;
-       struct ib_pool_fmr *fmr, *next;
+       struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

@@ -156,20 +156,6 @@
                               fmr, fmr->ref_count);
                }
 #endif
-       }
-
-       /*
-        * The free_list may hold FMRs that have been put there
-        * because they haven't reached the max_remap count.
-        * Invalidate their mapping as well.
-        */
-       list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
-               if (fmr->remap_count == 0)
-                       continue;
-               hlist_del_init(&fmr->cache_node);
-               fmr->remap_count = 0;
-               list_add_tail(&fmr->fmr->list, &fmr_list);
-               list_move(&fmr->list, &unmap_list);
        }

        list_splice(&pool->dirty_list, &unmap_list);
@@ -384,6 +370,11 @@

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+               if (fmr->remap_count) {
+                       INIT_LIST_HEAD(&fmr_list);
+                       list_add_tail(&fmr->fmr->list, &fmr_list);
+                       ib_unmap_fmr(&fmr_list);
+               }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
@@ -407,8 +398,23 @@
  */
 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
 {
-       int serial = atomic_inc_return(&pool->req_ser);
+       int serial;
+       struct ib_pool_fmr *fmr, *next;

+       /*
+        * The free_list holds FMRs that may have been used
+        * but have not been remapped enough times to be dirty.
+        * Put them on the dirty list now so that the cleanup
+        * thread will reap them too.
+        */
+       spin_lock_irq(&pool->pool_lock);
+       list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+               if (fmr->remap_count > 0)
+                       list_move(&fmr->list, &pool->dirty_list);
+       }
+       spin_unlock_irq(&pool->pool_lock);
+
+       serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
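The net effect of the fmr_pool.c change is to move the "flush FMRs that were used but are not yet dirty" handling out of ib_fmr_batch_release() and into ib_flush_fmr_pool(), which migrates such entries from the free list to the dirty list under pool_lock so the cleanup thread reaps everything in one pass. A userspace sketch of that list-migration pattern follows; the names are illustrative and plain singly linked lists stand in for the kernel's list_head.

        #include <pthread.h>
        #include <stdio.h>

        struct pool_entry {
                struct pool_entry *next;
                int id;
                int remap_count;
        };

        struct pool {
                pthread_mutex_t lock;
                struct pool_entry *free_list;
                struct pool_entry *dirty_list;
        };

        /*
         * Move every used entry from free_list to dirty_list, as the fixed
         * ib_flush_fmr_pool() does with list_move() under pool_lock.
         */
        static void pool_flush(struct pool *p)
        {
                struct pool_entry **pp, *e;

                pthread_mutex_lock(&p->lock);
                pp = &p->free_list;
                while ((e = *pp) != NULL) {
                        if (e->remap_count > 0) {
                                *pp = e->next;           /* unlink from free list */
                                e->next = p->dirty_list; /* push onto dirty list */
                                p->dirty_list = e;
                        } else {
                                pp = &e->next;
                        }
                }
                pthread_mutex_unlock(&p->lock);
        }

        /* Stand-in for the batch-release path: invalidate dirty entries. */
        static void pool_reap(struct pool *p)
        {
                struct pool_entry *e;

                pthread_mutex_lock(&p->lock);
                while ((e = p->dirty_list) != NULL) {
                        p->dirty_list = e->next;
                        printf("unmapping entry %d (remap_count %d)\n",
                               e->id, e->remap_count);
                        e->remap_count = 0;
                        e->next = p->free_list;  /* back on the free list, clean */
                        p->free_list = e;
                }
                pthread_mutex_unlock(&p->lock);
        }

        int main(void)
        {
                struct pool p = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
                struct pool_entry e[3] = {
                        { &e[1], 0, 2 },        /* used, but on the free list */
                        { &e[2], 1, 0 },        /* never mapped: stays put */
                        { NULL,  2, 1 },        /* used, but on the free list */
                };

                p.free_list = &e[0];
                pool_flush(&p);   /* used entries migrate to dirty_list */
                pool_reap(&p);    /* ...and get invalidated in one pass */
                return 0;
        }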
+3 -2
drivers/infiniband/core/iwcm.c
@@ -839,6 +839,7 @@
        unsigned long flags;
        int empty;
        int ret = 0;
+       int destroy_id;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
@@ -857,9 +858,9 @@
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
+               destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                if (iwcm_deref_id(cm_id_priv)) {
-                       if (test_bit(IWCM_F_CALLBACK_DESTROY,
-                                    &cm_id_priv->flags)) {
+                       if (destroy_id) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
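The iwcm.c fix is a use-after-free rule worth spelling out: once iwcm_deref_id() drops the reference, the cm_id may already be gone, so the IWCM_F_CALLBACK_DESTROY bit must be latched into a local before the deref. A standalone sketch of the rule is below; the names are illustrative, and for brevity the free is folded into deref() here, whereas the kernel frees in the caller. The hazard is identical: the object may not be touched after the reference is dropped.

        #include <stdio.h>
        #include <stdlib.h>

        struct cm_obj {
                int refcount;
                int destroy_flag;
        };

        /* Returns nonzero when this call dropped the last reference. */
        static int deref(struct cm_obj *obj)
        {
                if (--obj->refcount == 0) {
                        free(obj);      /* the memory is gone after this */
                        return 1;
                }
                return 0;
        }

        static void handler(struct cm_obj *obj)
        {
                /*
                 * Latch the flag first, as the patch does with
                 * test_bit(IWCM_F_CALLBACK_DESTROY, ...).  Reading
                 * obj->destroy_flag inside the branch below would be a
                 * use-after-free whenever deref() freed the object.
                 */
                int destroy = obj->destroy_flag;

                if (deref(obj)) {
                        if (destroy)    /* safe: local copy, not obj->... */
                                printf("last ref dropped, finishing destroy\n");
                }
        }

        int main(void)
        {
                struct cm_obj *obj = malloc(sizeof(*obj));

                if (!obj)
                        abort();
                obj->refcount = 1;
                obj->destroy_flag = 1;
                handler(obj);   /* frees obj and completes the destroy */
                return 0;
        }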
+4 -1
drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -189,7 +189,7 @@
                return ERR_PTR(-ENOMEM);
        }
        chp->rhp = rhp;
-       chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
+       chp->ibcq.cqe = 1 << chp->cq.size_log2;
        spin_lock_init(&chp->lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
@@ -819,8 +819,11 @@
                kfree(qhp);
                return ERR_PTR(-ENOMEM);
        }
+
        attrs->cap.max_recv_wr = rqsize - 1;
        attrs->cap.max_send_wr = sqsize;
+       attrs->cap.max_inline_data = T3_MAX_INLINE;
+
        qhp->rhp = rhp;
        qhp->attr.pd = php->pdid;
        qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
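The cqe change belongs to a classic class of power-of-two ring off-by-ones: the diff shows the driver now reports the full 1 << size_log2 entries instead of one less. Whether all 2^n slots or only 2^n - 1 of a ring are usable depends on how "full" is distinguished from "empty". The sketch below is generic background on that convention, not cxgb3 code: with free-running head/tail counters, every slot is usable, while a head == tail "empty" test costs one slot.

        #include <stdio.h>

        #define LOG2    3
        #define SIZE    (1u << LOG2)

        static unsigned int head, tail; /* free-running; index with & (SIZE - 1) */
        static int ring[SIZE];

        static int ring_post(int v)
        {
                if (head - tail == SIZE)        /* full: all SIZE slots in use */
                        return -1;
                ring[head++ & (SIZE - 1)] = v;
                return 0;
        }

        int main(void)
        {
                unsigned int n = 0;

                while (ring_post(n) == 0)
                        n++;
                /* All 1 << LOG2 slots are usable with this convention. */
                printf("capacity: %u of %u slots\n", n, SIZE);
                return 0;
        }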
+24 -23
drivers/infiniband/ulp/iser/iser_verbs.c
··· 237 237 static 238 238 struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) 239 239 { 240 - struct list_head *p_list; 241 - struct iser_device *device = NULL; 240 + struct iser_device *device; 242 241 243 242 mutex_lock(&ig.device_list_mutex); 244 243 245 - p_list = ig.device_list.next; 246 - while (p_list != &ig.device_list) { 247 - device = list_entry(p_list, struct iser_device, ig_list); 244 + list_for_each_entry(device, &ig.device_list, ig_list) 248 245 /* find if there's a match using the node GUID */ 249 246 if (device->ib_device->node_guid == cma_id->device->node_guid) 250 - break; 251 - } 247 + goto inc_refcnt; 252 248 253 - if (device == NULL) { 254 - device = kzalloc(sizeof *device, GFP_KERNEL); 255 - if (device == NULL) 256 - goto out; 257 - /* assign this device to the device */ 258 - device->ib_device = cma_id->device; 259 - /* init the device and link it into ig device list */ 260 - if (iser_create_device_ib_res(device)) { 261 - kfree(device); 262 - device = NULL; 263 - goto out; 264 - } 265 - list_add(&device->ig_list, &ig.device_list); 249 + device = kzalloc(sizeof *device, GFP_KERNEL); 250 + if (device == NULL) 251 + goto out; 252 + 253 + /* assign this device to the device */ 254 + device->ib_device = cma_id->device; 255 + /* init the device and link it into ig device list */ 256 + if (iser_create_device_ib_res(device)) { 257 + kfree(device); 258 + device = NULL; 259 + goto out; 266 260 } 267 - out: 268 - BUG_ON(device == NULL); 261 + list_add(&device->ig_list, &ig.device_list); 262 + 263 + inc_refcnt: 269 264 device->refcount++; 265 + out: 270 266 mutex_unlock(&ig.device_list_mutex); 271 267 return device; 272 268 } ··· 368 372 int ret; 369 373 370 374 device = iser_device_find_by_ib_device(cma_id); 375 + if (!device) { 376 + iser_err("device lookup/creation failed\n"); 377 + iser_connect_error(cma_id); 378 + return; 379 + } 380 + 371 381 ib_conn = (struct iser_conn *)cma_id->context; 372 382 ib_conn->device = device; 373 383 ··· 382 380 iser_err("resolve route failed: %d\n", ret); 383 381 iser_connect_error(cma_id); 384 382 } 385 - return; 386 383 } 387 384 388 385 static void iser_route_handler(struct rdma_cm_id *cma_id)