Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA/iwcm: Don't access a cm_id after dropping reference
IB/iser: Handle iser_device allocation error gracefully
IB/iser: Fix list iteration bug
RDMA/cxgb3: Fix iwch_create_cq() off-by-one error
RDMA/cxgb3: Return correct max_inline_data when creating a QP
IB/fmr_pool: Flush all dirty FMRs from ib_fmr_pool_flush()
Revert "IB/fmr_pool: ib_fmr_pool_flush() should flush all dirty FMRs"
IB/cm: Flush workqueue when removing device
MAINTAINERS: update ipath owner

6 files changed, 56 insertions(+), 44 deletions(-)
MAINTAINERS | +1 -1
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2156,7 +2156,7 @@
 S: Maintained
 
 IPATH DRIVER:
-P: Arthur Jones
+P: Ralph Campbell
 M: infinipath@qlogic.com
 L: general@lists.openfabrics.org
 T: git git://git.qlogic.com/ipath-linux-2.6
drivers/infiniband/core/cm.c | +2 -1
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3759,6 +3759,7 @@
 		port = cm_dev->port[i-1];
 		ib_modify_port(device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
+		flush_workqueue(cm.wq);
 		cm_remove_port_fs(port);
 	}
 	kobject_put(&cm_dev->dev_obj);
@@ -3814,6 +3813,7 @@
 		cancel_delayed_work(&timewait_info->work.work);
 	spin_unlock_irq(&cm.lock);
 
+	ib_unregister_client(&cm_client);
 	destroy_workqueue(cm.wq);
 
 	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
@@ -3822,7 +3820,6 @@
 		kfree(timewait_info);
 	}
 
-	ib_unregister_client(&cm_client);
 	class_unregister(&cm_class);
 	idr_destroy(&cm.local_id_table);
 }
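The two hunks enforce the standard teardown ordering for deferred work: stop new work from being queued, drain what is already queued, and only then tear down the state the work items dereference. Flushing cm.wq before cm_remove_port_fs() keeps queued CM work from running against a half-removed port, and moving ib_unregister_client() ahead of destroy_workqueue() means the device-removal path still has a live workqueue to flush. A minimal sketch of the same rule, with hypothetical names (my_dev, my_unregister_event_source) rather than anything from cm.c:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;
};

static void my_dev_remove(struct my_dev *dev)
{
	my_unregister_event_source(dev);	/* hypothetical: no new work after this */
	flush_workqueue(dev->wq);		/* drain work already queued */
	destroy_workqueue(dev->wq);
	kfree(dev);				/* nothing can touch dev anymore */
}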
drivers/infiniband/core/fmr_pool.c | +22 -16
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -139,7 +139,7 @@
 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 {
 	int ret;
-	struct ib_pool_fmr *fmr, *next;
+	struct ib_pool_fmr *fmr;
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(fmr_list);
 
@@ -156,20 +156,6 @@
 			   fmr, fmr->ref_count);
 		}
 #endif
-	}
-
-	/*
-	 * The free_list may hold FMRs that have been put there
-	 * because they haven't reached the max_remap count.
-	 * Invalidate their mapping as well.
-	 */
-	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
-		if (fmr->remap_count == 0)
-			continue;
-		hlist_del_init(&fmr->cache_node);
-		fmr->remap_count = 0;
-		list_add_tail(&fmr->fmr->list, &fmr_list);
-		list_move(&fmr->list, &unmap_list);
 	}
 
 	list_splice(&pool->dirty_list, &unmap_list);
@@ -370,6 +384,11 @@
 
 	i = 0;
 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+		if (fmr->remap_count) {
+			INIT_LIST_HEAD(&fmr_list);
+			list_add_tail(&fmr->fmr->list, &fmr_list);
+			ib_unmap_fmr(&fmr_list);
+		}
 		ib_dealloc_fmr(fmr->fmr);
 		list_del(&fmr->list);
 		kfree(fmr);
@@ -398,8 +407,23 @@
  */
 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
 {
-	int serial = atomic_inc_return(&pool->req_ser);
+	int serial;
+	struct ib_pool_fmr *fmr, *next;
 
+	/*
+	 * The free_list holds FMRs that may have been used
+	 * but have not been remapped enough times to be dirty.
+	 * Put them on the dirty list now so that the cleanup
+	 * thread will reap them too.
+	 */
+	spin_lock_irq(&pool->pool_lock);
+	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+		if (fmr->remap_count > 0)
+			list_move(&fmr->list, &pool->dirty_list);
+	}
+	spin_unlock_irq(&pool->pool_lock);
+
+	serial = atomic_inc_return(&pool->req_ser);
 	wake_up_process(pool->thread);
 
 	if (wait_event_interruptible(pool->force_wait,
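Together with the revert below it, this moves the "flush even not-yet-dirty FMRs" logic out of ib_fmr_batch_release() and into ib_flush_fmr_pool() itself: used-but-unmapped FMRs are migrated from the free list to the dirty list under pool_lock and the pool thread is woken to unmap them, and ib_destroy_fmr_pool() likewise unmaps any still-mapped FMR before deallocating it. The migrate-then-wake pattern, as a sketch with hypothetical types (my_pool, my_entry), not the fmr_pool structures themselves:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_entry {
	struct list_head	list;
	int			remap_count;
};

struct my_pool {
	spinlock_t		lock;
	struct list_head	free_list;
	struct list_head	dirty_list;
	struct task_struct	*reaper;	/* unmaps everything on dirty_list */
};

static void my_pool_force_flush(struct my_pool *pool)
{
	struct my_entry *e, *next;

	/* Anything that still carries a mapping becomes reaper work. */
	spin_lock_irq(&pool->lock);
	list_for_each_entry_safe(e, next, &pool->free_list, list)
		if (e->remap_count > 0)
			list_move(&e->list, &pool->dirty_list);
	spin_unlock_irq(&pool->lock);

	wake_up_process(pool->reaper);
}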
drivers/infiniband/core/iwcm.c | +3 -2
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -839,6 +839,7 @@
 	unsigned long flags;
 	int empty;
 	int ret = 0;
+	int destroy_id;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	empty = list_empty(&cm_id_priv->work_list);
@@ -858,9 +857,9 @@
 		destroy_cm_id(&cm_id_priv->id);
 	}
 	BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
+	destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 	if (iwcm_deref_id(cm_id_priv)) {
-		if (test_bit(IWCM_F_CALLBACK_DESTROY,
-			     &cm_id_priv->flags)) {
+		if (destroy_id) {
 			BUG_ON(!list_empty(&cm_id_priv->work_list));
 			free_cm_id(cm_id_priv);
 		}
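The bug was a potential use-after-free: once iwcm_deref_id() drops our reference, another thread may free cm_id_priv, so the old code's test_bit() on cm_id_priv->flags could touch freed memory. The fix samples the flag into a local while the reference is still held. Reduced to a runnable userspace sketch, with C11 atomics standing in for kernel refcounting (obj and its fields are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	bool destroy_flag;
};

/* Returns true if this call dropped the last reference. */
static bool obj_deref(struct obj *o)
{
	return atomic_fetch_sub(&o->refcount, 1) == 1;
}

static void obj_release(struct obj *o)
{
	/* Sample the flag while we still hold a reference; reading
	 * o->destroy_flag after obj_deref() would race with whoever
	 * drops the final reference and frees o. */
	bool destroy = o->destroy_flag;

	if (obj_deref(o) && destroy)
		free(o);
}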
drivers/infiniband/hw/cxgb3/iwch_provider.c | +4 -1
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -189,7 +189,7 @@
 		return ERR_PTR(-ENOMEM);
 	}
 	chp->rhp = rhp;
-	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
+	chp->ibcq.cqe = 1 << chp->cq.size_log2;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
@@ -819,8 +819,11 @@
 		kfree(qhp);
 		return ERR_PTR(-ENOMEM);
 	}
+
 	attrs->cap.max_recv_wr = rqsize - 1;
 	attrs->cap.max_send_wr = sqsize;
+	attrs->cap.max_inline_data = T3_MAX_INLINE;
+
 	qhp->rhp = rhp;
 	qhp->attr.pd = php->pdid;
 	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
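Both hunks correct values the driver reports back to its consumers: ibcq.cqe is the number of usable CQ entries, and the cap fields of the QP init attributes are the provider's write-back of what the QP can actually do, so max_inline_data must be filled in (T3_MAX_INLINE for this hardware) or callers will believe inline sends are unsupported. A hedged sketch of a consumer relying on that write-back; the pd/scq/rcq parameters and the cap numbers chosen here are illustrative only:

#include <rdma/ib_verbs.h>

static struct ib_qp *create_qp_checked(struct ib_pd *pd,
				       struct ib_cq *scq, struct ib_cq *rcq)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= scq,
		.recv_cq	= rcq,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.qp_type	= IB_QPT_RC,
		.cap		= {
			.max_send_wr	= 64,
			.max_recv_wr	= 64,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};
	struct ib_qp *qp = ib_create_qp(pd, &attr);

	/* On success the provider has updated attr.cap with the real
	 * limits, including how many bytes an inline send may carry. */
	if (!IS_ERR(qp))
		pr_info("max inline: %u bytes\n", attr.cap.max_inline_data);
	return qp;
}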
drivers/infiniband/ulp/iser/iser_verbs.c | +24 -23
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -237,36 +237,32 @@
 static
 struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
 {
-	struct list_head    *p_list;
-	struct iser_device  *device = NULL;
+	struct iser_device *device;
 
 	mutex_lock(&ig.device_list_mutex);
 
-	p_list = ig.device_list.next;
-	while (p_list != &ig.device_list) {
-		device = list_entry(p_list, struct iser_device, ig_list);
+	list_for_each_entry(device, &ig.device_list, ig_list)
 		/* find if there's a match using the node GUID */
 		if (device->ib_device->node_guid == cma_id->device->node_guid)
-			break;
-	}
+			goto inc_refcnt;
 
-	if (device == NULL) {
-		device = kzalloc(sizeof *device, GFP_KERNEL);
-		if (device == NULL)
-			goto out;
-		/* assign this device to the device */
-		device->ib_device = cma_id->device;
-		/* init the device and link it into ig device list */
-		if (iser_create_device_ib_res(device)) {
-			kfree(device);
-			device = NULL;
-			goto out;
-		}
-		list_add(&device->ig_list, &ig.device_list);
+	device = kzalloc(sizeof *device, GFP_KERNEL);
+	if (device == NULL)
+		goto out;
+
+	/* assign this device to the device */
+	device->ib_device = cma_id->device;
+	/* init the device and link it into ig device list */
+	if (iser_create_device_ib_res(device)) {
+		kfree(device);
+		device = NULL;
+		goto out;
 	}
- out:
-	BUG_ON(device == NULL);
+	list_add(&device->ig_list, &ig.device_list);
+
+inc_refcnt:
 	device->refcount++;
+out:
 	mutex_unlock(&ig.device_list_mutex);
 	return device;
 }
@@ -368,6 +372,12 @@
 	int ret;
 
 	device = iser_device_find_by_ib_device(cma_id);
+	if (!device) {
+		iser_err("device lookup/creation failed\n");
+		iser_connect_error(cma_id);
+		return;
+	}
+
 	ib_conn = (struct iser_conn *)cma_id->context;
 	ib_conn->device = device;
 
@@ -382,5 +380,4 @@
 		iser_err("resolve route failed: %d\n", ret);
 		iser_connect_error(cma_id);
 	}
-	return;
 }
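Two bugs die here: the open-coded walk never advanced p_list, so a list whose first entry didn't match spun forever, and an allocation failure tripped BUG_ON() instead of failing the connect; the rewrite uses list_for_each_entry and returns NULL so the address handler can abort the connection cleanly. The iteration idiom, as a self-contained sketch with hypothetical types (item, find_by_guid):

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head	node;
	u64			guid;
};

/* list_for_each_entry() advances the cursor itself, which is exactly
 * what the hand-rolled while loop forgot to do. Callers must handle
 * the NULL return instead of leaning on BUG_ON(). */
static struct item *find_by_guid(struct list_head *head, u64 guid)
{
	struct item *it;

	list_for_each_entry(it, head, node)
		if (it->guid == guid)
			return it;
	return NULL;
}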