Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
IB/mad: Fix race between cancel and receive completion
RDMA/amso1100: Fix && typo
RDMA/amso1100: Fix uninitialized pseudo_netdev accessed in c2_register_device
IB/ehca: Activate scaling code by default
IB/ehca: Use named constant for max mtu
IB/ehca: Assure 4K alignment for firmware control blocks

+111 -61
+1 -1
drivers/infiniband/core/mad.c
··· 1750 */ 1751 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || 1752 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1753 - return wr; 1754 } 1755 1756 /*
··· 1750 */ 1751 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || 1752 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1753 + return (wr->status == IB_WC_SUCCESS) ? wr : NULL; 1754 } 1755 1756 /*
+2 -1
drivers/infiniband/hw/amso1100/c2.c
··· 1155 goto bail10; 1156 } 1157 1158 - c2_register_device(c2dev); 1159 1160 return 0; 1161
··· 1155 goto bail10; 1156 } 1157 1158 + if (c2_register_device(c2dev)) 1159 + goto bail10; 1160 1161 return 0; 1162
+20 -19
drivers/infiniband/hw/amso1100/c2_provider.c
··· 757 758 int c2_register_device(struct c2_dev *dev) 759 { 760 - int ret; 761 int i; 762 763 /* Register pseudo network device */ 764 dev->pseudo_netdev = c2_pseudo_netdev_init(dev); 765 - if (dev->pseudo_netdev) { 766 - ret = register_netdev(dev->pseudo_netdev); 767 - if (ret) { 768 - printk(KERN_ERR PFX 769 - "Unable to register netdev, ret = %d\n", ret); 770 - free_netdev(dev->pseudo_netdev); 771 - return ret; 772 - } 773 - } 774 775 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 776 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); ··· 845 846 ret = ib_register_device(&dev->ibdev); 847 if (ret) 848 - return ret; 849 850 for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) { 851 ret = class_device_create_file(&dev->ibdev.class_dev, 852 c2_class_attributes[i]); 853 - if (ret) { 854 - unregister_netdev(dev->pseudo_netdev); 855 - free_netdev(dev->pseudo_netdev); 856 - ib_unregister_device(&dev->ibdev); 857 - return ret; 858 - } 859 } 860 861 - pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 862 - return 0; 863 } 864 865 void c2_unregister_device(struct c2_dev *dev)
··· 757 758 int c2_register_device(struct c2_dev *dev) 759 { 760 + int ret = -ENOMEM; 761 int i; 762 763 /* Register pseudo network device */ 764 dev->pseudo_netdev = c2_pseudo_netdev_init(dev); 765 + if (!dev->pseudo_netdev) 766 + goto out3; 767 + 768 + ret = register_netdev(dev->pseudo_netdev); 769 + if (ret) 770 + goto out2; 771 772 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 773 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); ··· 848 849 ret = ib_register_device(&dev->ibdev); 850 if (ret) 851 + goto out1; 852 853 for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) { 854 ret = class_device_create_file(&dev->ibdev.class_dev, 855 c2_class_attributes[i]); 856 + if (ret) 857 + goto out0; 858 } 859 + goto out3; 860 861 + out0: 862 + ib_unregister_device(&dev->ibdev); 863 + out1: 864 + unregister_netdev(dev->pseudo_netdev); 865 + out2: 866 + free_netdev(dev->pseudo_netdev); 867 + out3: 868 + pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret); 869 + return ret; 870 } 871 872 void c2_unregister_device(struct c2_dev *dev)
+2 -2
drivers/infiniband/hw/amso1100/c2_rnic.c
··· 157 158 props->fw_ver = 159 ((u64)be32_to_cpu(reply->fw_ver_major) << 32) | 160 - ((be32_to_cpu(reply->fw_ver_minor) && 0xFFFF) << 16) | 161 - (be32_to_cpu(reply->fw_ver_patch) && 0xFFFF); 162 memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6); 163 props->max_mr_size = 0xFFFFFFFF; 164 props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
··· 157 158 props->fw_ver = 159 ((u64)be32_to_cpu(reply->fw_ver_major) << 32) | 160 + ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) | 161 + (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF); 162 memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6); 163 props->max_mr_size = 0xFFFFFFFF; 164 props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
+1
drivers/infiniband/hw/ehca/Kconfig
··· 10 config INFINIBAND_EHCA_SCALING 11 bool "Scaling support (EXPERIMENTAL)" 12 depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL 13 ---help--- 14 eHCA scaling support schedules the CQ callbacks to different CPUs. 15
··· 10 config INFINIBAND_EHCA_SCALING 11 bool "Scaling support (EXPERIMENTAL)" 12 depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL 13 + default y 14 ---help--- 15 eHCA scaling support schedules the CQ callbacks to different CPUs. 16
+2 -3
drivers/infiniband/hw/ehca/ehca_av.c
··· 118 } 119 memcpy(&av->av.grh.word_1, &gid, sizeof(gid)); 120 } 121 - /* for the time being we use a hard coded PMTU of 2048 Bytes */ 122 - av->av.pmtu = 4; 123 124 /* dgid comes in grh.word_3 */ 125 memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid, ··· 192 memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid)); 193 } 194 195 - new_ehca_av.pmtu = 4; /* see also comment in create_ah() */ 196 197 memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid, 198 sizeof(ah_attr->grh.dgid));
··· 118 } 119 memcpy(&av->av.grh.word_1, &gid, sizeof(gid)); 120 } 121 + av->av.pmtu = EHCA_MAX_MTU; 122 123 /* dgid comes in grh.word_3 */ 124 memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid, ··· 193 memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid)); 194 } 195 196 + new_ehca_av.pmtu = EHCA_MAX_MTU; 197 198 memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid, 199 sizeof(ah_attr->grh.dgid));
+9 -8
drivers/infiniband/hw/ehca/ehca_hca.c
··· 40 */ 41 42 #include "ehca_tools.h" 43 #include "hcp_if.h" 44 45 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) ··· 50 ib_device); 51 struct hipz_query_hca *rblock; 52 53 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 54 if (!rblock) { 55 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 56 return -ENOMEM; ··· 97 = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX); 98 99 query_device1: 100 - kfree(rblock); 101 102 return ret; 103 } ··· 110 ib_device); 111 struct hipz_query_port *rblock; 112 113 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 114 if (!rblock) { 115 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 116 return -ENOMEM; ··· 163 props->active_speed = 0x1; 164 165 query_port1: 166 - kfree(rblock); 167 168 return ret; 169 } ··· 179 return -EINVAL; 180 } 181 182 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 183 if (!rblock) { 184 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 185 return -ENOMEM; ··· 194 memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16)); 195 196 query_pkey1: 197 - kfree(rblock); 198 199 return ret; 200 } ··· 212 return -EINVAL; 213 } 214 215 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 216 if (!rblock) { 217 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 218 return -ENOMEM; ··· 228 memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64)); 229 230 query_gid1: 231 - kfree(rblock); 232 233 return ret; 234 }
··· 40 */ 41 42 #include "ehca_tools.h" 43 + #include "ehca_iverbs.h" 44 #include "hcp_if.h" 45 46 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) ··· 49 ib_device); 50 struct hipz_query_hca *rblock; 51 52 + rblock = ehca_alloc_fw_ctrlblock(); 53 if (!rblock) { 54 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 55 return -ENOMEM; ··· 96 = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX); 97 98 query_device1: 99 + ehca_free_fw_ctrlblock(rblock); 100 101 return ret; 102 } ··· 109 ib_device); 110 struct hipz_query_port *rblock; 111 112 + rblock = ehca_alloc_fw_ctrlblock(); 113 if (!rblock) { 114 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 115 return -ENOMEM; ··· 162 props->active_speed = 0x1; 163 164 query_port1: 165 + ehca_free_fw_ctrlblock(rblock); 166 167 return ret; 168 } ··· 178 return -EINVAL; 179 } 180 181 + rblock = ehca_alloc_fw_ctrlblock(); 182 if (!rblock) { 183 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 184 return -ENOMEM; ··· 193 memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16)); 194 195 query_pkey1: 196 + ehca_free_fw_ctrlblock(rblock); 197 198 return ret; 199 } ··· 211 return -EINVAL; 212 } 213 214 + rblock = ehca_alloc_fw_ctrlblock(); 215 if (!rblock) { 216 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 217 return -ENOMEM; ··· 227 memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64)); 228 229 query_gid1: 230 + ehca_free_fw_ctrlblock(rblock); 231 232 return ret; 233 }
+8 -9
drivers/infiniband/hw/ehca/ehca_irq.c
··· 45 #include "ehca_tools.h" 46 #include "hcp_if.h" 47 #include "hipz_fns.h" 48 49 #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) 50 #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) ··· 138 u64 *rblock; 139 unsigned long block_count; 140 141 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 142 if (!rblock) { 143 ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); 144 ret = -ENOMEM; 145 goto error_data1; 146 } 147 148 ret = hipz_h_error_data(shca->ipz_hca_handle, 149 resource, 150 rblock, 151 &block_count); 152 153 - if (ret == H_R_STATE) { 154 ehca_err(&shca->ib_device, 155 "No error data is available: %lx.", resource); 156 - } 157 else if (ret == H_SUCCESS) { 158 int length; 159 160 length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]); 161 162 - if (length > PAGE_SIZE) 163 - length = PAGE_SIZE; 164 165 print_error_data(shca, data, rblock, length); 166 - } 167 - else { 168 ehca_err(&shca->ib_device, 169 "Error data could not be fetched: %lx", resource); 170 - } 171 172 - kfree(rblock); 173 174 error_data1: 175 return ret;
··· 45 #include "ehca_tools.h" 46 #include "hcp_if.h" 47 #include "hipz_fns.h" 48 + #include "ipz_pt_fn.h" 49 50 #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) 51 #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) ··· 137 u64 *rblock; 138 unsigned long block_count; 139 140 + rblock = ehca_alloc_fw_ctrlblock(); 141 if (!rblock) { 142 ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); 143 ret = -ENOMEM; 144 goto error_data1; 145 } 146 147 + /* rblock must be 4K aligned and should be 4K large */ 148 ret = hipz_h_error_data(shca->ipz_hca_handle, 149 resource, 150 rblock, 151 &block_count); 152 153 + if (ret == H_R_STATE) 154 ehca_err(&shca->ib_device, 155 "No error data is available: %lx.", resource); 156 else if (ret == H_SUCCESS) { 157 int length; 158 159 length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]); 160 161 + if (length > EHCA_PAGESIZE) 162 + length = EHCA_PAGESIZE; 163 164 print_error_data(shca, data, rblock, length); 165 + } else 166 ehca_err(&shca->ib_device, 167 "Error data could not be fetched: %lx", resource); 168 169 + ehca_free_fw_ctrlblock(rblock); 170 171 error_data1: 172 return ret;
+8
drivers/infiniband/hw/ehca/ehca_iverbs.h
··· 179 180 int ehca_munmap(unsigned long addr, size_t len); 181 182 #endif
··· 179 180 int ehca_munmap(unsigned long addr, size_t len); 181 182 + #ifdef CONFIG_PPC_64K_PAGES 183 + void *ehca_alloc_fw_ctrlblock(void); 184 + void ehca_free_fw_ctrlblock(void *ptr); 185 + #else 186 + #define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL)) 187 + #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) 188 + #endif 189 + 190 #endif
+47 -9
drivers/infiniband/hw/ehca/ehca_main.c
··· 40 * POSSIBILITY OF SUCH DAMAGE. 41 */ 42 43 #include "ehca_classes.h" 44 #include "ehca_iverbs.h" 45 #include "ehca_mrmw.h" ··· 52 MODULE_LICENSE("Dual BSD/GPL"); 53 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 54 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 55 - MODULE_VERSION("SVNEHCA_0017"); 56 57 int ehca_open_aqp1 = 0; 58 int ehca_debug_level = 0; ··· 97 DEFINE_IDR(ehca_qp_idr); 98 DEFINE_IDR(ehca_cq_idr); 99 100 static struct list_head shca_list; /* list of all registered ehcas */ 101 static spinlock_t shca_list_lock; 102 103 static struct timer_list poll_eqs_timer; 104 105 static int ehca_create_slab_caches(void) 106 { ··· 156 goto create_slab_caches5; 157 } 158 159 return 0; 160 161 create_slab_caches5: ··· 191 ehca_cleanup_qp_cache(); 192 ehca_cleanup_cq_cache(); 193 ehca_cleanup_pd_cache(); 194 } 195 196 #define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) ··· 206 u64 h_ret; 207 struct hipz_query_hca *rblock; 208 209 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 210 if (!rblock) { 211 ehca_gen_err("Cannot allocate rblock memory."); 212 return -ENOMEM; ··· 249 shca->sport[1].rate = IB_RATE_30_GBPS; 250 251 num_ports1: 252 - kfree(rblock); 253 return ret; 254 } ··· 258 int ret = 0; 259 struct hipz_query_hca *rblock; 260 261 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 262 if (!rblock) { 263 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 264 return -ENOMEM; ··· 273 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); 274 275 init_node_guid1: 276 - kfree(rblock); 277 return ret; 278 } ··· 469 \ 470 shca = dev->driver_data; \ 471 \ 472 - rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \ 473 if (!rblock) { \ 474 dev_err(dev, "Can't allocate rblock memory."); \ 475 return 0; \ ··· 477 \ 478 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \ 479 dev_err(dev, "Can't query device properties"); \ 480 - kfree(rblock); \ 481 return 0; \ 482 } \ 483 \ 484 data = rblock->name; \ 485 - kfree(rblock); \ 486 \ 487 if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \ 488 return snprintf(buf, 256, "1\n"); \ ··· 790 int ret; 791 792 printk(KERN_INFO "eHCA Infiniband Device Driver " 793 - "(Rel.: SVNEHCA_0017)\n"); 794 idr_init(&ehca_qp_idr); 795 idr_init(&ehca_cq_idr); 796 spin_lock_init(&ehca_qp_idr_lock);
··· 40 * POSSIBILITY OF SUCH DAMAGE. 41 */ 42 43 + #ifdef CONFIG_PPC_64K_PAGES 44 + #include <linux/slab.h> 45 + #endif 46 #include "ehca_classes.h" 47 #include "ehca_iverbs.h" 48 #include "ehca_mrmw.h" ··· 49 MODULE_LICENSE("Dual BSD/GPL"); 50 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 51 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 52 + MODULE_VERSION("SVNEHCA_0018"); 53 54 int ehca_open_aqp1 = 0; 55 int ehca_debug_level = 0; ··· 94 DEFINE_IDR(ehca_qp_idr); 95 DEFINE_IDR(ehca_cq_idr); 96 97 + 98 static struct list_head shca_list; /* list of all registered ehcas */ 99 static spinlock_t shca_list_lock; 100 101 static struct timer_list poll_eqs_timer; 102 + 103 + #ifdef CONFIG_PPC_64K_PAGES 104 + static struct kmem_cache *ctblk_cache = NULL; 105 + 106 + void *ehca_alloc_fw_ctrlblock(void) 107 + { 108 + void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL); 109 + if (!ret) 110 + ehca_gen_err("Out of memory for ctblk"); 111 + return ret; 112 + } 113 + 114 + void ehca_free_fw_ctrlblock(void *ptr) 115 + { 116 + if (ptr) 117 + kmem_cache_free(ctblk_cache, ptr); 118 + 119 + } 120 + #endif 121 122 static int ehca_create_slab_caches(void) 123 { ··· 133 goto create_slab_caches5; 134 } 135 136 + #ifdef CONFIG_PPC_64K_PAGES 137 + ctblk_cache = kmem_cache_create("ehca_cache_ctblk", 138 + EHCA_PAGESIZE, H_CB_ALIGNMENT, 139 + SLAB_HWCACHE_ALIGN, 140 + NULL, NULL); 141 + if (!ctblk_cache) { 142 + ehca_gen_err("Cannot create ctblk SLAB cache."); 143 + ehca_cleanup_mrmw_cache(); 144 + goto create_slab_caches5; 145 + } 146 + #endif 147 return 0; 148 149 create_slab_caches5: ··· 157 ehca_cleanup_qp_cache(); 158 ehca_cleanup_cq_cache(); 159 ehca_cleanup_pd_cache(); 160 + #ifdef CONFIG_PPC_64K_PAGES 161 + if (ctblk_cache) 162 + kmem_cache_destroy(ctblk_cache); 163 + #endif 164 } 165 166 #define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) ··· 168 u64 h_ret; 169 struct hipz_query_hca *rblock; 170 171 + rblock = ehca_alloc_fw_ctrlblock(); 172 if (!rblock) { 173 ehca_gen_err("Cannot allocate rblock memory."); 174 return -ENOMEM; ··· 211 shca->sport[1].rate = IB_RATE_30_GBPS; 212 213 num_ports1: 214 + ehca_free_fw_ctrlblock(rblock); 215 return ret; 216 } ··· 220 int ret = 0; 221 struct hipz_query_hca *rblock; 222 223 + rblock = ehca_alloc_fw_ctrlblock(); 224 if (!rblock) { 225 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 226 return -ENOMEM; ··· 235 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); 236 237 init_node_guid1: 238 + ehca_free_fw_ctrlblock(rblock); 239 return ret; 240 } ··· 431 \ 432 shca = dev->driver_data; \ 433 \ 434 + rblock = ehca_alloc_fw_ctrlblock(); \ 435 if (!rblock) { \ 436 dev_err(dev, "Can't allocate rblock memory."); \ 437 return 0; \ ··· 439 \ 440 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \ 441 dev_err(dev, "Can't query device properties"); \ 442 + ehca_free_fw_ctrlblock(rblock); \ 443 return 0; \ 444 } \ 445 \ 446 data = rblock->name; \ 447 + ehca_free_fw_ctrlblock(rblock); \ 448 \ 449 if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \ 450 return snprintf(buf, 256, "1\n"); \ ··· 752 int ret; 753 754 printk(KERN_INFO "eHCA Infiniband Device Driver " 755 + "(Rel.: SVNEHCA_0018)\n"); 756 idr_init(&ehca_qp_idr); 757 idr_init(&ehca_cq_idr); 758 spin_lock_init(&ehca_qp_idr_lock);
+4 -4
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 1013 u32 i; 1014 u64 *kpage; 1015 1016 - kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 1017 if (!kpage) { 1018 ehca_err(&shca->ib_device, "kpage alloc failed"); 1019 ret = -ENOMEM; ··· 1092 1093 1094 ehca_reg_mr_rpages_exit1: 1095 - kfree(kpage); 1096 ehca_reg_mr_rpages_exit0: 1097 if (ret) 1098 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " ··· 1124 ehca_mrmw_map_acl(acl, &hipz_acl); 1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); 1126 1127 - kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 1128 if (!kpage) { 1129 ehca_err(&shca->ib_device, "kpage alloc failed"); 1130 ret = -ENOMEM; ··· 1181 } 1182 1183 ehca_rereg_mr_rereg1_exit1: 1184 - kfree(kpage); 1185 ehca_rereg_mr_rereg1_exit0: 1186 if ( ret && (ret != -EAGAIN) ) 1187 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
··· 1013 u32 i; 1014 u64 *kpage; 1015 1016 + kpage = ehca_alloc_fw_ctrlblock(); 1017 if (!kpage) { 1018 ehca_err(&shca->ib_device, "kpage alloc failed"); 1019 ret = -ENOMEM; ··· 1092 1093 1094 ehca_reg_mr_rpages_exit1: 1095 + ehca_free_fw_ctrlblock(kpage); 1096 ehca_reg_mr_rpages_exit0: 1097 if (ret) 1098 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " ··· 1124 ehca_mrmw_map_acl(acl, &hipz_acl); 1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); 1126 1127 + kpage = ehca_alloc_fw_ctrlblock(); 1128 if (!kpage) { 1129 ehca_err(&shca->ib_device, "kpage alloc failed"); 1130 ret = -ENOMEM; ··· 1181 } 1182 1183 ehca_rereg_mr_rereg1_exit1: 1184 + ehca_free_fw_ctrlblock(kpage); 1185 ehca_rereg_mr_rereg1_exit0: 1186 if ( ret && (ret != -EAGAIN) ) 1187 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
+5 -5
drivers/infiniband/hw/ehca/ehca_qp.c
··· 811 unsigned long spl_flags = 0; 812 813 /* do query_qp to obtain current attr values */ 814 - mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 815 - if (mqpcb == NULL) { 816 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " 817 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); 818 return -ENOMEM; ··· 1225 } 1226 1227 modify_qp_exit1: 1228 - kfree(mqpcb); 1229 1230 return ret; 1231 } ··· 1277 return -EINVAL; 1278 } 1279 1280 - qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL ); 1281 if (!qpcb) { 1282 ehca_err(qp->device,"Out of memory for qpcb " 1283 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); ··· 1401 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); 1402 1403 query_qp_exit1: 1404 - kfree(qpcb); 1405 1406 return ret; 1407 }
··· 811 unsigned long spl_flags = 0; 812 813 /* do query_qp to obtain current attr values */ 814 + mqpcb = ehca_alloc_fw_ctrlblock(); 815 + if (!mqpcb) { 816 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " 817 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); 818 return -ENOMEM; ··· 1225 } 1226 1227 modify_qp_exit1: 1228 + ehca_free_fw_ctrlblock(mqpcb); 1229 1230 return ret; 1231 } ··· 1277 return -EINVAL; 1278 } 1279 1280 + qpcb = ehca_alloc_fw_ctrlblock(); 1281 if (!qpcb) { 1282 ehca_err(qp->device,"Out of memory for qpcb " 1283 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); ··· 1401 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); 1402 1403 query_qp_exit1: 1404 + ehca_free_fw_ctrlblock(qpcb); 1405 1406 return ret; 1407 }
+2
drivers/infiniband/hw/ehca/hipz_hw.h
··· 45 46 #include "ehca_tools.h" 47 48 /* QP Table Entry Memory Map */ 49 struct hipz_qptemm { 50 u64 qpx_hcr;
··· 45 46 #include "ehca_tools.h" 47 48 + #define EHCA_MAX_MTU 4 49 + 50 /* QP Table Entry Memory Map */ 51 struct hipz_qptemm { 52 u64 qpx_hcr;