IB/ehca: Assure 4K alignment for firmware control blocks

Assure 4K alignment for firmware control blocks in 64K page mode,
because kzalloc()'s result address might not be 4K aligned if 64K
pages are enabled. Thus, we introduce wrappers named
ehca_{alloc,free}_fw_ctrlblock(), which in 64K page mode allocate and
free firmware control blocks from a slab cache of 4K-sized, 4K-aligned
objects. In 4K page mode the wrappers are simply defined as
get_zeroed_page() and free_page().
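
Condensed from the hunks below, the interface added to ehca_iverbs.h and the
call pattern used at each converted site look roughly like this (error paths
trimmed; the surrounding variable and firmware call are just placeholders):

    #ifdef CONFIG_PPC_64K_PAGES
    /* real functions, backed by a 4K-sized, 4K-aligned slab cache */
    void *ehca_alloc_fw_ctrlblock(void);
    void ehca_free_fw_ctrlblock(void *ptr);
    #else
    /* 4K pages are already 4K aligned, so plain page allocation suffices */
    #define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
    #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
    #endif

            struct hipz_query_hca *rblock;

            rblock = ehca_alloc_fw_ctrlblock();     /* zeroed, 4K aligned */
            if (!rblock)
                    return -ENOMEM;
            /* ... hand rblock to the firmware call, e.g. hipz_h_query_hca() ... */
            ehca_free_fw_ctrlblock(rblock);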

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Hoang-Nam Nguyen and committed by Roland Dreier (7e28db5d, 40eb0066)

+81 -35 (total, 6 files changed)
+9 -8
drivers/infiniband/hw/ehca/ehca_hca.c
···
  */
 
 #include "ehca_tools.h"
+#include "ehca_iverbs.h"
 #include "hcp_if.h"
 
 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
···
                                              ib_device);
        struct hipz_query_hca *rblock;
 
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       rblock = ehca_alloc_fw_ctrlblock();
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
···
                = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
 
 query_device1:
-       kfree(rblock);
+       ehca_free_fw_ctrlblock(rblock);
 
        return ret;
 }
···
                                              ib_device);
        struct hipz_query_port *rblock;
 
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       rblock = ehca_alloc_fw_ctrlblock();
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
···
        props->active_speed = 0x1;
 
 query_port1:
-       kfree(rblock);
+       ehca_free_fw_ctrlblock(rblock);
 
        return ret;
 }
···
                return -EINVAL;
        }
 
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       rblock = ehca_alloc_fw_ctrlblock();
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
···
        memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
 
 query_pkey1:
-       kfree(rblock);
+       ehca_free_fw_ctrlblock(rblock);
 
        return ret;
 }
···
                return -EINVAL;
        }
 
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       rblock = ehca_alloc_fw_ctrlblock();
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
···
        memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
 
 query_gid1:
-       kfree(rblock);
+       ehca_free_fw_ctrlblock(rblock);
 
        return ret;
 }
+8 -9
drivers/infiniband/hw/ehca/ehca_irq.c
···
 #include "ehca_tools.h"
 #include "hcp_if.h"
 #include "hipz_fns.h"
+#include "ipz_pt_fn.h"
 
 #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
 #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
···
        u64 *rblock;
        unsigned long block_count;
 
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       rblock = ehca_alloc_fw_ctrlblock();
        if (!rblock) {
                ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
                ret = -ENOMEM;
                goto error_data1;
        }
 
+       /* rblock must be 4K aligned and should be 4K large */
        ret = hipz_h_error_data(shca->ipz_hca_handle,
                                resource,
                                rblock,
                                &block_count);
 
-       if (ret == H_R_STATE) {
+       if (ret == H_R_STATE)
                ehca_err(&shca->ib_device,
                         "No error data is available: %lx.", resource);
-       }
        else if (ret == H_SUCCESS) {
                int length;
 
                length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
 
-               if (length > PAGE_SIZE)
-                       length = PAGE_SIZE;
+               if (length > EHCA_PAGESIZE)
+                       length = EHCA_PAGESIZE;
 
                print_error_data(shca, data, rblock, length);
-       }
-       else {
+       } else
                ehca_err(&shca->ib_device,
                         "Error data could not be fetched: %lx", resource);
-       }
 
-       kfree(rblock);
+       ehca_free_fw_ctrlblock(rblock);
 
 error_data1:
        return ret;
+8
drivers/infiniband/hw/ehca/ehca_iverbs.h
···
 
 int ehca_munmap(unsigned long addr, size_t len);
 
+#ifdef CONFIG_PPC_64K_PAGES
+void *ehca_alloc_fw_ctrlblock(void);
+void ehca_free_fw_ctrlblock(void *ptr);
+#else
+#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
+#endif
+
 #endif
+47 -9
drivers/infiniband/hw/ehca/ehca_main.c
···
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#ifdef CONFIG_PPC_64K_PAGES
+#include <linux/slab.h>
+#endif
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
···
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0017");
+MODULE_VERSION("SVNEHCA_0018");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
···
 DEFINE_IDR(ehca_qp_idr);
 DEFINE_IDR(ehca_cq_idr);
 
+
 static struct list_head shca_list; /* list of all registered ehcas */
 static spinlock_t shca_list_lock;
 
 static struct timer_list poll_eqs_timer;
+
+#ifdef CONFIG_PPC_64K_PAGES
+static struct kmem_cache *ctblk_cache = NULL;
+
+void *ehca_alloc_fw_ctrlblock(void)
+{
+       void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
+       if (!ret)
+               ehca_gen_err("Out of memory for ctblk");
+       return ret;
+}
+
+void ehca_free_fw_ctrlblock(void *ptr)
+{
+       if (ptr)
+               kmem_cache_free(ctblk_cache, ptr);
+
+}
+#endif
 
 static int ehca_create_slab_caches(void)
 {
···
                goto create_slab_caches5;
        }
 
+#ifdef CONFIG_PPC_64K_PAGES
+       ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
+                                       EHCA_PAGESIZE, H_CB_ALIGNMENT,
+                                       SLAB_HWCACHE_ALIGN,
+                                       NULL, NULL);
+       if (!ctblk_cache) {
+               ehca_gen_err("Cannot create ctblk SLAB cache.");
+               ehca_cleanup_mrmw_cache();
+               goto create_slab_caches5;
+       }
+#endif
        return 0;
 
 create_slab_caches5:
···
        ehca_cleanup_qp_cache();
        ehca_cleanup_cq_cache();
        ehca_cleanup_pd_cache();
+#ifdef CONFIG_PPC_64K_PAGES
+       if (ctblk_cache)
+               kmem_cache_destroy(ctblk_cache);
+#endif
 }
 
 #define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
···
        u64 h_ret;
        struct hipz_query_hca *rblock;
 
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       rblock = ehca_alloc_fw_ctrlblock();
        if (!rblock) {
                ehca_gen_err("Cannot allocate rblock memory.");
                return -ENOMEM;
···
                shca->sport[1].rate = IB_RATE_30_GBPS;
 
 num_ports1:
-       kfree(rblock);
+       ehca_free_fw_ctrlblock(rblock);
        return ret;
 }
···
        int ret = 0;
        struct hipz_query_hca *rblock;
 
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       rblock = ehca_alloc_fw_ctrlblock();
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
···
        memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
 
 init_node_guid1:
-       kfree(rblock);
+       ehca_free_fw_ctrlblock(rblock);
        return ret;
 }
···
        \
        shca = dev->driver_data; \
        \
-       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \
+       rblock = ehca_alloc_fw_ctrlblock(); \
        if (!rblock) { \
                dev_err(dev, "Can't allocate rblock memory."); \
                return 0; \
···
        \
        if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
                dev_err(dev, "Can't query device properties"); \
-               kfree(rblock); \
+               ehca_free_fw_ctrlblock(rblock); \
                return 0; \
        } \
        \
        data = rblock->name; \
-       kfree(rblock); \
+       ehca_free_fw_ctrlblock(rblock); \
        \
        if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
                return snprintf(buf, 256, "1\n"); \
···
        int ret;
 
        printk(KERN_INFO "eHCA Infiniband Device Driver "
-              "(Rel.: SVNEHCA_0017)\n");
+              "(Rel.: SVNEHCA_0018)\n");
        idr_init(&ehca_qp_idr);
        idr_init(&ehca_cq_idr);
        spin_lock_init(&ehca_qp_idr_lock);
+4 -4
drivers/infiniband/hw/ehca/ehca_mrmw.c
···
        u32 i;
        u64 *kpage;
 
-       kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       kpage = ehca_alloc_fw_ctrlblock();
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
···
 
 
 ehca_reg_mr_rpages_exit1:
-       kfree(kpage);
+       ehca_free_fw_ctrlblock(kpage);
 ehca_reg_mr_rpages_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
···
        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
-       kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       kpage = ehca_alloc_fw_ctrlblock();
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
···
        }
 
 ehca_rereg_mr_rereg1_exit1:
-       kfree(kpage);
+       ehca_free_fw_ctrlblock(kpage);
 ehca_rereg_mr_rereg1_exit0:
        if ( ret && (ret != -EAGAIN) )
                ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
+5 -5
drivers/infiniband/hw/ehca/ehca_qp.c
···
        unsigned long spl_flags = 0;
 
        /* do query_qp to obtain current attr values */
-       mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
-       if (mqpcb == NULL) {
+       mqpcb = ehca_alloc_fw_ctrlblock();
+       if (!mqpcb) {
                ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
                         "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
                return -ENOMEM;
···
        }
 
 modify_qp_exit1:
-       kfree(mqpcb);
+       ehca_free_fw_ctrlblock(mqpcb);
 
        return ret;
 }
···
                return -EINVAL;
        }
 
-       qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL );
+       qpcb = ehca_alloc_fw_ctrlblock();
        if (!qpcb) {
                ehca_err(qp->device,"Out of memory for qpcb "
                         "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
···
                ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
 
 query_qp_exit1:
-       kfree(qpcb);
+       ehca_free_fw_ctrlblock(qpcb);
 
        return ret;
 }