Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/ehca: MR/MW structure refactoring

- Rename struct ehca_mr fields to clearly distinguish between kernel
and HW page size.
- Sort struct ehca_mr_pginfo into a common part and a union containing
specific fields for physical, user and fast (FMR) memory regions.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Hoang-Nam Nguyen; committed by Roland Dreier.
df17bfd4 2492398e

+282 -275
+24 -24
drivers/infiniband/hw/ehca/ehca_classes.h
··· 204 204 spinlock_t mrlock; 205 205 206 206 enum ehca_mr_flag flags; 207 - u32 num_pages; /* number of MR pages */ 208 - u32 num_4k; /* number of 4k "page" portions to form MR */ 207 + u32 num_kpages; /* number of kernel pages */ 208 + u32 num_hwpages; /* number of hw pages to form MR */ 209 209 int acl; /* ACL (stored here for usage in reregister) */ 210 210 u64 *start; /* virtual start address (stored here for */ 211 211 /* usage in reregister) */ ··· 217 217 /* fw specific data */ 218 218 struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */ 219 219 struct h_galpas galpas; 220 - /* data for userspace bridge */ 221 - u32 nr_of_pages; 222 - void *pagearray; 223 220 }; 224 221 225 222 struct ehca_mw { ··· 238 241 239 242 struct ehca_mr_pginfo { 240 243 enum ehca_mr_pgi_type type; 241 - u64 num_pages; 242 - u64 page_cnt; 243 - u64 num_4k; /* number of 4k "page" portions */ 244 - u64 page_4k_cnt; /* counter for 4k "page" portions */ 245 - u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */ 244 + u64 num_kpages; 245 + u64 kpage_cnt; 246 + u64 num_hwpages; /* number of hw pages */ 247 + u64 hwpage_cnt; /* counter for hw pages */ 248 + u64 next_hwpage; /* next hw page in buffer/chunk/listelem */ 246 249 247 - /* type EHCA_MR_PGI_PHYS section */ 248 - int num_phys_buf; 249 - struct ib_phys_buf *phys_buf_array; 250 - u64 next_buf; 251 - 252 - /* type EHCA_MR_PGI_USER section */ 253 - struct ib_umem *region; 254 - struct ib_umem_chunk *next_chunk; 255 - u64 next_nmap; 256 - 257 - /* type EHCA_MR_PGI_FMR section */ 258 - u64 *page_list; 259 - u64 next_listelem; 260 - /* next_4k also used within EHCA_MR_PGI_FMR */ 250 + union { 251 + struct { /* type EHCA_MR_PGI_PHYS section */ 252 + int num_phys_buf; 253 + struct ib_phys_buf *phys_buf_array; 254 + u64 next_buf; 255 + } phy; 256 + struct { /* type EHCA_MR_PGI_USER section */ 257 + struct ib_umem *region; 258 + struct ib_umem_chunk *next_chunk; 259 + u64 next_nmap; 260 + } usr; 261 + struct { /* 
type EHCA_MR_PGI_FMR section */ 262 + u64 fmr_pgsize; 263 + u64 *page_list; 264 + u64 next_listelem; 265 + } fmr; 266 + } u; 261 267 }; 262 268 263 269 /* output parameters for MR/FMR hipz calls */
+258 -251
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 150 150 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); 151 151 152 152 u64 size; 153 - struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 154 - u32 num_pages_mr; 155 - u32 num_pages_4k; /* 4k portion "pages" */ 156 153 157 154 if ((num_phys_buf <= 0) || !phys_buf_array) { 158 155 ehca_err(pd->device, "bad input values: num_phys_buf=%x " ··· 193 196 goto reg_phys_mr_exit0; 194 197 } 195 198 196 - /* determine number of MR pages */ 197 - num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size, 198 - PAGE_SIZE); 199 - num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size, 200 - EHCA_PAGESIZE); 201 - 202 199 /* register MR on HCA */ 203 200 if (ehca_mr_is_maxmr(size, iova_start)) { 204 201 e_mr->flags |= EHCA_MR_FLAG_MAXMR; ··· 204 213 goto reg_phys_mr_exit1; 205 214 } 206 215 } else { 207 - pginfo.type = EHCA_MR_PGI_PHYS; 208 - pginfo.num_pages = num_pages_mr; 209 - pginfo.num_4k = num_pages_4k; 210 - pginfo.num_phys_buf = num_phys_buf; 211 - pginfo.phys_buf_array = phys_buf_array; 212 - pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / 213 - EHCA_PAGESIZE); 216 + struct ehca_mr_pginfo pginfo; 217 + u32 num_kpages; 218 + u32 num_hwpages; 219 + 220 + num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size, 221 + PAGE_SIZE); 222 + num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + 223 + size, EHCA_PAGESIZE); 224 + memset(&pginfo, 0, sizeof(pginfo)); 225 + pginfo.type = EHCA_MR_PGI_PHYS; 226 + pginfo.num_kpages = num_kpages; 227 + pginfo.num_hwpages = num_hwpages; 228 + pginfo.u.phy.num_phys_buf = num_phys_buf; 229 + pginfo.u.phy.phys_buf_array = phys_buf_array; 230 + pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) / 231 + EHCA_PAGESIZE); 214 232 215 233 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, 216 234 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, ··· 254 254 struct ehca_shca *shca = 255 255 container_of(pd->device, struct ehca_shca, ib_device); 256 256 struct ehca_pd *e_pd 
= container_of(pd, struct ehca_pd, ib_pd); 257 - struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 257 + struct ehca_mr_pginfo pginfo; 258 258 int ret; 259 - u32 num_pages_mr; 260 - u32 num_pages_4k; /* 4k portion "pages" */ 259 + u32 num_kpages; 260 + u32 num_hwpages; 261 261 262 262 if (!pd) { 263 263 ehca_gen_err("bad pd=%p", pd); ··· 307 307 } 308 308 309 309 /* determine number of MR pages */ 310 - num_pages_mr = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE); 311 - num_pages_4k = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length, 312 - EHCA_PAGESIZE); 310 + num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE); 311 + num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length, 312 + EHCA_PAGESIZE); 313 313 314 314 /* register MR on HCA */ 315 - pginfo.type = EHCA_MR_PGI_USER; 316 - pginfo.num_pages = num_pages_mr; 317 - pginfo.num_4k = num_pages_4k; 318 - pginfo.region = e_mr->umem; 319 - pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE; 320 - pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk, 321 - (&e_mr->umem->chunk_list), 322 - list); 315 + memset(&pginfo, 0, sizeof(pginfo)); 316 + pginfo.type = EHCA_MR_PGI_USER; 317 + pginfo.num_kpages = num_kpages; 318 + pginfo.num_hwpages = num_hwpages; 319 + pginfo.u.usr.region = e_mr->umem; 320 + pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE; 321 + pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk, 322 + (&e_mr->umem->chunk_list), 323 + list); 323 324 324 325 ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd, 325 326 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); ··· 366 365 struct ehca_pd *new_pd; 367 366 u32 tmp_lkey, tmp_rkey; 368 367 unsigned long sl_flags; 369 - u32 num_pages_mr = 0; 370 - u32 num_pages_4k = 0; /* 4k portion "pages" */ 371 - struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 368 + u32 num_kpages = 0; 369 + u32 num_hwpages = 0; 370 + struct ehca_mr_pginfo pginfo; 372 371 u32 cur_pid 
= current->tgid; 373 372 374 373 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && ··· 464 463 ret = -EINVAL; 465 464 goto rereg_phys_mr_exit1; 466 465 } 467 - num_pages_mr = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) + 468 - new_size, PAGE_SIZE); 469 - num_pages_4k = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) + 470 - new_size, EHCA_PAGESIZE); 471 - pginfo.type = EHCA_MR_PGI_PHYS; 472 - pginfo.num_pages = num_pages_mr; 473 - pginfo.num_4k = num_pages_4k; 474 - pginfo.num_phys_buf = num_phys_buf; 475 - pginfo.phys_buf_array = phys_buf_array; 476 - pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / 477 - EHCA_PAGESIZE); 466 + num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) + 467 + new_size, PAGE_SIZE); 468 + num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) + 469 + new_size, EHCA_PAGESIZE); 470 + memset(&pginfo, 0, sizeof(pginfo)); 471 + pginfo.type = EHCA_MR_PGI_PHYS; 472 + pginfo.num_kpages = num_kpages; 473 + pginfo.num_hwpages = num_hwpages; 474 + pginfo.u.phy.num_phys_buf = num_phys_buf; 475 + pginfo.u.phy.phys_buf_array = phys_buf_array; 476 + pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) / 477 + EHCA_PAGESIZE); 478 478 } 479 479 if (mr_rereg_mask & IB_MR_REREG_ACCESS) 480 480 new_acl = mr_access_flags; ··· 546 544 ret = ehca2ib_return_code(h_ret); 547 545 goto query_mr_exit1; 548 546 } 549 - mr_attr->pd = mr->pd; 547 + mr_attr->pd = mr->pd; 550 548 mr_attr->device_virt_addr = hipzout.vaddr; 551 - mr_attr->size = hipzout.len; 552 - mr_attr->lkey = hipzout.lkey; 553 - mr_attr->rkey = hipzout.rkey; 549 + mr_attr->size = hipzout.len; 550 + mr_attr->lkey = hipzout.lkey; 551 + mr_attr->rkey = hipzout.rkey; 554 552 ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags); 555 553 556 554 query_mr_exit1: ··· 706 704 struct ehca_mr *e_fmr; 707 705 int ret; 708 706 u32 tmp_lkey, tmp_rkey; 709 - struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 707 + struct ehca_mr_pginfo pginfo; 710 708 711 709 /* check other 
parameters */ 712 710 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && ··· 752 750 e_fmr->flags |= EHCA_MR_FLAG_FMR; 753 751 754 752 /* register MR on HCA */ 753 + memset(&pginfo, 0, sizeof(pginfo)); 755 754 ret = ehca_reg_mr(shca, e_fmr, NULL, 756 755 fmr_attr->max_pages * (1 << fmr_attr->page_shift), 757 756 mr_access_flags, e_pd, &pginfo, ··· 791 788 container_of(fmr->device, struct ehca_shca, ib_device); 792 789 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); 793 790 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); 794 - struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 791 + struct ehca_mr_pginfo pginfo; 795 792 u32 tmp_lkey, tmp_rkey; 796 793 797 794 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { ··· 817 814 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); 818 815 } 819 816 820 - pginfo.type = EHCA_MR_PGI_FMR; 821 - pginfo.num_pages = list_len; 822 - pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE); 823 - pginfo.page_list = page_list; 824 - pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) / 825 - EHCA_PAGESIZE); 817 + memset(&pginfo, 0, sizeof(pginfo)); 818 + pginfo.type = EHCA_MR_PGI_FMR; 819 + pginfo.num_kpages = list_len; 820 + pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE); 821 + pginfo.u.fmr.page_list = page_list; 822 + pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) / 823 + EHCA_PAGESIZE); 826 824 827 825 ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova, 828 826 list_len * e_fmr->fmr_page_size, ··· 983 979 goto ehca_reg_mr_exit1; 984 980 985 981 /* successful registration */ 986 - e_mr->num_pages = pginfo->num_pages; 987 - e_mr->num_4k = pginfo->num_4k; 988 - e_mr->start = iova_start; 989 - e_mr->size = size; 990 - e_mr->acl = acl; 982 + e_mr->num_kpages = pginfo->num_kpages; 983 + e_mr->num_hwpages = pginfo->num_hwpages; 984 + e_mr->start = iova_start; 985 + e_mr->size = size; 986 + e_mr->acl = acl; 991 987 *lkey = hipzout.lkey; 992 988 *rkey = 
hipzout.rkey; 993 989 return 0; ··· 997 993 if (h_ret != H_SUCCESS) { 998 994 ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p " 999 995 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " 1000 - "pginfo=%p num_pages=%lx num_4k=%lx ret=%x", 996 + "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x", 1001 997 h_ret, shca, e_mr, iova_start, size, acl, e_pd, 1002 - hipzout.lkey, pginfo, pginfo->num_pages, 1003 - pginfo->num_4k, ret); 998 + hipzout.lkey, pginfo, pginfo->num_kpages, 999 + pginfo->num_hwpages, ret); 1004 1000 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " 1005 1001 "not recoverable"); 1006 1002 } ··· 1008 1004 if (ret) 1009 1005 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " 1010 1006 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1011 - "num_pages=%lx num_4k=%lx", 1007 + "num_kpages=%lx num_hwpages=%lx", 1012 1008 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, 1013 - pginfo->num_pages, pginfo->num_4k); 1009 + pginfo->num_kpages, pginfo->num_hwpages); 1014 1010 return ret; 1015 1011 } /* end ehca_reg_mr() */ 1016 1012 ··· 1035 1031 } 1036 1032 1037 1033 /* max 512 pages per shot */ 1038 - for (i = 0; i < NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES); i++) { 1034 + for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) { 1039 1035 1040 - if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) { 1041 - rnum = pginfo->num_4k % MAX_RPAGES; /* last shot */ 1036 + if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { 1037 + rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */ 1042 1038 if (rnum == 0) 1043 1039 rnum = MAX_RPAGES; /* last shot is full */ 1044 1040 } else ··· 1074 1070 0, /* pagesize 4k */ 1075 1071 0, rpage, rnum); 1076 1072 1077 - if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) { 1073 + if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { 1078 1074 /* 1079 1075 * check for 'registration complete'==H_SUCCESS 1080 1076 * and for 'page registered'==H_PAGE_REGISTERED ··· 1110 1106 
ehca_reg_mr_rpages_exit0: 1111 1107 if (ret) 1112 1108 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " 1113 - "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo, 1114 - pginfo->num_pages, pginfo->num_4k); 1109 + "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr, 1110 + pginfo, pginfo->num_kpages, pginfo->num_hwpages); 1115 1111 return ret; 1116 1112 } /* end ehca_reg_mr_rpages() */ 1117 1113 ··· 1146 1142 } 1147 1143 1148 1144 pginfo_save = *pginfo; 1149 - ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage); 1145 + ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_hwpages, kpage); 1150 1146 if (ret) { 1151 1147 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " 1152 - "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p", 1153 - e_mr, pginfo, pginfo->type, pginfo->num_pages, 1154 - pginfo->num_4k,kpage); 1148 + "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx " 1149 + "kpage=%p", e_mr, pginfo, pginfo->type, 1150 + pginfo->num_kpages, pginfo->num_hwpages, kpage); 1155 1151 goto ehca_rereg_mr_rereg1_exit1; 1156 1152 } 1157 1153 rpage = virt_to_abs(kpage); ··· 1185 1181 * successful reregistration 1186 1182 * note: start and start_out are identical for eServer HCAs 1187 1183 */ 1188 - e_mr->num_pages = pginfo->num_pages; 1189 - e_mr->num_4k = pginfo->num_4k; 1190 - e_mr->start = iova_start; 1191 - e_mr->size = size; 1192 - e_mr->acl = acl; 1184 + e_mr->num_kpages = pginfo->num_kpages; 1185 + e_mr->num_hwpages = pginfo->num_hwpages; 1186 + e_mr->start = iova_start; 1187 + e_mr->size = size; 1188 + e_mr->acl = acl; 1193 1189 *lkey = hipzout.lkey; 1194 1190 *rkey = hipzout.rkey; 1195 1191 } ··· 1199 1195 ehca_rereg_mr_rereg1_exit0: 1200 1196 if ( ret && (ret != -EAGAIN) ) 1201 1197 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " 1202 - "pginfo=%p num_pages=%lx num_4k=%lx", 1203 - ret, *lkey, *rkey, pginfo, pginfo->num_pages, 1204 - pginfo->num_4k); 1198 + "pginfo=%p num_kpages=%lx num_hwpages=%lx", 1199 + ret, *lkey, *rkey, pginfo, 
pginfo->num_kpages, 1200 + pginfo->num_hwpages); 1205 1201 return ret; 1206 1202 } /* end ehca_rereg_mr_rereg1() */ 1207 1203 ··· 1223 1219 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */ 1224 1220 1225 1221 /* first determine reregistration hCall(s) */ 1226 - if ((pginfo->num_4k > MAX_RPAGES) || (e_mr->num_4k > MAX_RPAGES) || 1227 - (pginfo->num_4k > e_mr->num_4k)) { 1228 - ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx " 1229 - "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k); 1222 + if ((pginfo->num_hwpages > MAX_RPAGES) || 1223 + (e_mr->num_hwpages > MAX_RPAGES) || 1224 + (pginfo->num_hwpages > e_mr->num_hwpages)) { 1225 + ehca_dbg(&shca->ib_device, "Rereg3 case, " 1226 + "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x", 1227 + pginfo->num_hwpages, e_mr->num_hwpages); 1230 1228 rereg_1_hcall = 0; 1231 1229 rereg_3_hcall = 1; 1232 1230 } ··· 1292 1286 if (ret) 1293 1287 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " 1294 1288 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1295 - "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " 1289 + "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " 1296 1290 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, 1297 - acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey, 1291 + acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, 1298 1292 rereg_1_hcall, rereg_3_hcall); 1299 1293 return ret; 1300 1294 } /* end ehca_rereg_mr() */ ··· 1312 1306 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); 1313 1307 struct ehca_mr save_fmr; 1314 1308 u32 tmp_lkey, tmp_rkey; 1315 - struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 1309 + struct ehca_mr_pginfo pginfo; 1316 1310 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 1317 1311 1318 1312 /* first check if reregistration hCall can be used for unmap */ ··· 1376 1370 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; 1377 1371 e_fmr->acl = save_fmr.acl; 1378 1372 1379 - pginfo.type = EHCA_MR_PGI_FMR; 1380 - 
pginfo.num_pages = 0; 1381 - pginfo.num_4k = 0; 1373 + memset(&pginfo, 0, sizeof(pginfo)); 1374 + pginfo.type = EHCA_MR_PGI_FMR; 1375 + pginfo.num_kpages = 0; 1376 + pginfo.num_hwpages = 0; 1382 1377 ret = ehca_reg_mr(shca, e_fmr, NULL, 1383 1378 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), 1384 1379 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, ··· 1435 1428 goto ehca_reg_smr_exit0; 1436 1429 } 1437 1430 /* successful registration */ 1438 - e_newmr->num_pages = e_origmr->num_pages; 1439 - e_newmr->num_4k = e_origmr->num_4k; 1440 - e_newmr->start = iova_start; 1441 - e_newmr->size = e_origmr->size; 1442 - e_newmr->acl = acl; 1431 + e_newmr->num_kpages = e_origmr->num_kpages; 1432 + e_newmr->num_hwpages = e_origmr->num_hwpages; 1433 + e_newmr->start = iova_start; 1434 + e_newmr->size = e_origmr->size; 1435 + e_newmr->acl = acl; 1443 1436 e_newmr->ipz_mr_handle = hipzout.handle; 1444 1437 *lkey = hipzout.lkey; 1445 1438 *rkey = hipzout.rkey; ··· 1465 1458 struct ehca_mr *e_mr; 1466 1459 u64 *iova_start; 1467 1460 u64 size_maxmr; 1468 - struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 1461 + struct ehca_mr_pginfo pginfo; 1469 1462 struct ib_phys_buf ib_pbuf; 1470 - u32 num_pages_mr; 1471 - u32 num_pages_4k; /* 4k portion "pages" */ 1463 + u32 num_kpages; 1464 + u32 num_hwpages; 1472 1465 1473 1466 e_mr = ehca_mr_new(); 1474 1467 if (!e_mr) { ··· 1483 1476 iova_start = (u64*)KERNELBASE; 1484 1477 ib_pbuf.addr = 0; 1485 1478 ib_pbuf.size = size_maxmr; 1486 - num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, 1487 - PAGE_SIZE); 1488 - num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) 1489 - + size_maxmr, EHCA_PAGESIZE); 1479 + num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, 1480 + PAGE_SIZE); 1481 + num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr, 1482 + EHCA_PAGESIZE); 1490 1483 1491 - pginfo.type = EHCA_MR_PGI_PHYS; 1492 - pginfo.num_pages = num_pages_mr; 1493 - pginfo.num_4k = 
num_pages_4k; 1494 - pginfo.num_phys_buf = 1; 1495 - pginfo.phys_buf_array = &ib_pbuf; 1484 + memset(&pginfo, 0, sizeof(pginfo)); 1485 + pginfo.type = EHCA_MR_PGI_PHYS; 1486 + pginfo.num_kpages = num_kpages; 1487 + pginfo.num_hwpages = num_hwpages; 1488 + pginfo.u.phy.num_phys_buf = 1; 1489 + pginfo.u.phy.phys_buf_array = &ib_pbuf; 1496 1490 1497 1491 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, 1498 1492 &pginfo, &e_mr->ib.ib_mr.lkey, 1499 1493 &e_mr->ib.ib_mr.rkey); 1500 1494 if (ret) { 1501 1495 ehca_err(&shca->ib_device, "reg of internal max MR failed, " 1502 - "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x " 1503 - "num_pages_4k=%x", e_mr, iova_start, size_maxmr, 1504 - num_pages_mr, num_pages_4k); 1496 + "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x " 1497 + "num_hwpages=%x", e_mr, iova_start, size_maxmr, 1498 + num_kpages, num_hwpages); 1505 1499 goto ehca_reg_internal_maxmr_exit1; 1506 1500 } 1507 1501 ··· 1554 1546 return ehca2ib_return_code(h_ret); 1555 1547 } 1556 1548 /* successful registration */ 1557 - e_newmr->num_pages = e_origmr->num_pages; 1558 - e_newmr->num_4k = e_origmr->num_4k; 1559 - e_newmr->start = iova_start; 1560 - e_newmr->size = e_origmr->size; 1561 - e_newmr->acl = acl; 1549 + e_newmr->num_kpages = e_origmr->num_kpages; 1550 + e_newmr->num_hwpages = e_origmr->num_hwpages; 1551 + e_newmr->start = iova_start; 1552 + e_newmr->size = e_origmr->size; 1553 + e_newmr->acl = acl; 1562 1554 e_newmr->ipz_mr_handle = hipzout.handle; 1563 1555 *lkey = hipzout.lkey; 1564 1556 *rkey = hipzout.rkey; ··· 1701 1693 struct ib_umem_chunk *chunk; 1702 1694 struct ib_phys_buf *pbuf; 1703 1695 u64 *fmrlist; 1704 - u64 num4k, pgaddr, offs4k; 1696 + u64 num_hw, pgaddr, offs_hw; 1705 1697 u32 i = 0; 1706 1698 u32 j = 0; 1707 1699 1708 1700 if (pginfo->type == EHCA_MR_PGI_PHYS) { 1709 1701 /* loop over desired phys_buf_array entries */ 1710 1702 while (i < number) { 1711 - pbuf = pginfo->phys_buf_array + pginfo->next_buf; 1712 - 
num4k = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) 1713 - + pbuf->size, EHCA_PAGESIZE); 1714 - offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; 1715 - while (pginfo->next_4k < offs4k + num4k) { 1703 + pbuf = pginfo->u.phy.phys_buf_array 1704 + + pginfo->u.phy.next_buf; 1705 + num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) + 1706 + pbuf->size, EHCA_PAGESIZE); 1707 + offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; 1708 + while (pginfo->next_hwpage < offs_hw + num_hw) { 1716 1709 /* sanity check */ 1717 - if ((pginfo->page_cnt >= pginfo->num_pages) || 1718 - (pginfo->page_4k_cnt >= pginfo->num_4k)) { 1719 - ehca_gen_err("page_cnt >= num_pages, " 1720 - "page_cnt=%lx " 1721 - "num_pages=%lx " 1722 - "page_4k_cnt=%lx " 1723 - "num_4k=%lx i=%x", 1724 - pginfo->page_cnt, 1725 - pginfo->num_pages, 1726 - pginfo->page_4k_cnt, 1727 - pginfo->num_4k, i); 1710 + if ((pginfo->kpage_cnt >= pginfo->num_kpages) || 1711 + (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { 1712 + ehca_gen_err("kpage_cnt >= num_kpages, " 1713 + "kpage_cnt=%lx " 1714 + "num_kpages=%lx " 1715 + "hwpage_cnt=%lx " 1716 + "num_hwpages=%lx i=%x", 1717 + pginfo->kpage_cnt, 1718 + pginfo->num_kpages, 1719 + pginfo->hwpage_cnt, 1720 + pginfo->num_hwpages, i); 1728 1721 ret = -EFAULT; 1729 1722 goto ehca_set_pagebuf_exit0; 1730 1723 } 1731 1724 *kpage = phys_to_abs( 1732 1725 (pbuf->addr & EHCA_PAGEMASK) 1733 - + (pginfo->next_4k * EHCA_PAGESIZE)); 1726 + + (pginfo->next_hwpage * EHCA_PAGESIZE)); 1734 1727 if ( !(*kpage) && pbuf->addr ) { 1735 1728 ehca_gen_err("pbuf->addr=%lx " 1736 1729 "pbuf->size=%lx " 1737 - "next_4k=%lx", pbuf->addr, 1730 + "next_hwpage=%lx", pbuf->addr, 1738 1731 pbuf->size, 1739 - pginfo->next_4k); 1732 + pginfo->next_hwpage); 1740 1733 ret = -EFAULT; 1741 1734 goto ehca_set_pagebuf_exit0; 1742 1735 } 1743 - (pginfo->page_4k_cnt)++; 1744 - (pginfo->next_4k)++; 1745 - if (pginfo->next_4k % 1736 + (pginfo->hwpage_cnt)++; 1737 + (pginfo->next_hwpage)++; 1738 + if (pginfo->next_hwpage % 
1746 1739 (PAGE_SIZE / EHCA_PAGESIZE) == 0) 1747 - (pginfo->page_cnt)++; 1740 + (pginfo->kpage_cnt)++; 1748 1741 kpage++; 1749 1742 i++; 1750 1743 if (i >= number) break; 1751 1744 } 1752 - if (pginfo->next_4k >= offs4k + num4k) { 1753 - (pginfo->next_buf)++; 1754 - pginfo->next_4k = 0; 1745 + if (pginfo->next_hwpage >= offs_hw + num_hw) { 1746 + (pginfo->u.phy.next_buf)++; 1747 + pginfo->next_hwpage = 0; 1755 1748 } 1756 1749 } 1757 1750 } else if (pginfo->type == EHCA_MR_PGI_USER) { 1758 1751 /* loop over desired chunk entries */ 1759 - chunk = pginfo->next_chunk; 1760 - prev_chunk = pginfo->next_chunk; 1752 + chunk = pginfo->u.usr.next_chunk; 1753 + prev_chunk = pginfo->u.usr.next_chunk; 1761 1754 list_for_each_entry_continue(chunk, 1762 - (&(pginfo->region->chunk_list)), 1755 + (&(pginfo->u.usr.region->chunk_list)), 1763 1756 list) { 1764 - for (i = pginfo->next_nmap; i < chunk->nmap; ) { 1757 + for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { 1765 1758 pgaddr = ( page_to_pfn(chunk->page_list[i].page) 1766 1759 << PAGE_SHIFT ); 1767 1760 *kpage = phys_to_abs(pgaddr + 1768 - (pginfo->next_4k * 1761 + (pginfo->next_hwpage * 1769 1762 EHCA_PAGESIZE)); 1770 1763 if ( !(*kpage) ) { 1771 1764 ehca_gen_err("pgaddr=%lx " 1772 1765 "chunk->page_list[i]=%lx " 1773 - "i=%x next_4k=%lx mr=%p", 1766 + "i=%x next_hwpage=%lx mr=%p", 1774 1767 pgaddr, 1775 1768 (u64)sg_dma_address( 1776 1769 &chunk-> 1777 1770 page_list[i]), 1778 - i, pginfo->next_4k, e_mr); 1771 + i, pginfo->next_hwpage, e_mr); 1779 1772 ret = -EFAULT; 1780 1773 goto ehca_set_pagebuf_exit0; 1781 1774 } 1782 - (pginfo->page_4k_cnt)++; 1783 - (pginfo->next_4k)++; 1775 + (pginfo->hwpage_cnt)++; 1776 + (pginfo->next_hwpage)++; 1784 1777 kpage++; 1785 - if (pginfo->next_4k % 1778 + if (pginfo->next_hwpage % 1786 1779 (PAGE_SIZE / EHCA_PAGESIZE) == 0) { 1787 - (pginfo->page_cnt)++; 1788 - (pginfo->next_nmap)++; 1789 - pginfo->next_4k = 0; 1780 + (pginfo->kpage_cnt)++; 1781 + (pginfo->u.usr.next_nmap)++; 1782 
+ pginfo->next_hwpage = 0; 1790 1783 i++; 1791 1784 } 1792 1785 j++; 1793 1786 if (j >= number) break; 1794 1787 } 1795 - if ((pginfo->next_nmap >= chunk->nmap) && 1788 + if ((pginfo->u.usr.next_nmap >= chunk->nmap) && 1796 1789 (j >= number)) { 1797 - pginfo->next_nmap = 0; 1790 + pginfo->u.usr.next_nmap = 0; 1798 1791 prev_chunk = chunk; 1799 1792 break; 1800 - } else if (pginfo->next_nmap >= chunk->nmap) { 1801 - pginfo->next_nmap = 0; 1793 + } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { 1794 + pginfo->u.usr.next_nmap = 0; 1802 1795 prev_chunk = chunk; 1803 1796 } else if (j >= number) 1804 1797 break; 1805 1798 else 1806 1799 prev_chunk = chunk; 1807 1800 } 1808 - pginfo->next_chunk = 1801 + pginfo->u.usr.next_chunk = 1809 1802 list_prepare_entry(prev_chunk, 1810 - (&(pginfo->region->chunk_list)), 1803 + (&(pginfo->u.usr.region->chunk_list)), 1811 1804 list); 1812 1805 } else if (pginfo->type == EHCA_MR_PGI_FMR) { 1813 1806 /* loop over desired page_list entries */ 1814 - fmrlist = pginfo->page_list + pginfo->next_listelem; 1807 + fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; 1815 1808 for (i = 0; i < number; i++) { 1816 1809 *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + 1817 - pginfo->next_4k * EHCA_PAGESIZE); 1810 + pginfo->next_hwpage * EHCA_PAGESIZE); 1818 1811 if ( !(*kpage) ) { 1819 1812 ehca_gen_err("*fmrlist=%lx fmrlist=%p " 1820 - "next_listelem=%lx next_4k=%lx", 1813 + "next_listelem=%lx next_hwpage=%lx", 1821 1814 *fmrlist, fmrlist, 1822 - pginfo->next_listelem, 1823 - pginfo->next_4k); 1815 + pginfo->u.fmr.next_listelem, 1816 + pginfo->next_hwpage); 1824 1817 ret = -EFAULT; 1825 1818 goto ehca_set_pagebuf_exit0; 1826 1819 } 1827 - (pginfo->page_4k_cnt)++; 1828 - (pginfo->next_4k)++; 1820 + (pginfo->hwpage_cnt)++; 1821 + (pginfo->next_hwpage)++; 1829 1822 kpage++; 1830 - if (pginfo->next_4k % 1823 + if (pginfo->next_hwpage % 1831 1824 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { 1832 - (pginfo->page_cnt)++; 1833 - 
(pginfo->next_listelem)++; 1825 + (pginfo->kpage_cnt)++; 1826 + (pginfo->u.fmr.next_listelem)++; 1834 1827 fmrlist++; 1835 - pginfo->next_4k = 0; 1828 + pginfo->next_hwpage = 0; 1836 1829 } 1837 1830 } 1838 1831 } else { ··· 1844 1835 1845 1836 ehca_set_pagebuf_exit0: 1846 1837 if (ret) 1847 - ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " 1848 - "num_4k=%lx next_buf=%lx next_4k=%lx number=%x " 1849 - "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x " 1838 + ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx " 1839 + "num_hwpages=%lx next_buf=%lx next_hwpage=%lx number=%x " 1840 + "kpage=%p kpage_cnt=%lx hwpage_cnt=%lx i=%x " 1850 1841 "next_listelem=%lx region=%p next_chunk=%p " 1851 1842 "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type, 1852 - pginfo->num_pages, pginfo->num_4k, 1853 - pginfo->next_buf, pginfo->next_4k, number, kpage, 1854 - pginfo->page_cnt, pginfo->page_4k_cnt, i, 1855 - pginfo->next_listelem, pginfo->region, 1856 - pginfo->next_chunk, pginfo->next_nmap); 1843 + pginfo->num_kpages, pginfo->num_hwpages, 1844 + pginfo->u.phy.next_buf, pginfo->next_hwpage, number, kpage, 1845 + pginfo->kpage_cnt, pginfo->hwpage_cnt, i, 1846 + pginfo->u.fmr.next_listelem, pginfo->u.usr.region, 1847 + pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap); 1857 1848 return ret; 1858 1849 } /* end ehca_set_pagebuf() */ 1859 1850 ··· 1869 1860 u64 *fmrlist; 1870 1861 struct ib_umem_chunk *chunk; 1871 1862 struct ib_umem_chunk *prev_chunk; 1872 - u64 pgaddr, num4k, offs4k; 1863 + u64 pgaddr, num_hw, offs_hw; 1873 1864 1874 1865 if (pginfo->type == EHCA_MR_PGI_PHYS) { 1875 1866 /* sanity check */ 1876 - if ((pginfo->page_cnt >= pginfo->num_pages) || 1877 - (pginfo->page_4k_cnt >= pginfo->num_4k)) { 1878 - ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx " 1879 - "num_pages=%lx page_4k_cnt=%lx num_4k=%lx", 1880 - pginfo->page_cnt, pginfo->num_pages, 1881 - pginfo->page_4k_cnt, pginfo->num_4k); 1867 + if ((pginfo->kpage_cnt >= pginfo->num_kpages) || 1868 
+ (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { 1869 + ehca_gen_err("kpage_cnt >= num_hwpages, kpage_cnt=%lx " 1870 + "num_hwpages=%lx hwpage_cnt=%lx num_hwpages=%lx", 1871 + pginfo->kpage_cnt, pginfo->num_kpages, 1872 + pginfo->hwpage_cnt, pginfo->num_hwpages); 1882 1873 ret = -EFAULT; 1883 1874 goto ehca_set_pagebuf_1_exit0; 1884 1875 } 1885 - tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf; 1886 - num4k = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) + 1887 - tmp_pbuf->size, EHCA_PAGESIZE); 1888 - offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; 1876 + tmp_pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf; 1877 + num_hw = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) + 1878 + tmp_pbuf->size, EHCA_PAGESIZE); 1879 + offs_hw = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; 1889 1880 *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) + 1890 - (pginfo->next_4k * EHCA_PAGESIZE)); 1881 + (pginfo->next_hwpage * EHCA_PAGESIZE)); 1891 1882 if ( !(*rpage) && tmp_pbuf->addr ) { 1892 1883 ehca_gen_err("tmp_pbuf->addr=%lx" 1893 - " tmp_pbuf->size=%lx next_4k=%lx", 1884 + " tmp_pbuf->size=%lx next_hwpage=%lx", 1894 1885 tmp_pbuf->addr, tmp_pbuf->size, 1895 - pginfo->next_4k); 1886 + pginfo->next_hwpage); 1896 1887 ret = -EFAULT; 1897 1888 goto ehca_set_pagebuf_1_exit0; 1898 1889 } 1899 - (pginfo->page_4k_cnt)++; 1900 - (pginfo->next_4k)++; 1901 - if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0) 1902 - (pginfo->page_cnt)++; 1903 - if (pginfo->next_4k >= offs4k + num4k) { 1904 - (pginfo->next_buf)++; 1905 - pginfo->next_4k = 0; 1890 + (pginfo->hwpage_cnt)++; 1891 + (pginfo->next_hwpage)++; 1892 + if (pginfo->next_hwpage % (PAGE_SIZE / EHCA_PAGESIZE) == 0) 1893 + (pginfo->kpage_cnt)++; 1894 + if (pginfo->next_hwpage >= offs_hw + num_hw) { 1895 + (pginfo->u.phy.next_buf)++; 1896 + pginfo->next_hwpage = 0; 1906 1897 } 1907 1898 } else if (pginfo->type == EHCA_MR_PGI_USER) { 1908 - chunk = pginfo->next_chunk; 1909 - prev_chunk = pginfo->next_chunk; 1899 
+ chunk = pginfo->u.usr.next_chunk; 1900 + prev_chunk = pginfo->u.usr.next_chunk; 1910 1901 list_for_each_entry_continue(chunk, 1911 - (&(pginfo->region->chunk_list)), 1902 + (&(pginfo->u.usr.region->chunk_list)), 1912 1903 list) { 1913 1904 pgaddr = ( page_to_pfn(chunk->page_list[ 1914 - pginfo->next_nmap].page) 1905 + pginfo->u.usr.next_nmap].page) 1915 1906 << PAGE_SHIFT); 1916 1907 *rpage = phys_to_abs(pgaddr + 1917 - (pginfo->next_4k * EHCA_PAGESIZE)); 1908 + (pginfo->next_hwpage * EHCA_PAGESIZE)); 1918 1909 if ( !(*rpage) ) { 1919 1910 ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx" 1920 - " next_nmap=%lx next_4k=%lx mr=%p", 1911 + " next_nmap=%lx next_hwpage=%lx mr=%p", 1921 1912 pgaddr, (u64)sg_dma_address( 1922 1913 &chunk->page_list[ 1923 - pginfo-> 1914 + pginfo->u.usr. 1924 1915 next_nmap]), 1925 - pginfo->next_nmap, pginfo->next_4k, 1916 + pginfo->u.usr.next_nmap, pginfo->next_hwpage, 1926 1917 e_mr); 1927 1918 ret = -EFAULT; 1928 1919 goto ehca_set_pagebuf_1_exit0; 1929 1920 } 1930 - (pginfo->page_4k_cnt)++; 1931 - (pginfo->next_4k)++; 1932 - if (pginfo->next_4k % 1921 + (pginfo->hwpage_cnt)++; 1922 + (pginfo->next_hwpage)++; 1923 + if (pginfo->next_hwpage % 1933 1924 (PAGE_SIZE / EHCA_PAGESIZE) == 0) { 1934 - (pginfo->page_cnt)++; 1935 - (pginfo->next_nmap)++; 1936 - pginfo->next_4k = 0; 1925 + (pginfo->kpage_cnt)++; 1926 + (pginfo->u.usr.next_nmap)++; 1927 + pginfo->next_hwpage = 0; 1937 1928 } 1938 - if (pginfo->next_nmap >= chunk->nmap) { 1939 - pginfo->next_nmap = 0; 1929 + if (pginfo->u.usr.next_nmap >= chunk->nmap) { 1930 + pginfo->u.usr.next_nmap = 0; 1940 1931 prev_chunk = chunk; 1941 1932 } 1942 1933 break; 1943 1934 } 1944 - pginfo->next_chunk = 1935 + pginfo->u.usr.next_chunk = 1945 1936 list_prepare_entry(prev_chunk, 1946 - (&(pginfo->region->chunk_list)), 1937 + (&(pginfo->u.usr.region->chunk_list)), 1947 1938 list); 1948 1939 } else if (pginfo->type == EHCA_MR_PGI_FMR) { 1949 - fmrlist = pginfo->page_list + pginfo->next_listelem; 1940 
+ fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; 1950 1941 *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + 1951 - pginfo->next_4k * EHCA_PAGESIZE); 1942 + pginfo->next_hwpage * EHCA_PAGESIZE); 1952 1943 if ( !(*rpage) ) { 1953 1944 ehca_gen_err("*fmrlist=%lx fmrlist=%p " 1954 - "next_listelem=%lx next_4k=%lx", 1955 - *fmrlist, fmrlist, pginfo->next_listelem, 1956 - pginfo->next_4k); 1945 + "next_listelem=%lx next_hwpage=%lx", 1946 + *fmrlist, fmrlist, pginfo->u.fmr.next_listelem, 1947 + pginfo->next_hwpage); 1957 1948 ret = -EFAULT; 1958 1949 goto ehca_set_pagebuf_1_exit0; 1959 1950 } 1960 - (pginfo->page_4k_cnt)++; 1961 - (pginfo->next_4k)++; 1962 - if (pginfo->next_4k % 1951 + (pginfo->hwpage_cnt)++; 1952 + (pginfo->next_hwpage)++; 1953 + if (pginfo->next_hwpage % 1963 1954 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { 1964 - (pginfo->page_cnt)++; 1965 - (pginfo->next_listelem)++; 1966 - pginfo->next_4k = 0; 1955 + (pginfo->kpage_cnt)++; 1956 + (pginfo->u.fmr.next_listelem)++; 1957 + pginfo->next_hwpage = 0; 1967 1958 } 1968 1959 } else { 1969 1960 ehca_gen_err("bad pginfo->type=%x", pginfo->type); ··· 1973 1964 1974 1965 ehca_set_pagebuf_1_exit0: 1975 1966 if (ret) 1976 - ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " 1977 - "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p " 1978 - "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx " 1967 + ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx " 1968 + "num_hwpages=%lx next_buf=%lx next_hwpage=%lx rpage=%p " 1969 + "kpage_cnt=%lx hwpage_cnt=%lx next_listelem=%lx " 1979 1970 "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr, 1980 - pginfo, pginfo->type, pginfo->num_pages, 1981 - pginfo->num_4k, pginfo->next_buf, pginfo->next_4k, 1982 - rpage, pginfo->page_cnt, pginfo->page_4k_cnt, 1983 - pginfo->next_listelem, pginfo->region, 1984 - pginfo->next_chunk, pginfo->next_nmap); 1971 + pginfo, pginfo->type, pginfo->num_kpages, 1972 + pginfo->num_hwpages, pginfo->u.phy.next_buf, 
pginfo->next_hwpage, 1973 + rpage, pginfo->kpage_cnt, pginfo->hwpage_cnt, 1974 + pginfo->u.fmr.next_listelem, pginfo->u.usr.region, 1975 + pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap); 1985 1976 return ret; 1986 1977 } /* end ehca_set_pagebuf_1() */ 1987 1978 ··· 2062 2053 */ 2063 2054 void ehca_mr_deletenew(struct ehca_mr *mr) 2064 2055 { 2065 - mr->flags = 0; 2066 - mr->num_pages = 0; 2067 - mr->num_4k = 0; 2068 - mr->acl = 0; 2069 - mr->start = NULL; 2056 + mr->flags = 0; 2057 + mr->num_kpages = 0; 2058 + mr->num_hwpages = 0; 2059 + mr->acl = 0; 2060 + mr->start = NULL; 2070 2061 mr->fmr_page_size = 0; 2071 2062 mr->fmr_max_pages = 0; 2072 - mr->fmr_max_maps = 0; 2073 - mr->fmr_map_cnt = 0; 2063 + mr->fmr_max_maps = 0; 2064 + mr->fmr_map_cnt = 0; 2074 2065 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); 2075 2066 memset(&mr->galpas, 0, sizeof(mr->galpas)); 2076 - mr->nr_of_pages = 0; 2077 - mr->pagearray = NULL; 2078 2067 } /* end ehca_mr_deletenew() */ 2079 2068 2080 2069 int ehca_init_mrmw_cache(void)