Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA: Add __init/__exit macros to addr.c and cma.c
IB/ehca: Bump version number
mlx4_core: Fix dma_sync_single_for_cpu() with matching for_device() calls
IB/mthca: Replace dma_sync_single() use with proper functions
RDMA/nes: Fix FIN state handling under error conditions
RDMA/nes: Fix max_qp_init_rd_atom returned from query device
IB/ehca: Ensure that guid_entry index is not negative
IB/ehca: Tolerate dynamic memory operations before driver load

+554 -34
+2 -2
drivers/infiniband/core/addr.c
···
         .notifier_call = netevent_callback
 };
 
-static int addr_init(void)
+static int __init addr_init(void)
 {
         addr_wq = create_singlethread_workqueue("ib_addr");
         if (!addr_wq)
···
         return 0;
 }
 
-static void addr_cleanup(void)
+static void __exit addr_cleanup(void)
 {
         unregister_netevent_notifier(&nb);
         destroy_workqueue(addr_wq);
+2 -2
drivers/infiniband/core/cma.c
···
         kfree(cma_dev);
 }
 
-static int cma_init(void)
+static int __init cma_init(void)
 {
         int ret, low, high, remaining;
 
···
         return ret;
 }
 
-static void cma_cleanup(void)
+static void __exit cma_cleanup(void)
 {
         ib_unregister_client(&cma_client);
         unregister_netdevice_notifier(&cma_nb);
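For reference, both hunks above apply the standard pattern for module entry points: functions that are referenced only through module_init()/module_exit() can be marked __init/__exit so the kernel may discard the init text after boot, and drop the exit code entirely when the object is built in. A minimal sketch of the pattern, with a hypothetical module name:

/* Sketch only: illustrates __init/__exit on module entry points. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
        pr_info("example: loaded\n");
        return 0;
}

static void __exit example_cleanup(void)
{
        pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_cleanup);
MODULE_LICENSE("GPL");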
+1 -1
drivers/infiniband/hw/ehca/ehca_hca.c
···
                                              ib_device);
         struct hipz_query_port *rblock;
 
-        if (index > 255) {
+        if (index < 0 || index > 255) {
                 ehca_err(&shca->ib_device, "Invalid index: %x.", index);
                 return -EINVAL;
         }
+16 -4
drivers/infiniband/hw/ehca/ehca_main.c
···
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0027"
+#define HCAD_VERSION "0028"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
···
         shca->ib_device.detach_mcast        = ehca_detach_mcast;
         shca->ib_device.process_mad         = ehca_process_mad;
         shca->ib_device.mmap                = ehca_mmap;
+        shca->ib_device.dma_ops             = &ehca_dma_mapping_ops;
 
         if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
                 shca->ib_device.uverbs_cmd_mask |=
···
                 goto module_init1;
         }
 
+        ret = ehca_create_busmap();
+        if (ret) {
+                ehca_gen_err("Cannot create busmap.");
+                goto module_init2;
+        }
+
         ret = ibmebus_register_driver(&ehca_driver);
         if (ret) {
                 ehca_gen_err("Cannot register eHCA device driver");
                 ret = -EINVAL;
-                goto module_init2;
+                goto module_init3;
         }
 
         ret = register_memory_notifier(&ehca_mem_nb);
         if (ret) {
                 ehca_gen_err("Failed registering memory add/remove notifier");
-                goto module_init3;
+                goto module_init4;
         }
 
         if (ehca_poll_all_eqs != 1) {
···
 
         return 0;
 
-module_init3:
+module_init4:
         ibmebus_unregister_driver(&ehca_driver);
+
+module_init3:
+        ehca_destroy_busmap();
 
 module_init2:
         ehca_destroy_slab_caches();
···
         ibmebus_unregister_driver(&ehca_driver);
 
         unregister_memory_notifier(&ehca_mem_nb);
+
+        ehca_destroy_busmap();
 
         ehca_destroy_slab_caches();
 
+495 -13
drivers/infiniband/hw/ehca/ehca_mrmw.c
···
 /* max number of rpages (per hcall register_rpages) */
 #define MAX_RPAGES 512
 
+/* DMEM toleration management */
+#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
+#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
+#define EHCA_HUGEPAGESHIFT     34
+#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
+#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
+#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
+#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
+#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
+#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
+#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
+#define EHCA_DIR_MAP_SIZE (0x10000)
+#define EHCA_ENT_MAP_SIZE (0x10000)
+#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
+
+static unsigned long ehca_mr_len;
+
+/*
+ * Memory map data structures
+ */
+struct ehca_dir_bmap {
+        u64 ent[EHCA_MAP_ENTRIES];
+};
+struct ehca_top_bmap {
+        struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
+};
+struct ehca_bmap {
+        struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
+};
+
+static struct ehca_bmap *ehca_bmap;
+
 static struct kmem_cache *mr_cache;
 static struct kmem_cache *mw_cache;
 
···
 #define EHCA_MR_PGSHIFT64K 16
 #define EHCA_MR_PGSHIFT1M  20
 #define EHCA_MR_PGSHIFT16M 24
+
+static u64 ehca_map_vaddr(void *caddr);
 
 static u32 ehca_encode_hwpage_size(u32 pgsize)
 {
···
                 goto get_dma_mr_exit0;
         }
 
-        ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
+        ret = ehca_reg_maxmr(shca, e_maxmr,
+                             (void *)ehca_map_vaddr((void *)KERNELBASE),
                              mr_access_flags, e_pd,
                              &e_maxmr->ib.ib_mr.lkey,
                              &e_maxmr->ib.ib_mr.rkey);
···
 
         ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
                           e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-                          &e_mr->ib.ib_mr.rkey);
+                          &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
         if (ret) {
                 ib_mr = ERR_PTR(ret);
                 goto reg_phys_mr_exit1;
···
 
         ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
                           e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-                          &e_mr->ib.ib_mr.rkey);
+                          &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
         if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
                 ehca_warn(pd->device, "failed to register mr "
                           "with hwpage_size=%llx", hwpage_size);
···
         ret = ehca_reg_mr(shca, e_fmr, NULL,
                           fmr_attr->max_pages * (1 << fmr_attr->page_shift),
                           mr_access_flags, e_pd, &pginfo,
-                          &tmp_lkey, &tmp_rkey);
+                          &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
         if (ret) {
                 ib_fmr = ERR_PTR(ret);
                 goto alloc_fmr_exit1;
···
 
 /*----------------------------------------------------------------------*/
 
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+                                   struct ehca_mr *e_mr,
+                                   struct ehca_mr_pginfo *pginfo);
+
 int ehca_reg_mr(struct ehca_shca *shca,
                 struct ehca_mr *e_mr,
                 u64 *iova_start,
···
                 struct ehca_pd *e_pd,
                 struct ehca_mr_pginfo *pginfo,
                 u32 *lkey, /*OUT*/
-                u32 *rkey) /*OUT*/
+                u32 *rkey, /*OUT*/
+                enum ehca_reg_type reg_type)
 {
         int ret;
         u64 h_ret;
···
 
         e_mr->ipz_mr_handle = hipzout.handle;
 
-        ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+        if (reg_type == EHCA_REG_BUSMAP_MR)
+                ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
+        else if (reg_type == EHCA_REG_MR)
+                ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+        else
+                ret = -EINVAL;
+
         if (ret)
                 goto ehca_reg_mr_exit1;
 
···
         e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
 
         ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
-                          e_pd, pginfo, lkey, rkey);
+                          e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
         if (ret) {
                 u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
                 memcpy(&e_mr->flags, &(save_mr.flags),
···
         ret = ehca_reg_mr(shca, e_fmr, NULL,
                           (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
                           e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
-                          &tmp_rkey);
+                          &tmp_rkey, EHCA_REG_MR);
         if (ret) {
                 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
                 memcpy(&e_fmr->flags, &(save_mr.flags),
···
 } /* end ehca_reg_smr() */
 
 /*----------------------------------------------------------------------*/
+static inline void *ehca_calc_sectbase(int top, int dir, int idx)
+{
+        unsigned long ret = idx;
+        ret |= dir << EHCA_DIR_INDEX_SHIFT;
+        ret |= top << EHCA_TOP_INDEX_SHIFT;
+        return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+#define ehca_bmap_valid(entry) \
+        ((u64)entry != (u64)EHCA_INVAL_ADDR)
+
+static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
+                               struct ehca_shca *shca, struct ehca_mr *mr,
+                               struct ehca_mr_pginfo *pginfo)
+{
+        u64 h_ret = 0;
+        unsigned long page = 0;
+        u64 rpage = virt_to_abs(kpage);
+        int page_count;
+
+        void *sectbase = ehca_calc_sectbase(top, dir, idx);
+        if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
+                ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
+                                           "hwpage_size does not fit to "
+                                           "section start address");
+        }
+        page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
+
+        while (page < page_count) {
+                u64 rnum;
+                for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
+                     rnum++) {
+                        void *pg = sectbase + ((page++) * pginfo->hwpage_size);
+                        kpage[rnum] = virt_to_abs(pg);
+                }
+
+                h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
+                        ehca_encode_hwpage_size(pginfo->hwpage_size),
+                        0, rpage, rnum);
+
+                if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
+                        ehca_err(&shca->ib_device, "register_rpage_mr failed");
+                        return h_ret;
+                }
+        }
+        return h_ret;
+}
+
+static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
+                                struct ehca_shca *shca, struct ehca_mr *mr,
+                                struct ehca_mr_pginfo *pginfo)
+{
+        u64 hret = H_SUCCESS;
+        int idx;
+
+        for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
+                if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
+                        continue;
+
+                hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
+                                           pginfo);
+                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+                        return hret;
+        }
+        return hret;
+}
+
+static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
+                                    struct ehca_mr *mr,
+                                    struct ehca_mr_pginfo *pginfo)
+{
+        u64 hret = H_SUCCESS;
+        int dir;
+
+        for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+                if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+                        continue;
+
+                hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
+                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+                        return hret;
+        }
+        return hret;
+}
 
 /* register internal max-MR to internal SHCA */
 int ehca_reg_internal_maxmr(
···
         u32 num_hwpages;
         u64 hw_pgsize;
 
+        if (!ehca_bmap) {
+                ret = -EFAULT;
+                goto ehca_reg_internal_maxmr_exit0;
+        }
+
         e_mr = ehca_mr_new();
         if (!e_mr) {
                 ehca_err(&shca->ib_device, "out of memory");
···
         e_mr->flags |= EHCA_MR_FLAG_MAXMR;
 
         /* register internal max-MR on HCA */
-        size_maxmr = (u64)high_memory - PAGE_OFFSET;
-        iova_start = (u64 *)KERNELBASE;
+        size_maxmr = ehca_mr_len;
+        iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
         ib_pbuf.addr = 0;
         ib_pbuf.size = size_maxmr;
         num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
···
 
         ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
                           &pginfo, &e_mr->ib.ib_mr.lkey,
-                          &e_mr->ib.ib_mr.rkey);
+                          &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
         if (ret) {
                 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
                          "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
···
                         u64 *iova_start)
 {
         /* a MR is treated as max-MR only if it fits following: */
-        if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
-            (iova_start == (void *)KERNELBASE)) {
+        if ((size == ehca_mr_len) &&
+            (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
                 ehca_gen_dbg("this is a max-MR");
                 return 1;
         } else
···
         if (mw_cache)
                 kmem_cache_destroy(mw_cache);
 }
+
+static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
+                                     int dir)
+{
+        if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
+                ehca_top_bmap->dir[dir] =
+                        kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
+                if (!ehca_top_bmap->dir[dir])
+                        return -ENOMEM;
+                /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+                memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
+        }
+        return 0;
+}
+
+static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
+{
+        if (!ehca_bmap_valid(ehca_bmap->top[top])) {
+                ehca_bmap->top[top] =
+                        kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
+                if (!ehca_bmap->top[top])
+                        return -ENOMEM;
+                /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+                memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
+        }
+        return ehca_init_top_bmap(ehca_bmap->top[top], dir);
+}
+
+static inline int ehca_calc_index(unsigned long i, unsigned long s)
+{
+        return (i >> s) & EHCA_INDEX_MASK;
+}
+
+void ehca_destroy_busmap(void)
+{
+        int top, dir;
+
+        if (!ehca_bmap)
+                return;
+
+        for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+                if (!ehca_bmap_valid(ehca_bmap->top[top]))
+                        continue;
+                for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+                        if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+                                continue;
+
+                        kfree(ehca_bmap->top[top]->dir[dir]);
+                }
+
+                kfree(ehca_bmap->top[top]);
+        }
+
+        kfree(ehca_bmap);
+        ehca_bmap = NULL;
+}
+
+static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
+{
+        unsigned long i, start_section, end_section;
+        int top, dir, idx;
+
+        if (!nr_pages)
+                return 0;
+
+        if (!ehca_bmap) {
+                ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
+                if (!ehca_bmap)
+                        return -ENOMEM;
+                /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+                memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
+        }
+
+        start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
+        end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
+        for (i = start_section; i < end_section; i++) {
+                int ret;
+                top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
+                dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
+                idx = i & EHCA_INDEX_MASK;
+
+                ret = ehca_init_bmap(ehca_bmap, top, dir);
+                if (ret) {
+                        ehca_destroy_busmap();
+                        return ret;
+                }
+                ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
+                ehca_mr_len += EHCA_SECTSIZE;
+        }
+        return 0;
+}
+
+static int ehca_is_hugepage(unsigned long pfn)
+{
+        int page_order;
+
+        if (pfn & EHCA_HUGEPAGE_PFN_MASK)
+                return 0;
+
+        page_order = compound_order(pfn_to_page(pfn));
+        if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
+                return 0;
+
+        return 1;
+}
+
+static int ehca_create_busmap_callback(unsigned long initial_pfn,
+                                       unsigned long total_nr_pages, void *arg)
+{
+        int ret;
+        unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+        if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
+                return ehca_update_busmap(initial_pfn, total_nr_pages);
+
+        /* Given chunk is >= 16GB -> check for hugepages */
+        start_pfn = initial_pfn;
+        end_pfn = initial_pfn + total_nr_pages;
+        pfn = start_pfn;
+
+        while (pfn < end_pfn) {
+                if (ehca_is_hugepage(pfn)) {
+                        /* Add mem found in front of the hugepage */
+                        nr_pages = pfn - start_pfn;
+                        ret = ehca_update_busmap(start_pfn, nr_pages);
+                        if (ret)
+                                return ret;
+                        /* Skip the hugepage */
+                        pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
+                        start_pfn = pfn;
+                } else
+                        pfn += (EHCA_SECTSIZE / PAGE_SIZE);
+        }
+
+        /* Add mem found behind the hugepage(s) */
+        nr_pages = pfn - start_pfn;
+        return ehca_update_busmap(start_pfn, nr_pages);
+}
+
+int ehca_create_busmap(void)
+{
+        int ret;
+
+        ehca_mr_len = 0;
+        ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+                                   ehca_create_busmap_callback);
+        return ret;
+}
+
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+                                   struct ehca_mr *e_mr,
+                                   struct ehca_mr_pginfo *pginfo)
+{
+        int top;
+        u64 hret, *kpage;
+
+        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+        if (!kpage) {
+                ehca_err(&shca->ib_device, "kpage alloc failed");
+                return -ENOMEM;
+        }
+        for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+                if (!ehca_bmap_valid(ehca_bmap->top[top]))
+                        continue;
+                hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
+                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+                        break;
+        }
+
+        ehca_free_fw_ctrlblock(kpage);
+
+        if (hret == H_SUCCESS)
+                return 0; /* Everything is fine */
+        else {
+                ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
+                                 "h_ret=%lli e_mr=%p top=%x lkey=%x "
+                                 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
+                                 e_mr->ib.ib_mr.lkey,
+                                 shca->ipz_hca_handle.handle,
+                                 e_mr->ipz_mr_handle.handle);
+                return ehca2ib_return_code(hret);
+        }
+}
+
+static u64 ehca_map_vaddr(void *caddr)
+{
+        int top, dir, idx;
+        unsigned long abs_addr, offset;
+        u64 entry;
+
+        if (!ehca_bmap)
+                return EHCA_INVAL_ADDR;
+
+        abs_addr = virt_to_abs(caddr);
+        top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
+        if (!ehca_bmap_valid(ehca_bmap->top[top]))
+                return EHCA_INVAL_ADDR;
+
+        dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
+        if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+                return EHCA_INVAL_ADDR;
+
+        idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
+
+        entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
+        if (ehca_bmap_valid(entry)) {
+                offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
+                return entry | offset;
+        } else
+                return EHCA_INVAL_ADDR;
+}
+
+static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+        return dma_addr == EHCA_INVAL_ADDR;
+}
+
+static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
+                               size_t size, enum dma_data_direction direction)
+{
+        if (cpu_addr)
+                return ehca_map_vaddr(cpu_addr);
+        else
+                return EHCA_INVAL_ADDR;
+}
+
+static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+                                  enum dma_data_direction direction)
+{
+        /* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
+                             unsigned long offset, size_t size,
+                             enum dma_data_direction direction)
+{
+        u64 addr;
+
+        if (offset + size > PAGE_SIZE)
+                return EHCA_INVAL_ADDR;
+
+        addr = ehca_map_vaddr(page_address(page));
+        if (!ehca_dma_mapping_error(dev, addr))
+                addr += offset;
+
+        return addr;
+}
+
+static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+                                enum dma_data_direction direction)
+{
+        /* This is only a stub; nothing to be done here */
+}
+
+static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+                           int nents, enum dma_data_direction direction)
+{
+        struct scatterlist *sg;
+        int i;
+
+        for_each_sg(sgl, sg, nents, i) {
+                u64 addr;
+                addr = ehca_map_vaddr(sg_virt(sg));
+                if (ehca_dma_mapping_error(dev, addr))
+                        return 0;
+
+                sg->dma_address = addr;
+                sg->dma_length = sg->length;
+        }
+        return nents;
+}
+
+static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+                              int nents, enum dma_data_direction direction)
+{
+        /* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+        return sg->dma_address;
+}
+
+static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
+{
+        return sg->length;
+}
+
+static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+                                         size_t size,
+                                         enum dma_data_direction dir)
+{
+        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
+                                            size_t size,
+                                            enum dma_data_direction dir)
+{
+        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
+                                     u64 *dma_handle, gfp_t flag)
+{
+        struct page *p;
+        void *addr = NULL;
+        u64 dma_addr;
+
+        p = alloc_pages(flag, get_order(size));
+        if (p) {
+                addr = page_address(p);
+                dma_addr = ehca_map_vaddr(addr);
+                if (ehca_dma_mapping_error(dev, dma_addr)) {
+                        free_pages((unsigned long)addr, get_order(size));
+                        return NULL;
+                }
+                if (dma_handle)
+                        *dma_handle = dma_addr;
+                return addr;
+        }
+        return NULL;
+}
+
+static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
+                                   void *cpu_addr, u64 dma_handle)
+{
+        if (cpu_addr && size)
+                free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+
+struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
+        .mapping_error          = ehca_dma_mapping_error,
+        .map_single             = ehca_dma_map_single,
+        .unmap_single           = ehca_dma_unmap_single,
+        .map_page               = ehca_dma_map_page,
+        .unmap_page             = ehca_dma_unmap_page,
+        .map_sg                 = ehca_dma_map_sg,
+        .unmap_sg               = ehca_dma_unmap_sg,
+        .dma_address            = ehca_dma_address,
+        .dma_len                = ehca_dma_len,
+        .sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
+        .sync_single_for_device = ehca_dma_sync_single_for_device,
+        .alloc_coherent         = ehca_dma_alloc_coherent,
+        .free_coherent          = ehca_dma_free_coherent,
+};
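The new busmap above is a three-level radix table indexed by memory-section number: ehca_calc_index() extracts 13-bit top/dir/idx fields, and ehca_map_vaddr() turns a kernel virtual address into the contiguous "bus" offset stored in the leaf entries. As a stand-alone illustration of that index arithmetic only (same shift values, hypothetical section number, not part of the patch):

/* Illustration: split a section number into top/dir/idx radix-tree indices. */
#include <stdio.h>

#define DIR_INDEX_SHIFT 13
#define TOP_INDEX_SHIFT (DIR_INDEX_SHIFT * 2)
#define MAP_ENTRIES     (1UL << DIR_INDEX_SHIFT)
#define INDEX_MASK      (MAP_ENTRIES - 1)

int main(void)
{
        unsigned long section = 0x12345678UL;   /* arbitrary example value */
        unsigned long top = (section >> TOP_INDEX_SHIFT) & INDEX_MASK;
        unsigned long dir = (section >> DIR_INDEX_SHIFT) & INDEX_MASK;
        unsigned long idx = section & INDEX_MASK;

        printf("top=%lu dir=%lu idx=%lu\n", top, dir, idx);
        return 0;
}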
+12 -1
drivers/infiniband/hw/ehca/ehca_mrmw.h
···
 #ifndef _EHCA_MRMW_H_
 #define _EHCA_MRMW_H_
 
+enum ehca_reg_type {
+        EHCA_REG_MR,
+        EHCA_REG_BUSMAP_MR
+};
+
 int ehca_reg_mr(struct ehca_shca *shca,
                 struct ehca_mr *e_mr,
                 u64 *iova_start,
···
                 struct ehca_pd *e_pd,
                 struct ehca_mr_pginfo *pginfo,
                 u32 *lkey,
-                u32 *rkey);
+                u32 *rkey,
+                enum ehca_reg_type reg_type);
 
 int ehca_reg_mr_rpages(struct ehca_shca *shca,
                        struct ehca_mr *e_mr,
···
 
 void ehca_mr_deletenew(struct ehca_mr *mr);
 
+int ehca_create_busmap(void);
+
+void ehca_destroy_busmap(void);
+
+extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
 #endif  /*_EHCA_MRMW_H_*/
+10 -3
drivers/infiniband/hw/mthca/mthca_mr.c
···
 
         BUG_ON(!mtts);
 
+        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+                                list_len * sizeof (u64), DMA_TO_DEVICE);
+
         for (i = 0; i < list_len; ++i)
                 mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);
 
-        dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
+        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+                                   list_len * sizeof (u64), DMA_TO_DEVICE);
 }
 
 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
···
 
         wmb();
 
+        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+                                list_len * sizeof(u64), DMA_TO_DEVICE);
+
         for (i = 0; i < list_len; ++i)
                 fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
                                                      MTHCA_MTT_FLAG_PRESENT);
 
-        dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
-                        list_len * sizeof(u64), DMA_TO_DEVICE);
+        dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+                                   list_len * sizeof(u64), DMA_TO_DEVICE);
 
         fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
         fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
+5 -3
drivers/infiniband/hw/nes/nes_cm.c
···
 
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
+        struct iw_cm_id *cm_id = cm_node->cm_id;
         switch (cm_node->state) {
         case NES_CM_STATE_SYN_RCVD:
         case NES_CM_STATE_CLOSING:
···
                 break;
         case NES_CM_STATE_LAST_ACK:
         case NES_CM_STATE_FIN_WAIT1:
-        case NES_CM_STATE_MPAREJ_RCVD:
+                if (cm_node->cm_id)
+                        cm_id->rem_ref(cm_id);
+                cm_node->state = NES_CM_STATE_CLOSED;
                 send_reset(cm_node, NULL);
                 break;
         default:
···
         case NES_CM_STATE_CLOSED:
                 drop_packet(skb);
                 break;
+        case NES_CM_STATE_FIN_WAIT1:
         case NES_CM_STATE_LAST_ACK:
                 cm_node->cm_id->rem_ref(cm_node->cm_id);
         case NES_CM_STATE_TIME_WAIT:
···
                 rem_ref_cm_node(cm_node->cm_core, cm_node);
                 drop_packet(skb);
                 break;
-        case NES_CM_STATE_FIN_WAIT1:
-                nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
         default:
                 drop_packet(skb);
                 break;
+1 -1
drivers/infiniband/hw/nes/nes_verbs.c
···
         default:
                 props->max_qp_rd_atom = 0;
         }
-        props->max_qp_init_rd_atom = props->max_qp_wr;
+        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
         props->atomic_cap = IB_ATOMIC_NONE;
         props->max_map_per_fmr = 1;
 
+10 -4
drivers/net/mlx4/mr.c
···
         if (!mtts)
                 return -ENOMEM;
 
+        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+                                npages * sizeof (u64), DMA_TO_DEVICE);
+
         for (i = 0; i < npages; ++i)
                 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
-                                npages * sizeof (u64), DMA_TO_DEVICE);
+        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+                                   npages * sizeof (u64), DMA_TO_DEVICE);
 
         return 0;
 }
···
         /* Make sure MPT status is visible before writing MTT entries */
         wmb();
 
+        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+                                npages * sizeof(u64), DMA_TO_DEVICE);
+
         for (i = 0; i < npages; ++i)
                 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
-                                npages * sizeof(u64), DMA_TO_DEVICE);
+        dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+                                   npages * sizeof(u64), DMA_TO_DEVICE);
 
         fmr->mpt->key    = cpu_to_be32(key);
         fmr->mpt->lkey   = cpu_to_be32(key);
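The mthca and mlx4 hunks above apply the same streaming-DMA rule: hand the mapped MTT buffer back to the CPU with dma_sync_single_for_cpu() before the CPU writes it, then hand it back to the device with dma_sync_single_for_device() once the entries are in place. A minimal sketch of the pattern, with hypothetical device and table names:

/* Sketch: bracket CPU updates of a DMA_TO_DEVICE mapping with sync calls. */
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void example_update_table(struct device *dev, __be64 *table,
                                 dma_addr_t dma_handle, u64 *entries, int n)
{
        int i;

        /* give ownership of the mapped buffer to the CPU before writing */
        dma_sync_single_for_cpu(dev, dma_handle, n * sizeof(u64),
                                DMA_TO_DEVICE);

        for (i = 0; i < n; ++i)
                table[i] = cpu_to_be64(entries[i]);

        /* hand ownership back to the device so it sees the new entries */
        dma_sync_single_for_device(dev, dma_handle, n * sizeof(u64),
                                   DMA_TO_DEVICE);
}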