Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"5 patches.

Subsystems affected by this patch series: coda, overlayfs, and
mm (pagecache and memcg)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
tools/cgroup/slabinfo.py: updated to work on current kernel
mm/filemap: fix mapping_seek_hole_data on THP & 32-bit
mm/filemap: fix find_lock_entries hang on 32-bit THP
ovl: fix reference counting in ovl_mmap error path
coda: fix reference counting in coda_file_mmap error path

+27 -29
+3 -3
fs/coda/file.c
@@ -175,10 +175,10 @@
 	ret = call_mmap(vma->vm_file, vma);
 
 	if (ret) {
-		/* if call_mmap fails, our caller will put coda_file so we
-		 * should drop the reference to the host_file that we got.
+		/* if call_mmap fails, our caller will put host_file so we
+		 * should drop the reference to the coda_file that we got.
 		 */
-		fput(host_file);
+		fput(coda_file);
 		kfree(cvm_ops);
 	} else {
 		/* here we add redirects for the open/close vm_operations */
+1 -10
fs/overlayfs/file.c
@@ -430,20 +430,11 @@
 	if (WARN_ON(file != vma->vm_file))
 		return -EIO;
 
-	vma->vm_file = get_file(realfile);
+	vma_set_file(vma, realfile);
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = call_mmap(vma->vm_file, vma);
 	revert_creds(old_cred);
-
-	if (ret) {
-		/* Drop reference count from new vm_file value */
-		fput(realfile);
-	} else {
-		/* Drop reference count from previous vm_file value */
-		fput(file);
-	}
-
 	ovl_file_accessed(file);
 
 	return ret;
+19 -12
mm/filemap.c
@@ -1969,8 +1969,14 @@
 put:
 		put_page(page);
 next:
-		if (!xa_is_value(page) && PageTransHuge(page))
-			xas_set(&xas, page->index + thp_nr_pages(page));
+		if (!xa_is_value(page) && PageTransHuge(page)) {
+			unsigned int nr_pages = thp_nr_pages(page);
+
+			/* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
+			xas_set(&xas, page->index + nr_pages);
+			if (xas.xa_index < nr_pages)
+				break;
+		}
 	}
 	rcu_read_unlock();
 
@@ -2678,7 +2672,7 @@
 		loff_t end, int whence)
 {
 	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
-	pgoff_t max = (end - 1) / PAGE_SIZE;
+	pgoff_t max = (end - 1) >> PAGE_SHIFT;
 	bool seek_data = (whence == SEEK_DATA);
 	struct page *page;
 
@@ -2687,9 +2681,10 @@
 
 	rcu_read_lock();
 	while ((page = find_get_entry(&xas, max, XA_PRESENT))) {
-		loff_t pos = xas.xa_index * PAGE_SIZE;
+		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
+		unsigned int seek_size;
 
 		if (start < pos) {
 			if (!seek_data)
@@ -2696,22 +2689,25 @@
 			start = pos;
 		}
 
-		pos += seek_page_size(&xas, page);
+		seek_size = seek_page_size(&xas, page);
+		pos = round_up(pos + 1, seek_size);
 		start = page_seek_hole_data(&xas, mapping, page, start, pos,
 				seek_data);
 		if (start < pos)
 			goto unlock;
+		if (start >= end)
+			break;
+		if (seek_size > PAGE_SIZE)
+			xas_set(&xas, pos >> PAGE_SHIFT);
 		if (!xa_is_value(page))
 			put_page(page);
 	}
-	rcu_read_unlock();
-
 	if (seek_data)
-		return -ENXIO;
-	goto out;
-
+		start = -ENXIO;
 unlock:
 	rcu_read_unlock();
-	if (!xa_is_value(page))
+	if (page && !xa_is_value(page))
 		put_page(page);
-out:
 	if (start > end)
 		return end;
 	return start;
+4 -4
tools/cgroup/memcg_slabinfo.py
@@ -128,9 +128,9 @@
 
     cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
 
-    if prog.type('struct kmem_cache').members[1][1] == 'flags':
+    if prog.type('struct kmem_cache').members[1].name == 'flags':
         cfg['allocator'] = 'SLUB'
-    elif prog.type('struct kmem_cache').members[1][1] == 'batchcount':
+    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
         cfg['allocator'] = 'SLAB'
     else:
         err('Can\'t determine the slab allocator')
@@ -193,7 +193,7 @@
     # look over all slab pages, belonging to non-root memcgs
     # and look for objects belonging to the given memory cgroup
     for page in for_each_slab_page(prog):
-        objcg_vec_raw = page.obj_cgroups.value_()
+        objcg_vec_raw = page.memcg_data.value_()
         if objcg_vec_raw == 0:
             continue
         cache = page.slab_cache
@@ -202,7 +202,7 @@
         addr = cache.value_()
         caches[addr] = cache
         # clear the lowest bit to get the true obj_cgroups
-        objcg_vec = Object(prog, page.obj_cgroups.type_,
+        objcg_vec = Object(prog, 'struct obj_cgroup **',
                            value=objcg_vec_raw & ~1)
 
         if addr not in stats: