Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'akpm' (patches from Andrew)

Merge third patchbomb from Andrew Morton:

- the rest of MM

- scripts/gdb updates

- ipc/ updates

- lib/ updates

- MAINTAINERS updates

- various other misc things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (67 commits)
genalloc: rename of_get_named_gen_pool() to of_gen_pool_get()
genalloc: rename dev_get_gen_pool() to gen_pool_get()
x86: opt into HAVE_COPY_THREAD_TLS, for both 32-bit and 64-bit
MAINTAINERS: add zpool
MAINTAINERS: BCACHE: Kent Overstreet has changed email address
MAINTAINERS: move Jens Osterkamp to CREDITS
MAINTAINERS: remove unused nbd.h pattern
MAINTAINERS: update brcm gpio filename pattern
MAINTAINERS: update brcm dts pattern
MAINTAINERS: update sound soc intel patterns
MAINTAINERS: remove website for paride
MAINTAINERS: update Emulex ocrdma email addresses
bcache: use kvfree() in various places
libcxgbi: use kvfree() in cxgbi_free_big_mem()
target: use kvfree() in session alloc and free
IB/ehca: use kvfree() in ipz_queue_{cd}tor()
drm/nouveau/gem: use kvfree() in u_free()
drm: use kvfree() in drm_free_large()
cxgb4: use kvfree() in t4_free_mem()
cxgb3: use kvfree() in cxgb_free_mem()
...

+894 -339
+4
CREDITS
··· 2740 2740 S: Valladolid 47009 2741 2741 S: Spain 2742 2742 2743 + N: Jens Osterkamp 2744 + E: jens@de.ibm.com 2745 + D: Maintainer of Spidernet network driver for Cell 2746 + 2743 2747 N: Gadi Oxman 2744 2748 E: gadio@netvision.net.il 2745 2749 D: Original author and maintainer of IDE/ATAPI floppy/tape drivers
+18 -15
MAINTAINERS
··· 2026 2026 F: drivers/net/hamradio/baycom* 2027 2027 2028 2028 BCACHE (BLOCK LAYER CACHE) 2029 - M: Kent Overstreet <kmo@daterainc.com> 2029 + M: Kent Overstreet <kent.overstreet@gmail.com> 2030 2030 L: linux-bcache@vger.kernel.org 2031 2031 W: http://bcache.evilpiepirate.org 2032 - S: Maintained: 2032 + S: Maintained 2033 2033 F: drivers/md/bcache/ 2034 2034 2035 2035 BDISP ST MEDIA DRIVER ··· 2280 2280 F: arch/mips/bmips/* 2281 2281 F: arch/mips/include/asm/mach-bmips/* 2282 2282 F: arch/mips/kernel/*bmips* 2283 - F: arch/mips/boot/dts/bcm*.dts* 2283 + F: arch/mips/boot/dts/brcm/bcm*.dts* 2284 2284 F: drivers/irqchip/irq-bcm7* 2285 2285 F: drivers/irqchip/irq-brcmstb* 2286 2286 ··· 2339 2339 L: bcm-kernel-feedback-list@broadcom.com 2340 2340 S: Supported 2341 2341 F: drivers/gpio/gpio-bcm-kona.c 2342 - F: Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt 2342 + F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt 2343 2343 2344 2344 BROADCOM NVRAM DRIVER 2345 2345 M: Rafał Miłecki <zajec5@gmail.com> ··· 5285 5285 M: Jie Yang <yang.jie@linux.intel.com> 5286 5286 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5287 5287 S: Supported 5288 - F: sound/soc/intel/sst-haswell* 5289 - F: sound/soc/intel/sst-dsp* 5290 - F: sound/soc/intel/sst-firmware.c 5291 - F: sound/soc/intel/broadwell.c 5292 - F: sound/soc/intel/haswell.c 5288 + F: sound/soc/intel/common/sst-dsp* 5289 + F: sound/soc/intel/common/sst-firmware.c 5290 + F: sound/soc/intel/boards/broadwell.c 5291 + F: sound/soc/intel/haswell/ 5293 5292 5294 5293 INTEL C600 SERIES SAS CONTROLLER DRIVER 5295 5294 M: Intel SCU Linux support <intel-linux-scu@intel.com> ··· 7018 7019 T: git git://git.pengutronix.de/git/mpa/linux-nbd.git 7019 7020 F: Documentation/blockdev/nbd.txt 7020 7021 F: drivers/block/nbd.c 7021 - F: include/linux/nbd.h 7022 7022 F: include/uapi/linux/nbd.h 7023 7023 7024 7024 NETWORK DROP MONITOR ··· 7645 7647 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES 7646 7648 M: Tim Waugh <tim@cyberelk.net> 7647 7649 L: linux-parport@lists.infradead.org (subscribers-only) 7648 - W: http://www.torque.net/linux-pp.html 7649 7650 S: Maintained 7650 7651 F: Documentation/blockdev/paride.txt 7651 7652 F: drivers/block/paride/ ··· 9088 9091 F: drivers/net/ethernet/emulex/benet/ 9089 9092 9090 9093 EMULEX ONECONNECT ROCE DRIVER 9091 - M: Selvin Xavier <selvin.xavier@emulex.com> 9092 - M: Devesh Sharma <devesh.sharma@emulex.com> 9093 - M: Mitesh Ahuja <mitesh.ahuja@emulex.com> 9094 + M: Selvin Xavier <selvin.xavier@avagotech.com> 9095 + M: Devesh Sharma <devesh.sharma@avagotech.com> 9096 + M: Mitesh Ahuja <mitesh.ahuja@avagotech.com> 9094 9097 L: linux-rdma@vger.kernel.org 9095 9098 W: http://www.emulex.com 9096 9099 S: Supported ··· 9590 9593 9591 9594 SPIDERNET NETWORK DRIVER for CELL 9592 9595 M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp> 9593 - M: Jens Osterkamp <jens@de.ibm.com> 9594 9596 L: netdev@vger.kernel.org 9595 9597 S: Supported 9596 9598 F: Documentation/networking/spider_net.txt ··· 11349 11353 L: zd1211-devs@lists.sourceforge.net (subscribers-only) 11350 11354 S: Maintained 11351 11355 F: drivers/net/wireless/zd1211rw/ 11356 + 11357 + ZPOOL COMPRESSED PAGE STORAGE API 11358 + M: Dan Streetman <ddstreet@ieee.org> 11359 + L: linux-mm@kvack.org 11360 + S: Maintained 11361 + F: mm/zpool.c 11362 + F: include/linux/zpool.h 11352 11363 11353 11364 ZR36067 VIDEO FOR LINUX DRIVER 11354 11365 L: mjpeg-users@lists.sourceforge.net
+7 -5
arch/arc/include/asm/dma-mapping.h
··· 157 157 } 158 158 159 159 static inline void 160 - dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, 160 + dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, 161 161 enum dma_data_direction dir) 162 162 { 163 163 int i; 164 + struct scatterlist *sg; 164 165 165 - for (i = 0; i < nelems; i++, sg++) 166 + for_each_sg(sglist, sg, nelems, i) 166 167 _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); 167 168 } 168 169 169 170 static inline void 170 - dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, 171 - enum dma_data_direction dir) 171 + dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, 172 + int nelems, enum dma_data_direction dir) 172 173 { 173 174 int i; 175 + struct scatterlist *sg; 174 176 175 - for (i = 0; i < nelems; i++, sg++) 177 + for_each_sg(sglist, sg, nelems, i) 176 178 _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); 177 179 } 178 180
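The conversion above matters because a plain `sg++` walk breaks on chained scatterlists. A minimal sketch of the `for_each_sg()` idiom (the helper name `total_sg_len` is illustrative, not from the patch):

```c
#include <linux/scatterlist.h>

/*
 * Sketch: walk a scatterlist with for_each_sg().  Unlike the old "sg++"
 * loop, this advances via sg_next() and therefore follows chained
 * scatterlists correctly.
 */
static size_t total_sg_len(struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	size_t total = 0;
	int i;

	for_each_sg(sglist, sg, nelems, i)
		total += sg->length;

	return total;
}
```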
+1 -1
arch/arm/mach-at91/pm.c
··· 369 369 return; 370 370 } 371 371 372 - sram_pool = dev_get_gen_pool(&pdev->dev); 372 + sram_pool = gen_pool_get(&pdev->dev); 373 373 if (!sram_pool) { 374 374 pr_warn("%s: sram pool unavailable!\n", __func__); 375 375 return;
+1 -1
arch/arm/mach-imx/pm-imx5.c
··· 297 297 goto put_node; 298 298 } 299 299 300 - ocram_pool = dev_get_gen_pool(&pdev->dev); 300 + ocram_pool = gen_pool_get(&pdev->dev); 301 301 if (!ocram_pool) { 302 302 pr_warn("%s: ocram pool unavailable!\n", __func__); 303 303 ret = -ENODEV;
+1 -1
arch/arm/mach-imx/pm-imx6.c
··· 451 451 goto put_node; 452 452 } 453 453 454 - ocram_pool = dev_get_gen_pool(&pdev->dev); 454 + ocram_pool = gen_pool_get(&pdev->dev); 455 455 if (!ocram_pool) { 456 456 pr_warn("%s: ocram pool unavailable!\n", __func__); 457 457 ret = -ENODEV;
+1 -1
arch/arm/mach-socfpga/pm.c
··· 56 56 goto put_node; 57 57 } 58 58 59 - ocram_pool = dev_get_gen_pool(&pdev->dev); 59 + ocram_pool = gen_pool_get(&pdev->dev); 60 60 if (!ocram_pool) { 61 61 pr_warn("%s: ocram pool unavailable!\n", __func__); 62 62 ret = -ENODEV;
+7 -12
arch/ia64/mm/numa.c
··· 58 58 * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where 59 59 * the section resides. 60 60 */ 61 - int __meminit __early_pfn_to_nid(unsigned long pfn) 61 + int __meminit __early_pfn_to_nid(unsigned long pfn, 62 + struct mminit_pfnnid_cache *state) 62 63 { 63 64 int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec; 64 - /* 65 - * NOTE: The following SMP-unsafe globals are only used early in boot 66 - * when the kernel is running single-threaded. 67 - */ 68 - static int __meminitdata last_ssec, last_esec; 69 - static int __meminitdata last_nid; 70 65 71 - if (section >= last_ssec && section < last_esec) 72 - return last_nid; 66 + if (section >= state->last_start && section < state->last_end) 67 + return state->last_nid; 73 68 74 69 for (i = 0; i < num_node_memblks; i++) { 75 70 ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT; 76 71 esec = (node_memblk[i].start_paddr + node_memblk[i].size + 77 72 ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT; 78 73 if (section >= ssec && section < esec) { 79 - last_ssec = ssec; 80 - last_esec = esec; 81 - last_nid = node_memblk[i].nid; 74 + state->last_start = ssec; 75 + state->last_end = esec; 76 + state->last_nid = node_memblk[i].nid; 82 77 return node_memblk[i].nid; 83 78 } 84 79 }
+2 -2
arch/unicore32/kernel/fpu-ucf64.c
··· 90 90 tmp &= ~(FPSCR_CON); 91 91 exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON); 92 92 } else { 93 - pr_debug(KERN_ERR "UniCore-F64 Error: unhandled exceptions\n"); 94 - pr_debug(KERN_ERR "UniCore-F64 FPSCR 0x%08x INST 0x%08x\n", 93 + pr_debug("UniCore-F64 Error: unhandled exceptions\n"); 94 + pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n", 95 95 cff(FPSCR), inst); 96 96 97 97 ucf64_raise_sigfpe(0, regs);
+2
arch/x86/Kconfig
··· 34 34 select ARCH_MIGHT_HAVE_PC_PARPORT 35 35 select ARCH_MIGHT_HAVE_PC_SERIO 36 36 select ARCH_SUPPORTS_ATOMIC_RMW 37 + select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 37 38 select ARCH_SUPPORTS_INT128 if X86_64 38 39 select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 39 40 select ARCH_USE_BUILTIN_BSWAP ··· 88 87 select HAVE_CMPXCHG_DOUBLE 89 88 select HAVE_CMPXCHG_LOCAL 90 89 select HAVE_CONTEXT_TRACKING if X86_64 90 + select HAVE_COPY_THREAD_TLS 91 91 select HAVE_C_RECORDMCOUNT 92 92 select HAVE_DEBUG_KMEMLEAK 93 93 select HAVE_DEBUG_STACKOVERFLOW
+6 -5
arch/x86/kernel/kexec-bzimage64.c
··· 72 72 unsigned long cmdline_len) 73 73 { 74 74 char *cmdline_ptr = ((char *)params) + cmdline_offset; 75 - unsigned long cmdline_ptr_phys, len; 75 + unsigned long cmdline_ptr_phys, len = 0; 76 76 uint32_t cmdline_low_32, cmdline_ext_32; 77 77 78 - memcpy(cmdline_ptr, cmdline, cmdline_len); 79 78 if (image->type == KEXEC_TYPE_CRASH) { 80 - len = sprintf(cmdline_ptr + cmdline_len - 1, 81 - " elfcorehdr=0x%lx", image->arch.elf_load_addr); 82 - cmdline_len += len; 79 + len = sprintf(cmdline_ptr, 80 + "elfcorehdr=0x%lx ", image->arch.elf_load_addr); 83 81 } 82 + memcpy(cmdline_ptr + len, cmdline, cmdline_len); 83 + cmdline_len += len; 84 + 84 85 cmdline_ptr[cmdline_len - 1] = '\0'; 85 86 86 87 pr_debug("Final command line is: %s\n", cmdline_ptr);
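The change switches from appending " elfcorehdr=..." after the user command line to prepending it. A small userspace sketch of the same prepend-then-copy logic, with hypothetical values, shows the resulting buffer layout:

```c
/*
 * Userspace sketch of the prepend-then-copy logic above; the address and
 * command line values are hypothetical.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmdline_ptr[256];
	const char *cmdline = "root=/dev/sda1 ro console=ttyS0";
	unsigned long cmdline_len = strlen(cmdline) + 1;	/* includes '\0' */
	unsigned long elf_load_addr = 0x71000000UL;		/* hypothetical */
	unsigned long len;

	/* elfcorehdr= goes first, then the user-supplied command line */
	len = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ", elf_load_addr);
	memcpy(cmdline_ptr + len, cmdline, cmdline_len);
	cmdline_len += len;
	cmdline_ptr[cmdline_len - 1] = '\0';

	printf("Final command line is: %s\n", cmdline_ptr);
	return 0;
}
```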
+3 -3
arch/x86/kernel/process_32.c
··· 128 128 release_vm86_irqs(dead_task); 129 129 } 130 130 131 - int copy_thread(unsigned long clone_flags, unsigned long sp, 132 - unsigned long arg, struct task_struct *p) 131 + int copy_thread_tls(unsigned long clone_flags, unsigned long sp, 132 + unsigned long arg, struct task_struct *p, unsigned long tls) 133 133 { 134 134 struct pt_regs *childregs = task_pt_regs(p); 135 135 struct task_struct *tsk; ··· 184 184 */ 185 185 if (clone_flags & CLONE_SETTLS) 186 186 err = do_set_thread_area(p, -1, 187 - (struct user_desc __user *)childregs->si, 0); 187 + (struct user_desc __user *)tls, 0); 188 188 189 189 if (err && p->thread.io_bitmap_ptr) { 190 190 kfree(p->thread.io_bitmap_ptr);
+4 -4
arch/x86/kernel/process_64.c
··· 150 150 return get_desc_base(&t->thread.tls_array[tls]); 151 151 } 152 152 153 - int copy_thread(unsigned long clone_flags, unsigned long sp, 154 - unsigned long arg, struct task_struct *p) 153 + int copy_thread_tls(unsigned long clone_flags, unsigned long sp, 154 + unsigned long arg, struct task_struct *p, unsigned long tls) 155 155 { 156 156 int err; 157 157 struct pt_regs *childregs; ··· 207 207 #ifdef CONFIG_IA32_EMULATION 208 208 if (is_ia32_task()) 209 209 err = do_set_thread_area(p, -1, 210 - (struct user_desc __user *)childregs->si, 0); 210 + (struct user_desc __user *)tls, 0); 211 211 else 212 212 #endif 213 - err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 213 + err = do_arch_prctl(p, ARCH_SET_FS, tls); 214 214 if (err) 215 215 goto out; 216 216 }
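With HAVE_COPY_THREAD_TLS selected, the TLS pointer reaches the architecture as an explicit `tls` argument instead of being fished out of `childregs->si` / `childregs->r8`. For architectures that have not opted in, generic code falls back to the legacy hook; roughly a sketch of the fallback shape, not the exact upstream text:

```c
#include <linux/sched.h>

/*
 * Sketch of the generic fallback for architectures that do not select
 * CONFIG_HAVE_COPY_THREAD_TLS: the tls value passed through C is ignored
 * and the legacy copy_thread() hook, which reads TLS from the register
 * set, is used instead.
 */
#ifndef CONFIG_HAVE_COPY_THREAD_TLS
static inline int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
				  unsigned long arg, struct task_struct *p,
				  unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
```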
+5 -1
drivers/base/node.c
··· 359 359 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE 360 360 #define page_initialized(page) (page->lru.next) 361 361 362 - static int get_nid_for_pfn(unsigned long pfn) 362 + static int __init_refok get_nid_for_pfn(unsigned long pfn) 363 363 { 364 364 struct page *page; 365 365 366 366 if (!pfn_valid_within(pfn)) 367 367 return -1; 368 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 369 + if (system_state == SYSTEM_BOOTING) 370 + return early_pfn_to_nid(pfn); 371 + #endif 368 372 page = pfn_to_page(pfn); 369 373 if (!page_initialized(page)) 370 374 return -1;
+2 -3
drivers/crypto/marvell/cesa.c
··· 321 321 const char *res_name = "sram"; 322 322 struct resource *res; 323 323 324 - engine->pool = of_get_named_gen_pool(cesa->dev->of_node, 325 - "marvell,crypto-srams", 326 - idx); 324 + engine->pool = of_gen_pool_get(cesa->dev->of_node, 325 + "marvell,crypto-srams", idx); 327 326 if (engine->pool) { 328 327 engine->sram = gen_pool_dma_alloc(engine->pool, 329 328 cesa->sram_size,
+1 -1
drivers/dma/mmp_tdma.c
··· 657 657 INIT_LIST_HEAD(&tdev->device.channels); 658 658 659 659 if (pdev->dev.of_node) 660 - pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0); 660 + pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0); 661 661 else 662 662 pool = sram_get_gpool("asram"); 663 663 if (!pool) {
+1 -4
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 555 555 static inline void 556 556 u_free(void *addr) 557 557 { 558 - if (!is_vmalloc_addr(addr)) 559 - kfree(addr); 560 - else 561 - vfree(addr); 558 + kvfree(addr); 562 559 } 563 560 564 561 static inline void *
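All of the kvfree() conversions in this series collapse the same open-coded pattern. A minimal sketch (the helper names `big_alloc` / `big_free` are illustrative):

```c
#include <linux/mm.h>		/* kvfree() */
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Sketch: the common "try kmalloc, fall back to vmalloc" pattern used by
 * the call sites above.  kvfree() checks is_vmalloc_addr() internally, so
 * a single call frees either kind of allocation.
 */
static void *big_alloc(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	return p ? p : vmalloc(size);
}

static void big_free(void *p)
{
	kvfree(p);	/* replaces the open-coded is_vmalloc_addr() branch */
}
```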
+2 -8
drivers/infiniband/hw/ehca/ipz_pt_fn.c
··· 245 245 ipz_queue_ctor_exit0: 246 246 ehca_gen_err("Couldn't alloc pages queue=%p " 247 247 "nr_of_pages=%x", queue, nr_of_pages); 248 - if (is_vmalloc_addr(queue->queue_pages)) 249 - vfree(queue->queue_pages); 250 - else 251 - kfree(queue->queue_pages); 248 + kvfree(queue->queue_pages); 252 249 253 250 return 0; 254 251 } ··· 267 270 free_page((unsigned long)queue->queue_pages[i]); 268 271 } 269 272 270 - if (is_vmalloc_addr(queue->queue_pages)) 271 - vfree(queue->queue_pages); 272 - else 273 - kfree(queue->queue_pages); 273 + kvfree(queue->queue_pages); 274 274 275 275 return 1; 276 276 }
+1 -1
drivers/md/bcache/journal.c
··· 157 157 158 158 for_each_cache(ca, c, iter) { 159 159 struct journal_device *ja = &ca->journal; 160 - unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG]; 160 + DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS); 161 161 unsigned i, l, r, m; 162 162 uint64_t seq; 163 163
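`DECLARE_BITMAP()` sizes the array with `BITS_TO_LONGS()`, which rounds up, whereas the open-coded `SB_JOURNAL_BUCKETS / BITS_PER_LONG` truncates and would under-allocate for any bit count that is not a multiple of `BITS_PER_LONG`. A small sketch with an illustrative 100-bit map:

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>

/*
 * Sketch: DECLARE_BITMAP(name, bits) expands to
 *	unsigned long name[BITS_TO_LONGS(bits)];
 * and BITS_TO_LONGS() rounds up, so 100 bits get two longs on a 64-bit
 * build, whereas "100 / BITS_PER_LONG" would yield only one.
 */
static DECLARE_BITMAP(example_map, 100);	/* hypothetical 100-bit map */

static void example_use(void)
{
	bitmap_zero(example_map, 100);
	set_bit(3, example_map);
}
```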
+2 -8
drivers/md/bcache/super.c
··· 760 760 bio_split_pool_free(&d->bio_split_hook); 761 761 if (d->bio_split) 762 762 bioset_free(d->bio_split); 763 - if (is_vmalloc_addr(d->full_dirty_stripes)) 764 - vfree(d->full_dirty_stripes); 765 - else 766 - kfree(d->full_dirty_stripes); 767 - if (is_vmalloc_addr(d->stripe_sectors_dirty)) 768 - vfree(d->stripe_sectors_dirty); 769 - else 770 - kfree(d->stripe_sectors_dirty); 763 + kvfree(d->full_dirty_stripes); 764 + kvfree(d->stripe_sectors_dirty); 771 765 772 766 closure_debug_destroy(&d->cl); 773 767 }
+2 -8
drivers/md/bcache/util.h
··· 52 52 53 53 #define free_heap(heap) \ 54 54 do { \ 55 - if (is_vmalloc_addr((heap)->data)) \ 56 - vfree((heap)->data); \ 57 - else \ 58 - kfree((heap)->data); \ 55 + kvfree((heap)->data); \ 59 56 (heap)->data = NULL; \ 60 57 } while (0) 61 58 ··· 160 163 161 164 #define free_fifo(fifo) \ 162 165 do { \ 163 - if (is_vmalloc_addr((fifo)->data)) \ 164 - vfree((fifo)->data); \ 165 - else \ 166 - kfree((fifo)->data); \ 166 + kvfree((fifo)->data); \ 167 167 (fifo)->data = NULL; \ 168 168 } while (0) 169 169
+2 -2
drivers/media/platform/coda/coda-common.c
··· 2155 2155 } 2156 2156 2157 2157 /* Get IRAM pool from device tree or platform data */ 2158 - pool = of_get_named_gen_pool(np, "iram", 0); 2158 + pool = of_gen_pool_get(np, "iram", 0); 2159 2159 if (!pool && pdata) 2160 - pool = dev_get_gen_pool(pdata->iram_dev); 2160 + pool = gen_pool_get(pdata->iram_dev); 2161 2161 if (!pool) { 2162 2162 dev_err(&pdev->dev, "iram pool not available\n"); 2163 2163 return -ENOMEM;
+6 -6
drivers/memstick/host/jmb38x_ms.c
··· 419 419 } 420 420 421 421 if (host->cmd_flags & DMA_DATA) { 422 - if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1, 422 + if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1, 423 423 host->req->data_dir == READ 424 - ? PCI_DMA_FROMDEVICE 425 - : PCI_DMA_TODEVICE)) { 424 + ? DMA_FROM_DEVICE 425 + : DMA_TO_DEVICE)) { 426 426 host->req->error = -ENOMEM; 427 427 return host->req->error; 428 428 } ··· 487 487 writel(0, host->addr + DMA_CONTROL); 488 488 489 489 if (host->cmd_flags & DMA_DATA) { 490 - pci_unmap_sg(host->chip->pdev, &host->req->sg, 1, 490 + dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1, 491 491 host->req->data_dir == READ 492 - ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); 492 + ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 493 493 } else { 494 494 t_val = readl(host->addr + INT_STATUS_ENABLE); 495 495 if (host->req->data_dir == READ) ··· 925 925 int pci_dev_busy = 0; 926 926 int rc, cnt; 927 927 928 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 928 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 929 929 if (rc) 930 930 return rc; 931 931
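The driver is moved from the pci_* DMA wrappers to the generic DMA API, which takes the embedded `struct device` directly. A sketch of the mapping call as used above (the helper name `map_one_sg` is illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

/*
 * Sketch: map a single scatterlist entry for a device-to-memory transfer
 * with the generic DMA API.  The old pci_map_sg()/PCI_DMA_FROMDEVICE
 * calls were thin wrappers around exactly this, taking the pci_dev
 * instead of its embedded struct device.
 */
static int map_one_sg(struct pci_dev *pdev, struct scatterlist *sg)
{
	if (dma_map_sg(&pdev->dev, sg, 1, DMA_FROM_DEVICE) != 1)
		return -ENOMEM;

	/* ... run the transfer ... */

	dma_unmap_sg(&pdev->dev, sg, 1, DMA_FROM_DEVICE);
	return 0;
}
```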
+5 -5
drivers/memstick/host/r592.c
··· 754 754 goto error2; 755 755 756 756 pci_set_master(pdev); 757 - error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 757 + error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 758 758 if (error) 759 759 goto error3; 760 760 ··· 787 787 } 788 788 789 789 /* This is just a precation, so don't fail */ 790 - dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE, 791 - &dev->dummy_dma_page_physical_address); 790 + dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 791 + &dev->dummy_dma_page_physical_address, GFP_KERNEL); 792 792 r592_stop_dma(dev , 0); 793 793 794 794 if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, ··· 805 805 free_irq(dev->irq, dev); 806 806 error6: 807 807 if (dev->dummy_dma_page) 808 - pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, 808 + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, 809 809 dev->dummy_dma_page_physical_address); 810 810 811 811 kthread_stop(dev->io_thread); ··· 845 845 memstick_free_host(dev->host); 846 846 847 847 if (dev->dummy_dma_page) 848 - pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, 848 + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, 849 849 dev->dummy_dma_page_physical_address); 850 850 } 851 851
+1 -4
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
··· 1169 1169 */ 1170 1170 void cxgb_free_mem(void *addr) 1171 1171 { 1172 - if (is_vmalloc_addr(addr)) 1173 - vfree(addr); 1174 - else 1175 - kfree(addr); 1172 + kvfree(addr); 1176 1173 } 1177 1174 1178 1175 /*
+1 -4
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 1150 1150 */ 1151 1151 void t4_free_mem(void *addr) 1152 1152 { 1153 - if (is_vmalloc_addr(addr)) 1154 - vfree(addr); 1155 - else 1156 - kfree(addr); 1153 + kvfree(addr); 1157 1154 } 1158 1155 1159 1156 /* Send a Work Request to write the filter at a specified index. We construct
+1 -4
drivers/scsi/cxgbi/libcxgbi.h
··· 685 685 686 686 static inline void cxgbi_free_big_mem(void *addr) 687 687 { 688 - if (is_vmalloc_addr(addr)) 689 - vfree(addr); 690 - else 691 - kfree(addr); 688 + kvfree(addr); 692 689 } 693 690 694 691 static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
+4 -8
drivers/scsi/scsi_debug.c
··· 2363 2363 u64 block, rest = 0; 2364 2364 struct scsi_data_buffer *sdb; 2365 2365 enum dma_data_direction dir; 2366 - size_t (*func)(struct scatterlist *, unsigned int, void *, size_t, 2367 - off_t); 2368 2366 2369 2367 if (do_write) { 2370 2368 sdb = scsi_out(scmd); 2371 2369 dir = DMA_TO_DEVICE; 2372 - func = sg_pcopy_to_buffer; 2373 2370 } else { 2374 2371 sdb = scsi_in(scmd); 2375 2372 dir = DMA_FROM_DEVICE; 2376 - func = sg_pcopy_from_buffer; 2377 2373 } 2378 2374 2379 2375 if (!sdb->length) ··· 2381 2385 if (block + num > sdebug_store_sectors) 2382 2386 rest = block + num - sdebug_store_sectors; 2383 2387 2384 - ret = func(sdb->table.sgl, sdb->table.nents, 2388 + ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2385 2389 fake_storep + (block * scsi_debug_sector_size), 2386 - (num - rest) * scsi_debug_sector_size, 0); 2390 + (num - rest) * scsi_debug_sector_size, 0, do_write); 2387 2391 if (ret != (num - rest) * scsi_debug_sector_size) 2388 2392 return ret; 2389 2393 2390 2394 if (rest) { 2391 - ret += func(sdb->table.sgl, sdb->table.nents, 2395 + ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2392 2396 fake_storep, rest * scsi_debug_sector_size, 2393 - (num - rest) * scsi_debug_sector_size); 2397 + (num - rest) * scsi_debug_sector_size, do_write); 2394 2398 } 2395 2399 2396 2400 return ret;
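Exporting `sg_copy_buffer()` lets a caller pick the copy direction with a boolean at run time instead of stashing `sg_pcopy_to_buffer`/`sg_pcopy_from_buffer` in a function pointer, as scsi_debug did before. A minimal sketch (the wrapper `copy_sectors` and its parameters are illustrative):

```c
#include <linux/scatterlist.h>

/*
 * Sketch: one call site handling both directions.  to_buffer == true
 * copies scatterlist -> store, false copies store -> scatterlist.
 */
static size_t copy_sectors(struct scatterlist *sgl, unsigned int nents,
			   void *store, size_t len, bool do_write)
{
	return sg_copy_buffer(sgl, nents, store, len, 0, do_write);
}
```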
+2 -8
drivers/target/target_core_transport.c
··· 279 279 if (rc < 0) { 280 280 pr_err("Unable to init se_sess->sess_tag_pool," 281 281 " tag_num: %u\n", tag_num); 282 - if (is_vmalloc_addr(se_sess->sess_cmd_map)) 283 - vfree(se_sess->sess_cmd_map); 284 - else 285 - kfree(se_sess->sess_cmd_map); 282 + kvfree(se_sess->sess_cmd_map); 286 283 se_sess->sess_cmd_map = NULL; 287 284 return -ENOMEM; 288 285 } ··· 486 489 { 487 490 if (se_sess->sess_cmd_map) { 488 491 percpu_ida_destroy(&se_sess->sess_tag_pool); 489 - if (is_vmalloc_addr(se_sess->sess_cmd_map)) 490 - vfree(se_sess->sess_cmd_map); 491 - else 492 - kfree(se_sess->sess_cmd_map); 492 + kvfree(se_sess->sess_cmd_map); 493 493 } 494 494 kmem_cache_free(se_sess_cache, se_sess); 495 495 }
+1 -1
fs/adfs/super.c
··· 242 242 static struct inode *adfs_alloc_inode(struct super_block *sb) 243 243 { 244 244 struct adfs_inode_info *ei; 245 - ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL); 245 + ei = kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL); 246 246 if (!ei) 247 247 return NULL; 248 248 return &ei->vfs_inode;
+1 -1
fs/affs/amigaffs.c
··· 140 140 { 141 141 struct inode *dir, *inode = d_inode(dentry); 142 142 struct super_block *sb = inode->i_sb; 143 - struct buffer_head *bh = NULL, *link_bh = NULL; 143 + struct buffer_head *bh, *link_bh = NULL; 144 144 u32 link_ino, ino; 145 145 int retval; 146 146
+1 -1
fs/affs/inode.c
··· 346 346 { 347 347 struct super_block *sb = dir->i_sb; 348 348 struct buffer_head *inode_bh = NULL; 349 - struct buffer_head *bh = NULL; 349 + struct buffer_head *bh; 350 350 u32 block = 0; 351 351 int retval; 352 352
+1 -3
fs/affs/symlink.c
··· 16 16 struct inode *inode = page->mapping->host; 17 17 char *link = kmap(page); 18 18 struct slink_front *lf; 19 - int err; 20 19 int i, j; 21 20 char c; 22 21 char lc; 23 22 24 23 pr_debug("follow_link(ino=%lu)\n", inode->i_ino); 25 24 26 - err = -EIO; 27 25 bh = affs_bread(inode->i_sb, inode->i_ino); 28 26 if (!bh) 29 27 goto fail; ··· 64 66 SetPageError(page); 65 67 kunmap(page); 66 68 unlock_page(page); 67 - return err; 69 + return -EIO; 68 70 } 69 71 70 72 const struct address_space_operations affs_symlink_aops = {
+24 -7
fs/devpts/inode.c
··· 142 142 if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) 143 143 return inode->i_sb; 144 144 #endif 145 + if (!devpts_mnt) 146 + return NULL; 145 147 return devpts_mnt->mnt_sb; 146 148 } 147 149 ··· 527 525 int devpts_new_index(struct inode *ptmx_inode) 528 526 { 529 527 struct super_block *sb = pts_sb_from_inode(ptmx_inode); 530 - struct pts_fs_info *fsi = DEVPTS_SB(sb); 528 + struct pts_fs_info *fsi; 531 529 int index; 532 530 int ida_ret; 533 531 532 + if (!sb) 533 + return -ENODEV; 534 + 535 + fsi = DEVPTS_SB(sb); 534 536 retry: 535 537 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) 536 538 return -ENOMEM; ··· 590 584 struct dentry *dentry; 591 585 struct super_block *sb = pts_sb_from_inode(ptmx_inode); 592 586 struct inode *inode; 593 - struct dentry *root = sb->s_root; 594 - struct pts_fs_info *fsi = DEVPTS_SB(sb); 595 - struct pts_mount_opts *opts = &fsi->mount_opts; 587 + struct dentry *root; 588 + struct pts_fs_info *fsi; 589 + struct pts_mount_opts *opts; 596 590 char s[12]; 591 + 592 + if (!sb) 593 + return ERR_PTR(-ENODEV); 594 + 595 + root = sb->s_root; 596 + fsi = DEVPTS_SB(sb); 597 + opts = &fsi->mount_opts; 597 598 598 599 inode = new_inode(sb); 599 600 if (!inode) ··· 689 676 struct ctl_table_header *table; 690 677 691 678 if (!err) { 679 + struct vfsmount *mnt; 680 + 692 681 table = register_sysctl_table(pty_root_table); 693 - devpts_mnt = kern_mount(&devpts_fs_type); 694 - if (IS_ERR(devpts_mnt)) { 695 - err = PTR_ERR(devpts_mnt); 682 + mnt = kern_mount(&devpts_fs_type); 683 + if (IS_ERR(mnt)) { 684 + err = PTR_ERR(mnt); 696 685 unregister_filesystem(&devpts_fs_type); 697 686 unregister_sysctl_table(table); 687 + } else { 688 + devpts_mnt = mnt; 698 689 } 699 690 } 700 691 return err;
-3
fs/mount.h
··· 118 118 } 119 119 120 120 struct proc_mounts { 121 - struct seq_file m; 122 121 struct mnt_namespace *ns; 123 122 struct path root; 124 123 int (*show)(struct seq_file *, struct vfsmount *); ··· 125 126 u64 cached_event; 126 127 loff_t cached_index; 127 128 }; 128 - 129 - #define proc_mounts(p) (container_of((p), struct proc_mounts, m)) 130 129 131 130 extern const struct seq_operations mounts_op; 132 131
+3 -3
fs/namespace.c
··· 1226 1226 /* iterator; we want it to have access to namespace_sem, thus here... */ 1227 1227 static void *m_start(struct seq_file *m, loff_t *pos) 1228 1228 { 1229 - struct proc_mounts *p = proc_mounts(m); 1229 + struct proc_mounts *p = m->private; 1230 1230 1231 1231 down_read(&namespace_sem); 1232 1232 if (p->cached_event == p->ns->event) { ··· 1247 1247 1248 1248 static void *m_next(struct seq_file *m, void *v, loff_t *pos) 1249 1249 { 1250 - struct proc_mounts *p = proc_mounts(m); 1250 + struct proc_mounts *p = m->private; 1251 1251 1252 1252 p->cached_mount = seq_list_next(v, &p->ns->list, pos); 1253 1253 p->cached_index = *pos; ··· 1261 1261 1262 1262 static int m_show(struct seq_file *m, void *v) 1263 1263 { 1264 - struct proc_mounts *p = proc_mounts(m); 1264 + struct proc_mounts *p = m->private; 1265 1265 struct mount *r = list_entry(v, struct mount, mnt_list); 1266 1266 return p->show(m, &r->mnt); 1267 1267 }
+16 -18
fs/proc_namespace.c
··· 17 17 18 18 static unsigned mounts_poll(struct file *file, poll_table *wait) 19 19 { 20 - struct proc_mounts *p = proc_mounts(file->private_data); 20 + struct seq_file *m = file->private_data; 21 + struct proc_mounts *p = m->private; 21 22 struct mnt_namespace *ns = p->ns; 22 23 unsigned res = POLLIN | POLLRDNORM; 23 24 int event; ··· 26 25 poll_wait(file, &p->ns->poll, wait); 27 26 28 27 event = ACCESS_ONCE(ns->event); 29 - if (p->m.poll_event != event) { 30 - p->m.poll_event = event; 28 + if (m->poll_event != event) { 29 + m->poll_event = event; 31 30 res |= POLLERR | POLLPRI; 32 31 } 33 32 ··· 93 92 94 93 static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) 95 94 { 96 - struct proc_mounts *p = proc_mounts(m); 95 + struct proc_mounts *p = m->private; 97 96 struct mount *r = real_mount(mnt); 98 97 int err = 0; 99 98 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; ··· 127 126 128 127 static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt) 129 128 { 130 - struct proc_mounts *p = proc_mounts(m); 129 + struct proc_mounts *p = m->private; 131 130 struct mount *r = real_mount(mnt); 132 131 struct super_block *sb = mnt->mnt_sb; 133 132 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; ··· 187 186 188 187 static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt) 189 188 { 190 - struct proc_mounts *p = proc_mounts(m); 189 + struct proc_mounts *p = m->private; 191 190 struct mount *r = real_mount(mnt); 192 191 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 193 192 struct super_block *sb = mnt_path.dentry->d_sb; ··· 237 236 struct mnt_namespace *ns = NULL; 238 237 struct path root; 239 238 struct proc_mounts *p; 239 + struct seq_file *m; 240 240 int ret = -EINVAL; 241 241 242 242 if (!task) ··· 262 260 task_unlock(task); 263 261 put_task_struct(task); 264 262 265 - ret = -ENOMEM; 266 - p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); 267 - if (!p) 263 + ret = seq_open_private(file, &mounts_op, sizeof(struct proc_mounts)); 264 + if (ret) 268 265 goto err_put_path; 269 266 270 - file->private_data = &p->m; 271 - ret = seq_open(file, &mounts_op); 272 - if (ret) 273 - goto err_free; 267 + m = file->private_data; 268 + m->poll_event = ns->event; 274 269 270 + p = m->private; 275 271 p->ns = ns; 276 272 p->root = root; 277 - p->m.poll_event = ns->event; 278 273 p->show = show; 279 274 p->cached_event = ~0ULL; 280 275 281 276 return 0; 282 277 283 - err_free: 284 - kfree(p); 285 278 err_put_path: 286 279 path_put(&root); 287 280 err_put_ns: ··· 287 290 288 291 static int mounts_release(struct inode *inode, struct file *file) 289 292 { 290 - struct proc_mounts *p = proc_mounts(file->private_data); 293 + struct seq_file *m = file->private_data; 294 + struct proc_mounts *p = m->private; 291 295 path_put(&p->root); 292 296 put_mnt_ns(p->ns); 293 - return seq_release(inode, file); 297 + return seq_release_private(inode, file); 294 298 } 295 299 296 300 static int mounts_open(struct inode *inode, struct file *file)
+11 -8
fs/seq_file.c
··· 48 48 * ERR_PTR(error). In the end of sequence they return %NULL. ->show() 49 49 * returns 0 in case of success and negative number in case of error. 50 50 * Returning SEQ_SKIP means "discard this element and move on". 51 + * Note: seq_open() will allocate a struct seq_file and store its 52 + * pointer in @file->private_data. This pointer should not be modified. 51 53 */ 52 54 int seq_open(struct file *file, const struct seq_operations *op) 53 55 { 54 - struct seq_file *p = file->private_data; 56 + struct seq_file *p; 55 57 56 - if (!p) { 57 - p = kmalloc(sizeof(*p), GFP_KERNEL); 58 - if (!p) 59 - return -ENOMEM; 60 - file->private_data = p; 61 - } 62 - memset(p, 0, sizeof(*p)); 58 + WARN_ON(file->private_data); 59 + 60 + p = kzalloc(sizeof(*p), GFP_KERNEL); 61 + if (!p) 62 + return -ENOMEM; 63 + 64 + file->private_data = p; 65 + 63 66 mutex_init(&p->lock); 64 67 p->op = op; 65 68 #ifdef CONFIG_USER_NS
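With seq_open() now always allocating the seq_file, per-open state is attached through `m->private`, typically via seq_open_private(). A self-contained sketch of that pattern (every `foo_*` name below is illustrative, not from the patch):

```c
#include <linux/fs.h>
#include <linux/seq_file.h>

/*
 * Sketch of the pattern proc_namespace.c now follows: seq_open_private()
 * allocates the seq_file and the per-open state together, the state is
 * reached through m->private, and seq_release_private() frees both.
 */
struct foo_state {
	int nr_items;
};

/* Trivial iterator stubs so the sketch is complete. */
static void *foo_start(struct seq_file *m, loff_t *pos) { return NULL; }
static void *foo_next(struct seq_file *m, void *v, loff_t *pos) { return NULL; }
static void foo_stop(struct seq_file *m, void *v) { }
static int foo_show(struct seq_file *m, void *v) { return 0; }

static const struct seq_operations foo_seq_ops = {
	.start	= foo_start,
	.next	= foo_next,
	.stop	= foo_stop,
	.show	= foo_show,
};

static int foo_open(struct inode *inode, struct file *file)
{
	struct foo_state *st;
	int ret;

	ret = seq_open_private(file, &foo_seq_ops, sizeof(*st));
	if (ret)
		return ret;

	st = ((struct seq_file *)file->private_data)->private;
	st->nr_items = 0;
	return 0;
}

static int foo_release(struct inode *inode, struct file *file)
{
	return seq_release_private(inode, file);
}
```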
+1 -4
include/drm/drm_mem_util.h
··· 56 56 57 57 static __inline void drm_free_large(void *ptr) 58 58 { 59 - if (!is_vmalloc_addr(ptr)) 60 - return kfree(ptr); 61 - 62 - vfree(ptr); 59 + kvfree(ptr); 63 60 } 64 61 65 62 #endif
+3 -3
include/linux/genalloc.h
··· 119 119 120 120 extern struct gen_pool *devm_gen_pool_create(struct device *dev, 121 121 int min_alloc_order, int nid); 122 - extern struct gen_pool *dev_get_gen_pool(struct device *dev); 122 + extern struct gen_pool *gen_pool_get(struct device *dev); 123 123 124 124 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, 125 125 size_t size); 126 126 127 127 #ifdef CONFIG_OF 128 - extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, 128 + extern struct gen_pool *of_gen_pool_get(struct device_node *np, 129 129 const char *propname, int index); 130 130 #else 131 - static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np, 131 + static inline struct gen_pool *of_gen_pool_get(struct device_node *np, 132 132 const char *propname, int index) 133 133 { 134 134 return NULL;
+8
include/linux/gfp.h
··· 384 384 void drain_all_pages(struct zone *zone); 385 385 void drain_local_pages(struct zone *zone); 386 386 387 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 388 + void page_alloc_init_late(void); 389 + #else 390 + static inline void page_alloc_init_late(void) 391 + { 392 + } 393 + #endif 394 + 387 395 /* 388 396 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what 389 397 * GFP flags are used before interrupts are enabled. Once interrupts are
+3
include/linux/kernel.h
··· 439 439 extern int panic_on_io_nmi; 440 440 extern int panic_on_warn; 441 441 extern int sysctl_panic_on_stackoverflow; 442 + 443 + extern bool crash_kexec_post_notifiers; 444 + 442 445 /* 443 446 * Only to be used by arch init code. If the user over-wrote the default 444 447 * CONFIG_PANIC_TIMEOUT, honor it.
+18
include/linux/memblock.h
··· 101 101 struct memblock_type *type_b, phys_addr_t *out_start, 102 102 phys_addr_t *out_end, int *out_nid); 103 103 104 + void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, 105 + phys_addr_t *out_end); 106 + 104 107 /** 105 108 * for_each_mem_range - iterate through memblock areas from type_a and not 106 109 * included in type_b. Or just type_a if type_b is NULL. ··· 144 141 i != (u64)ULLONG_MAX; \ 145 142 __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ 146 143 p_start, p_end, p_nid)) 144 + 145 + /** 146 + * for_each_reserved_mem_region - iterate over all reserved memblock areas 147 + * @i: u64 used as loop variable 148 + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 149 + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 150 + * 151 + * Walks over reserved areas of memblock. Available as soon as memblock 152 + * is initialized. 153 + */ 154 + #define for_each_reserved_mem_region(i, p_start, p_end) \ 155 + for (i = 0UL, \ 156 + __next_reserved_mem_region(&i, p_start, p_end); \ 157 + i != (u64)ULLONG_MAX; \ 158 + __next_reserved_mem_region(&i, p_start, p_end)) 147 159 148 160 #ifdef CONFIG_MOVABLE_NODE 149 161 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
+6 -2
include/linux/mm.h
··· 1635 1635 extern void adjust_managed_page_count(struct page *page, long count); 1636 1636 extern void mem_init_print_info(const char *str); 1637 1637 1638 + extern void reserve_bootmem_region(unsigned long start, unsigned long end); 1639 + 1638 1640 /* Free the reserved page into the buddy system, so it gets managed. */ 1639 1641 static inline void __free_reserved_page(struct page *page) 1640 1642 { ··· 1726 1724 1727 1725 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ 1728 1726 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) 1729 - static inline int __early_pfn_to_nid(unsigned long pfn) 1727 + static inline int __early_pfn_to_nid(unsigned long pfn, 1728 + struct mminit_pfnnid_cache *state) 1730 1729 { 1731 1730 return 0; 1732 1731 } ··· 1735 1732 /* please see mm/page_alloc.c */ 1736 1733 extern int __meminit early_pfn_to_nid(unsigned long pfn); 1737 1734 /* there is a per-arch backend function. */ 1738 - extern int __meminit __early_pfn_to_nid(unsigned long pfn); 1735 + extern int __meminit __early_pfn_to_nid(unsigned long pfn, 1736 + struct mminit_pfnnid_cache *state); 1739 1737 #endif 1740 1738 1741 1739 extern void set_dma_reserve(unsigned long new_dma_reserve);
+18 -5
include/linux/mmzone.h
··· 762 762 /* Number of pages migrated during the rate limiting time interval */ 763 763 unsigned long numabalancing_migrate_nr_pages; 764 764 #endif 765 + 766 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 767 + /* 768 + * If memory initialisation on large machines is deferred then this 769 + * is the first PFN that needs to be initialised. 770 + */ 771 + unsigned long first_deferred_pfn; 772 + #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 765 773 } pg_data_t; 766 774 767 775 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) ··· 1224 1216 #define sparse_index_init(_sec, _nid) do {} while (0) 1225 1217 #endif /* CONFIG_SPARSEMEM */ 1226 1218 1227 - #ifdef CONFIG_NODES_SPAN_OTHER_NODES 1228 - bool early_pfn_in_nid(unsigned long pfn, int nid); 1229 - #else 1230 - #define early_pfn_in_nid(pfn, nid) (1) 1231 - #endif 1219 + /* 1220 + * During memory init memblocks map pfns to nids. The search is expensive and 1221 + * this caches recent lookups. The implementation of __early_pfn_to_nid 1222 + * may treat start/end as pfns or sections. 1223 + */ 1224 + struct mminit_pfnnid_cache { 1225 + unsigned long last_start; 1226 + unsigned long last_end; 1227 + int last_nid; 1228 + }; 1232 1229 1233 1230 #ifndef early_pfn_valid 1234 1231 #define early_pfn_valid(pfn) (1)
+5 -2
include/linux/scatterlist.h
··· 265 265 unsigned long offset, unsigned long size, 266 266 gfp_t gfp_mask); 267 267 268 + size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, 269 + size_t buflen, off_t skip, bool to_buffer); 270 + 268 271 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 269 - void *buf, size_t buflen); 272 + const void *buf, size_t buflen); 270 273 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, 271 274 void *buf, size_t buflen); 272 275 273 276 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, 274 - void *buf, size_t buflen, off_t skip); 277 + const void *buf, size_t buflen, off_t skip); 275 278 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, 276 279 void *buf, size_t buflen, off_t skip); 277 280
+2
init/main.c
··· 1004 1004 smp_init(); 1005 1005 sched_init_smp(); 1006 1006 1007 + page_alloc_init_late(); 1008 + 1007 1009 do_basic_setup(); 1008 1010 1009 1011 /* Open the /dev/console on the rootfs, this should never fail */
+39 -11
ipc/msg.c
··· 76 76 77 77 static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) 78 78 { 79 - struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id); 79 + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id); 80 80 81 81 if (IS_ERR(ipcp)) 82 82 return ERR_CAST(ipcp); ··· 196 196 * or dealing with -EAGAIN cases. See lockless receive part 1 197 197 * and 2 in do_msgrcv(). 198 198 */ 199 - smp_mb(); 199 + smp_wmb(); /* barrier (B) */ 200 200 msr->r_msg = ERR_PTR(res); 201 201 } 202 202 } ··· 580 580 /* initialize pipelined send ordering */ 581 581 msr->r_msg = NULL; 582 582 wake_up_process(msr->r_tsk); 583 - smp_mb(); /* see barrier comment below */ 583 + /* barrier (B) see barrier comment below */ 584 + smp_wmb(); 584 585 msr->r_msg = ERR_PTR(-E2BIG); 585 586 } else { 586 587 msr->r_msg = NULL; ··· 590 589 wake_up_process(msr->r_tsk); 591 590 /* 592 591 * Ensure that the wakeup is visible before 593 - * setting r_msg, as the receiving end depends 594 - * on it. See lockless receive part 1 and 2 in 595 - * do_msgrcv(). 592 + * setting r_msg, as the receiving can otherwise 593 + * exit - once r_msg is set, the receiver can 594 + * continue. See lockless receive part 1 and 2 595 + * in do_msgrcv(). Barrier (B). 596 596 */ 597 - smp_mb(); 597 + smp_wmb(); 598 598 msr->r_msg = msg; 599 599 600 600 return 1; ··· 934 932 /* Lockless receive, part 2: 935 933 * Wait until pipelined_send or expunge_all are outside of 936 934 * wake_up_process(). There is a race with exit(), see 937 - * ipc/mqueue.c for the details. 935 + * ipc/mqueue.c for the details. The correct serialization 936 + * ensures that a receiver cannot continue without the wakeup 937 + * being visibible _before_ setting r_msg: 938 + * 939 + * CPU 0 CPU 1 940 + * <loop receiver> 941 + * smp_rmb(); (A) <-- pair -. <waker thread> 942 + * <load ->r_msg> | msr->r_msg = NULL; 943 + * | wake_up_process(); 944 + * <continue> `------> smp_wmb(); (B) 945 + * msr->r_msg = msg; 946 + * 947 + * Where (A) orders the message value read and where (B) orders 948 + * the write to the r_msg -- done in both pipelined_send and 949 + * expunge_all. 938 950 */ 939 - msg = (struct msg_msg *)msr_d.r_msg; 940 - while (msg == NULL) { 941 - cpu_relax(); 951 + for (;;) { 952 + /* 953 + * Pairs with writer barrier in pipelined_send 954 + * or expunge_all. 955 + */ 956 + smp_rmb(); /* barrier (A) */ 942 957 msg = (struct msg_msg *)msr_d.r_msg; 958 + if (msg) 959 + break; 960 + 961 + /* 962 + * The cpu_relax() call is a compiler barrier 963 + * which forces everything in this loop to be 964 + * re-loaded. 965 + */ 966 + cpu_relax(); 943 967 } 944 968 945 969 /* Lockless receive, part 3:
+2 -2
ipc/sem.c
··· 391 391 struct kern_ipc_perm *ipcp; 392 392 struct sem_array *sma; 393 393 394 - ipcp = ipc_obtain_object(&sem_ids(ns), id); 394 + ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); 395 395 if (IS_ERR(ipcp)) 396 396 return ERR_CAST(ipcp); 397 397 ··· 410 410 411 411 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) 412 412 { 413 - struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id); 413 + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); 414 414 415 415 if (IS_ERR(ipcp)) 416 416 return ERR_CAST(ipcp);
+6 -6
ipc/shm.c
··· 129 129 130 130 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) 131 131 { 132 - struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); 132 + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id); 133 133 134 134 if (IS_ERR(ipcp)) 135 135 return ERR_CAST(ipcp); ··· 155 155 { 156 156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); 157 157 158 - if (IS_ERR(ipcp)) 159 - return (struct shmid_kernel *)ipcp; 158 + /* 159 + * We raced in the idr lookup or with shm_destroy(). Either way, the 160 + * ID is busted. 161 + */ 162 + BUG_ON(IS_ERR(ipcp)); 160 163 161 164 return container_of(ipcp, struct shmid_kernel, shm_perm); 162 165 } ··· 194 191 struct shmid_kernel *shp; 195 192 196 193 shp = shm_lock(sfd->ns, sfd->id); 197 - BUG_ON(IS_ERR(shp)); 198 194 shp->shm_atim = get_seconds(); 199 195 shp->shm_lprid = task_tgid_vnr(current); 200 196 shp->shm_nattch++; ··· 260 258 down_write(&shm_ids(ns).rwsem); 261 259 /* remove from the list of attaches of the shm segment */ 262 260 shp = shm_lock(ns, sfd->id); 263 - BUG_ON(IS_ERR(shp)); 264 261 shp->shm_lprid = task_tgid_vnr(current); 265 262 shp->shm_dtim = get_seconds(); 266 263 shp->shm_nattch--; ··· 1192 1191 out_nattch: 1193 1192 down_write(&shm_ids(ns).rwsem); 1194 1193 shp = shm_lock(ns, shmid); 1195 - BUG_ON(IS_ERR(shp)); 1196 1194 shp->shm_nattch--; 1197 1195 if (shm_may_destroy(ns, shp)) 1198 1196 shm_destroy(ns, shp);
+14 -14
ipc/util.c
··· 467 467 { 468 468 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); 469 469 470 - if (is_vmalloc_addr(p)) 471 - vfree(p); 472 - else 473 - kfree(p); 470 + kvfree(p); 474 471 } 475 472 476 473 /** ··· 555 558 * Call inside the RCU critical section. 556 559 * The ipc object is *not* locked on exit. 557 560 */ 558 - struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) 561 + struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id) 559 562 { 560 563 struct kern_ipc_perm *out; 561 564 int lid = ipcid_to_idx(id); ··· 581 584 struct kern_ipc_perm *out; 582 585 583 586 rcu_read_lock(); 584 - out = ipc_obtain_object(ids, id); 587 + out = ipc_obtain_object_idr(ids, id); 585 588 if (IS_ERR(out)) 586 - goto err1; 589 + goto err; 587 590 588 591 spin_lock(&out->lock); 589 592 590 - /* ipc_rmid() may have already freed the ID while ipc_lock 591 - * was spinning: here verify that the structure is still valid 593 + /* 594 + * ipc_rmid() may have already freed the ID while ipc_lock() 595 + * was spinning: here verify that the structure is still valid. 596 + * Upon races with RMID, return -EIDRM, thus indicating that 597 + * the ID points to a removed identifier. 592 598 */ 593 599 if (ipc_valid_object(out)) 594 600 return out; 595 601 596 602 spin_unlock(&out->lock); 597 - out = ERR_PTR(-EINVAL); 598 - err1: 603 + out = ERR_PTR(-EIDRM); 604 + err: 599 605 rcu_read_unlock(); 600 606 return out; 601 607 } ··· 608 608 * @ids: ipc identifier set 609 609 * @id: ipc id to look for 610 610 * 611 - * Similar to ipc_obtain_object() but also checks 611 + * Similar to ipc_obtain_object_idr() but also checks 612 612 * the ipc object reference counter. 613 613 * 614 614 * Call inside the RCU critical section. ··· 616 616 */ 617 617 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id) 618 618 { 619 - struct kern_ipc_perm *out = ipc_obtain_object(ids, id); 619 + struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id); 620 620 621 621 if (IS_ERR(out)) 622 622 goto out; 623 623 624 624 if (ipc_checkid(out, id)) 625 - return ERR_PTR(-EIDRM); 625 + return ERR_PTR(-EINVAL); 626 626 out: 627 627 return out; 628 628 }
+1 -1
ipc/util.h
··· 132 132 void ipc_rcu_free(struct rcu_head *head); 133 133 134 134 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); 135 - struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); 135 + struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id); 136 136 137 137 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); 138 138 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
+6
kernel/gcov/base.c
··· 92 92 } 93 93 EXPORT_SYMBOL(__gcov_merge_time_profile); 94 94 95 + void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters) 96 + { 97 + /* Unused. */ 98 + } 99 + EXPORT_SYMBOL(__gcov_merge_icall_topn); 100 + 95 101 /** 96 102 * gcov_enable_events - enable event reporting through gcov_event() 97 103 *
+3 -1
kernel/gcov/gcc_4_7.c
··· 18 18 #include <linux/vmalloc.h> 19 19 #include "gcov.h" 20 20 21 - #if __GNUC__ == 4 && __GNUC_MINOR__ >= 9 21 + #if __GNUC__ == 5 && __GNUC_MINOR__ >= 1 22 + #define GCOV_COUNTERS 10 23 + #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 22 24 #define GCOV_COUNTERS 9 23 25 #else 24 26 #define GCOV_COUNTERS 8
+11
kernel/kexec.c
··· 84 84 85 85 int kexec_should_crash(struct task_struct *p) 86 86 { 87 + /* 88 + * If crash_kexec_post_notifiers is enabled, don't run 89 + * crash_kexec() here yet, which must be run after panic 90 + * notifiers in panic(). 91 + */ 92 + if (crash_kexec_post_notifiers) 93 + return 0; 94 + /* 95 + * There are 4 panic() calls in do_exit() path, each of which 96 + * corresponds to each of these 4 conditions. 97 + */ 87 98 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) 88 99 return 1; 89 100 return 0;
+3 -2
kernel/panic.c
··· 32 32 static int pause_on_oops; 33 33 static int pause_on_oops_flag; 34 34 static DEFINE_SPINLOCK(pause_on_oops_lock); 35 - static bool crash_kexec_post_notifiers; 35 + bool crash_kexec_post_notifiers; 36 36 int panic_on_warn __read_mostly; 37 37 38 38 int panic_timeout = CONFIG_PANIC_TIMEOUT; ··· 142 142 * Note: since some panic_notifiers can make crashed kernel 143 143 * more unstable, it can increase risks of the kdump failure too. 144 144 */ 145 - crash_kexec(NULL); 145 + if (crash_kexec_post_notifiers) 146 + crash_kexec(NULL); 146 147 147 148 bust_spinlocks(0); 148 149
+4 -4
kernel/printk/printk.c
··· 207 207 * need to be changed in the future, when the requirements change. 208 208 * 209 209 * /dev/kmsg exports the structured data in the following line format: 210 - * "<level>,<sequnum>,<timestamp>,<contflag>;<message text>\n" 210 + * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n" 211 + * 212 + * Users of the export format should ignore possible additional values 213 + * separated by ',', and find the message after the ';' character. 211 214 * 212 215 * The optional key/value pairs are attached as continuation lines starting 213 216 * with a space character and terminated by a newline. All possible 214 217 * non-prinatable characters are escaped in the "\xff" notation. 215 - * 216 - * Users of the export format should ignore possible additional values 217 - * separated by ',', and find the message after the ';' character. 218 218 */ 219 219 220 220 enum log_flags {
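For reference, a record in this export format looks roughly as follows (sequence number, timestamp, and the dictionary continuation line are illustrative values):

```
6,339,5140900,-;NET: Registered protocol family 10
 SUBSYSTEM=net
```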
+1 -4
kernel/relay.c
··· 81 81 */ 82 82 static void relay_free_page_array(struct page **array) 83 83 { 84 - if (is_vmalloc_addr(array)) 85 - vfree(array); 86 - else 87 - kfree(array); 84 + kvfree(array); 88 85 } 89 86 90 87 /**
+7 -7
lib/genalloc.c
··· 602 602 EXPORT_SYMBOL(devm_gen_pool_create); 603 603 604 604 /** 605 - * dev_get_gen_pool - Obtain the gen_pool (if any) for a device 605 + * gen_pool_get - Obtain the gen_pool (if any) for a device 606 606 * @dev: device to retrieve the gen_pool from 607 607 * 608 608 * Returns the gen_pool for the device if one is present, or NULL. 609 609 */ 610 - struct gen_pool *dev_get_gen_pool(struct device *dev) 610 + struct gen_pool *gen_pool_get(struct device *dev) 611 611 { 612 612 struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, 613 613 NULL); ··· 616 616 return NULL; 617 617 return *p; 618 618 } 619 - EXPORT_SYMBOL_GPL(dev_get_gen_pool); 619 + EXPORT_SYMBOL_GPL(gen_pool_get); 620 620 621 621 #ifdef CONFIG_OF 622 622 /** 623 - * of_get_named_gen_pool - find a pool by phandle property 623 + * of_gen_pool_get - find a pool by phandle property 624 624 * @np: device node 625 625 * @propname: property name containing phandle(s) 626 626 * @index: index into the phandle array ··· 629 629 * address of the device tree node pointed at by the phandle property, 630 630 * or NULL if not found. 631 631 */ 632 - struct gen_pool *of_get_named_gen_pool(struct device_node *np, 632 + struct gen_pool *of_gen_pool_get(struct device_node *np, 633 633 const char *propname, int index) 634 634 { 635 635 struct platform_device *pdev; ··· 642 642 of_node_put(np_pool); 643 643 if (!pdev) 644 644 return NULL; 645 - return dev_get_gen_pool(&pdev->dev); 645 + return gen_pool_get(&pdev->dev); 646 646 } 647 - EXPORT_SYMBOL_GPL(of_get_named_gen_pool); 647 + EXPORT_SYMBOL_GPL(of_gen_pool_get); 648 648 #endif /* CONFIG_OF */
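A consumer-side sketch of the renamed helpers, looking up a pool by device-tree phandle with a fallback to the device's own pool (the "sram" property name, the probe function, and the 4 KiB size are illustrative):

```c
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/*
 * Sketch: obtain a pool either through a device-tree phandle or from the
 * device that created it, then allocate and free a buffer in it.
 */
static int example_probe(struct platform_device *pdev)
{
	struct gen_pool *pool;
	unsigned long vaddr;

	pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
	if (!pool)
		pool = gen_pool_get(&pdev->dev);
	if (!pool)
		return -ENODEV;

	vaddr = gen_pool_alloc(pool, 4096);
	if (!vaddr)
		return -ENOMEM;

	/* ... use the on-chip memory ... */

	gen_pool_free(pool, vaddr, 4096);
	return 0;
}
```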
+9 -9
lib/scatterlist.c
··· 650 650 * Returns the number of copied bytes. 651 651 * 652 652 **/ 653 - static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, 654 - void *buf, size_t buflen, off_t skip, 655 - bool to_buffer) 653 + size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, 654 + size_t buflen, off_t skip, bool to_buffer) 656 655 { 657 656 unsigned int offset = 0; 658 657 struct sg_mapping_iter miter; ··· 688 689 local_irq_restore(flags); 689 690 return offset; 690 691 } 692 + EXPORT_SYMBOL(sg_copy_buffer); 691 693 692 694 /** 693 695 * sg_copy_from_buffer - Copy from a linear buffer to an SG list ··· 701 701 * 702 702 **/ 703 703 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 704 - void *buf, size_t buflen) 704 + const void *buf, size_t buflen) 705 705 { 706 - return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); 706 + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false); 707 707 } 708 708 EXPORT_SYMBOL(sg_copy_from_buffer); 709 709 ··· 729 729 * @sgl: The SG list 730 730 * @nents: Number of SG entries 731 731 * @buf: Where to copy from 732 - * @skip: Number of bytes to skip before copying 733 732 * @buflen: The number of bytes to copy 733 + * @skip: Number of bytes to skip before copying 734 734 * 735 735 * Returns the number of copied bytes. 736 736 * 737 737 **/ 738 738 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, 739 - void *buf, size_t buflen, off_t skip) 739 + const void *buf, size_t buflen, off_t skip) 740 740 { 741 - return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); 741 + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false); 742 742 } 743 743 EXPORT_SYMBOL(sg_pcopy_from_buffer); 744 744 ··· 747 747 * @sgl: The SG list 748 748 * @nents: Number of SG entries 749 749 * @buf: Where to copy to 750 - * @skip: Number of bytes to skip before copying 751 750 * @buflen: The number of bytes to copy 751 + * @skip: Number of bytes to skip before copying 752 752 * 753 753 * Returns the number of copied bytes. 754 754 *
+18
mm/Kconfig
··· 636 636 changed to a smaller value in which case that is used. 637 637 638 638 A sane initial value is 80 MB. 639 + 640 + # For architectures that support deferred memory initialisation 641 + config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 642 + bool 643 + 644 + config DEFERRED_STRUCT_PAGE_INIT 645 + bool "Defer initialisation of struct pages to kswapd" 646 + default n 647 + depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 648 + depends on MEMORY_HOTPLUG 649 + help 650 + Ordinarily all struct pages are initialised during early boot in a 651 + single thread. On very large machines this can take a considerable 652 + amount of time. If this option is set, large machines will bring up 653 + a subset of memmap at boot and then initialise the rest in parallel 654 + when kswapd starts. This has a potential performance impact on 655 + processes running early in the lifetime of the systemm until kswapd 656 + finishes the initialisation.
+7 -6
mm/bootmem.c
··· 164 164 end = PFN_DOWN(physaddr + size); 165 165 166 166 for (; cursor < end; cursor++) { 167 - __free_pages_bootmem(pfn_to_page(cursor), 0); 167 + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); 168 168 totalram_pages++; 169 169 } 170 170 } ··· 172 172 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) 173 173 { 174 174 struct page *page; 175 - unsigned long *map, start, end, pages, count = 0; 175 + unsigned long *map, start, end, pages, cur, count = 0; 176 176 177 177 if (!bdata->node_bootmem_map) 178 178 return 0; ··· 210 210 if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) { 211 211 int order = ilog2(BITS_PER_LONG); 212 212 213 - __free_pages_bootmem(pfn_to_page(start), order); 213 + __free_pages_bootmem(pfn_to_page(start), start, order); 214 214 count += BITS_PER_LONG; 215 215 start += BITS_PER_LONG; 216 216 } else { 217 - unsigned long cur = start; 217 + cur = start; 218 218 219 219 start = ALIGN(start + 1, BITS_PER_LONG); 220 220 while (vec && cur != start) { 221 221 if (vec & 1) { 222 222 page = pfn_to_page(cur); 223 - __free_pages_bootmem(page, 0); 223 + __free_pages_bootmem(page, cur, 0); 224 224 count++; 225 225 } 226 226 vec >>= 1; ··· 229 229 } 230 230 } 231 231 232 + cur = bdata->node_min_pfn; 232 233 page = virt_to_page(bdata->node_bootmem_map); 233 234 pages = bdata->node_low_pfn - bdata->node_min_pfn; 234 235 pages = bootmem_bootmap_pages(pages); 235 236 count += pages; 236 237 while (pages--) 237 - __free_pages_bootmem(page++, 0); 238 + __free_pages_bootmem(page++, cur++, 0); 238 239 239 240 bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); 240 241
+2 -9
mm/internal.h
··· 155 155 } 156 156 157 157 extern int __isolate_free_page(struct page *page, unsigned int order); 158 - extern void __free_pages_bootmem(struct page *page, unsigned int order); 158 + extern void __free_pages_bootmem(struct page *page, unsigned long pfn, 159 + unsigned int order); 159 160 extern void prep_compound_page(struct page *page, unsigned long order); 160 161 #ifdef CONFIG_MEMORY_FAILURE 161 162 extern bool is_free_buddy_page(struct page *page); ··· 362 361 } while (0) 363 362 364 363 extern void mminit_verify_pageflags_layout(void); 365 - extern void mminit_verify_page_links(struct page *page, 366 - enum zone_type zone, unsigned long nid, unsigned long pfn); 367 364 extern void mminit_verify_zonelist(void); 368 - 369 365 #else 370 366 371 367 static inline void mminit_dprintk(enum mminit_level level, ··· 371 373 } 372 374 373 375 static inline void mminit_verify_pageflags_layout(void) 374 - { 375 - } 376 - 377 - static inline void mminit_verify_page_links(struct page *page, 378 - enum zone_type zone, unsigned long nid, unsigned long pfn) 379 376 { 380 377 } 381 378
+33 -1
mm/memblock.c
··· 820 820 821 821 822 822 /** 823 + * __next_reserved_mem_region - next function for for_each_reserved_region() 824 + * @idx: pointer to u64 loop variable 825 + * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL 826 + * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL 827 + * 828 + * Iterate over all reserved memory regions. 829 + */ 830 + void __init_memblock __next_reserved_mem_region(u64 *idx, 831 + phys_addr_t *out_start, 832 + phys_addr_t *out_end) 833 + { 834 + struct memblock_type *rsv = &memblock.reserved; 835 + 836 + if (*idx >= 0 && *idx < rsv->cnt) { 837 + struct memblock_region *r = &rsv->regions[*idx]; 838 + phys_addr_t base = r->base; 839 + phys_addr_t size = r->size; 840 + 841 + if (out_start) 842 + *out_start = base; 843 + if (out_end) 844 + *out_end = base + size - 1; 845 + 846 + *idx += 1; 847 + return; 848 + } 849 + 850 + /* signal end of iteration */ 851 + *idx = ULLONG_MAX; 852 + } 853 + 854 + /** 823 855 * __next__mem_range - next function for for_each_free_mem_range() etc. 824 856 * @idx: pointer to u64 loop variable 825 857 * @nid: node selector, %NUMA_NO_NODE for all nodes ··· 1419 1387 end = PFN_DOWN(base + size); 1420 1388 1421 1389 for (; cursor < end; cursor++) { 1422 - __free_pages_bootmem(pfn_to_page(cursor), 0); 1390 + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); 1423 1391 totalram_pages++; 1424 1392 } 1425 1393 }
+1 -8
mm/mm_init.c
··· 11 11 #include <linux/export.h> 12 12 #include <linux/memory.h> 13 13 #include <linux/notifier.h> 14 + #include <linux/sched.h> 14 15 #include "internal.h" 15 16 16 17 #ifdef CONFIG_DEBUG_MEMORY_INIT ··· 129 128 (NODES_MASK << NODES_PGSHIFT) + 130 129 (SECTIONS_MASK << SECTIONS_PGSHIFT); 131 130 BUG_ON(or_mask != add_mask); 132 - } 133 - 134 - void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone, 135 - unsigned long nid, unsigned long pfn) 136 - { 137 - BUG_ON(page_to_nid(page) != nid); 138 - BUG_ON(page_zonenum(page) != zone); 139 - BUG_ON(page_to_pfn(page) != pfn); 140 131 } 141 132 142 133 static __init int set_mminit_loglevel(char *str)
+5 -2
mm/nobootmem.c
··· 86 86 end = PFN_DOWN(addr + size); 87 87 88 88 for (; cursor < end; cursor++) { 89 - __free_pages_bootmem(pfn_to_page(cursor), 0); 89 + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); 90 90 totalram_pages++; 91 91 } 92 92 } ··· 101 101 while (start + (1UL << order) > end) 102 102 order--; 103 103 104 - __free_pages_bootmem(pfn_to_page(start), order); 104 + __free_pages_bootmem(pfn_to_page(start), start, order); 105 105 106 106 start += (1UL << order); 107 107 } ··· 129 129 u64 i; 130 130 131 131 memblock_clear_hotplug(0, -1); 132 + 133 + for_each_reserved_mem_region(i, &start, &end) 134 + reserve_bootmem_region(start, end); 132 135 133 136 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, 134 137 NULL)
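In the nobootmem.c hunk, reserved memblock regions are now handed to reserve_bootmem_region() before the free ranges go to the page allocator, and __free_pages_memory() releases each free range as the largest order-aligned blocks that fit, shrinking the order until a block no longer runs past the end of the range. A hedged userspace model of that chunking loop follows; MAX_ORDER and the alignment-derived starting order are assumptions here, only the fit-and-advance step is taken from the visible hunk.

#include <stdio.h>

#define MAX_ORDER 11

static void free_range(unsigned long start, unsigned long end)
{
    while (start < end) {
        /* largest order permitted by the alignment of 'start' (assumed) */
        unsigned long order = start ? __builtin_ctzl(start) : MAX_ORDER - 1;

        if (order > MAX_ORDER - 1)
            order = MAX_ORDER - 1;

        /* shrink until the block fits in what is left */
        while (start + (1UL << order) > end)
            order--;

        printf("free pfn %lu, order %lu (%lu pages)\n",
               start, order, 1UL << order);

        start += 1UL << order;
    }
}

int main(void)
{
    free_range(3, 70);      /* e.g. pfns 3..69 */
    return 0;
}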
+387 -55
mm/page_alloc.c
··· 18 18 #include <linux/mm.h> 19 19 #include <linux/swap.h> 20 20 #include <linux/interrupt.h> 21 + #include <linux/rwsem.h> 21 22 #include <linux/pagemap.h> 22 23 #include <linux/jiffies.h> 23 24 #include <linux/bootmem.h> ··· 62 61 #include <linux/hugetlb.h> 63 62 #include <linux/sched/rt.h> 64 63 #include <linux/page_owner.h> 64 + #include <linux/kthread.h> 65 65 66 66 #include <asm/sections.h> 67 67 #include <asm/tlbflush.h> ··· 236 234 #endif 237 235 238 236 int page_group_by_mobility_disabled __read_mostly; 237 + 238 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 239 + static inline void reset_deferred_meminit(pg_data_t *pgdat) 240 + { 241 + pgdat->first_deferred_pfn = ULONG_MAX; 242 + } 243 + 244 + /* Returns true if the struct page for the pfn is uninitialised */ 245 + static inline bool __meminit early_page_uninitialised(unsigned long pfn) 246 + { 247 + int nid = early_pfn_to_nid(pfn); 248 + 249 + if (pfn >= NODE_DATA(nid)->first_deferred_pfn) 250 + return true; 251 + 252 + return false; 253 + } 254 + 255 + static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) 256 + { 257 + if (pfn >= NODE_DATA(nid)->first_deferred_pfn) 258 + return true; 259 + 260 + return false; 261 + } 262 + 263 + /* 264 + * Returns false when the remaining initialisation should be deferred until 265 + * later in the boot cycle when it can be parallelised. 266 + */ 267 + static inline bool update_defer_init(pg_data_t *pgdat, 268 + unsigned long pfn, unsigned long zone_end, 269 + unsigned long *nr_initialised) 270 + { 271 + /* Always populate low zones for address-contrained allocations */ 272 + if (zone_end < pgdat_end_pfn(pgdat)) 273 + return true; 274 + 275 + /* Initialise at least 2G of the highest zone */ 276 + (*nr_initialised)++; 277 + if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) && 278 + (pfn & (PAGES_PER_SECTION - 1)) == 0) { 279 + pgdat->first_deferred_pfn = pfn; 280 + return false; 281 + } 282 + 283 + return true; 284 + } 285 + #else 286 + static inline void reset_deferred_meminit(pg_data_t *pgdat) 287 + { 288 + } 289 + 290 + static inline bool early_page_uninitialised(unsigned long pfn) 291 + { 292 + return false; 293 + } 294 + 295 + static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) 296 + { 297 + return false; 298 + } 299 + 300 + static inline bool update_defer_init(pg_data_t *pgdat, 301 + unsigned long pfn, unsigned long zone_end, 302 + unsigned long *nr_initialised) 303 + { 304 + return true; 305 + } 306 + #endif 307 + 239 308 240 309 void set_pageblock_migratetype(struct page *page, int migratetype) 241 310 { ··· 837 764 return 0; 838 765 } 839 766 767 + static void __meminit __init_single_page(struct page *page, unsigned long pfn, 768 + unsigned long zone, int nid) 769 + { 770 + set_page_links(page, zone, nid, pfn); 771 + init_page_count(page); 772 + page_mapcount_reset(page); 773 + page_cpupid_reset_last(page); 774 + 775 + INIT_LIST_HEAD(&page->lru); 776 + #ifdef WANT_PAGE_VIRTUAL 777 + /* The shift won't overflow because ZONE_NORMAL is below 4G. 
*/ 778 + if (!is_highmem_idx(zone)) 779 + set_page_address(page, __va(pfn << PAGE_SHIFT)); 780 + #endif 781 + } 782 + 783 + static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, 784 + int nid) 785 + { 786 + return __init_single_page(pfn_to_page(pfn), pfn, zone, nid); 787 + } 788 + 789 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 790 + static void init_reserved_page(unsigned long pfn) 791 + { 792 + pg_data_t *pgdat; 793 + int nid, zid; 794 + 795 + if (!early_page_uninitialised(pfn)) 796 + return; 797 + 798 + nid = early_pfn_to_nid(pfn); 799 + pgdat = NODE_DATA(nid); 800 + 801 + for (zid = 0; zid < MAX_NR_ZONES; zid++) { 802 + struct zone *zone = &pgdat->node_zones[zid]; 803 + 804 + if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) 805 + break; 806 + } 807 + __init_single_pfn(pfn, zid, nid); 808 + } 809 + #else 810 + static inline void init_reserved_page(unsigned long pfn) 811 + { 812 + } 813 + #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 814 + 815 + /* 816 + * Initialised pages do not have PageReserved set. This function is 817 + * called for each range allocated by the bootmem allocator and 818 + * marks the pages PageReserved. The remaining valid pages are later 819 + * sent to the buddy page allocator. 820 + */ 821 + void __meminit reserve_bootmem_region(unsigned long start, unsigned long end) 822 + { 823 + unsigned long start_pfn = PFN_DOWN(start); 824 + unsigned long end_pfn = PFN_UP(end); 825 + 826 + for (; start_pfn < end_pfn; start_pfn++) { 827 + if (pfn_valid(start_pfn)) { 828 + struct page *page = pfn_to_page(start_pfn); 829 + 830 + init_reserved_page(start_pfn); 831 + SetPageReserved(page); 832 + } 833 + } 834 + } 835 + 840 836 static bool free_pages_prepare(struct page *page, unsigned int order) 841 837 { 842 838 bool compound = PageCompound(page); ··· 960 818 local_irq_restore(flags); 961 819 } 962 820 963 - void __init __free_pages_bootmem(struct page *page, unsigned int order) 821 + static void __init __free_pages_boot_core(struct page *page, 822 + unsigned long pfn, unsigned int order) 964 823 { 965 824 unsigned int nr_pages = 1 << order; 966 825 struct page *p = page; ··· 980 837 set_page_refcounted(page); 981 838 __free_pages(page, order); 982 839 } 840 + 841 + #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ 842 + defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) 843 + /* Only safe to use early in boot when initialisation is single-threaded */ 844 + static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 845 + 846 + int __meminit early_pfn_to_nid(unsigned long pfn) 847 + { 848 + int nid; 849 + 850 + /* The system will behave unpredictably otherwise */ 851 + BUG_ON(system_state != SYSTEM_BOOTING); 852 + 853 + nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 854 + if (nid >= 0) 855 + return nid; 856 + /* just returns 0 */ 857 + return 0; 858 + } 859 + #endif 860 + 861 + #ifdef CONFIG_NODES_SPAN_OTHER_NODES 862 + static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, 863 + struct mminit_pfnnid_cache *state) 864 + { 865 + int nid; 866 + 867 + nid = __early_pfn_to_nid(pfn, state); 868 + if (nid >= 0 && nid != node) 869 + return false; 870 + return true; 871 + } 872 + 873 + /* Only safe to use early in boot when initialisation is single-threaded */ 874 + static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 875 + { 876 + return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache); 877 + } 878 + 879 + #else 880 + 881 + static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 882 + 
{ 883 + return true; 884 + } 885 + static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, 886 + struct mminit_pfnnid_cache *state) 887 + { 888 + return true; 889 + } 890 + #endif 891 + 892 + 893 + void __init __free_pages_bootmem(struct page *page, unsigned long pfn, 894 + unsigned int order) 895 + { 896 + if (early_page_uninitialised(pfn)) 897 + return; 898 + return __free_pages_boot_core(page, pfn, order); 899 + } 900 + 901 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 902 + static void __init deferred_free_range(struct page *page, 903 + unsigned long pfn, int nr_pages) 904 + { 905 + int i; 906 + 907 + if (!page) 908 + return; 909 + 910 + /* Free a large naturally-aligned chunk if possible */ 911 + if (nr_pages == MAX_ORDER_NR_PAGES && 912 + (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) { 913 + set_pageblock_migratetype(page, MIGRATE_MOVABLE); 914 + __free_pages_boot_core(page, pfn, MAX_ORDER-1); 915 + return; 916 + } 917 + 918 + for (i = 0; i < nr_pages; i++, page++, pfn++) 919 + __free_pages_boot_core(page, pfn, 0); 920 + } 921 + 922 + static __initdata DECLARE_RWSEM(pgdat_init_rwsem); 923 + 924 + /* Initialise remaining memory on a node */ 925 + static int __init deferred_init_memmap(void *data) 926 + { 927 + pg_data_t *pgdat = data; 928 + int nid = pgdat->node_id; 929 + struct mminit_pfnnid_cache nid_init_state = { }; 930 + unsigned long start = jiffies; 931 + unsigned long nr_pages = 0; 932 + unsigned long walk_start, walk_end; 933 + int i, zid; 934 + struct zone *zone; 935 + unsigned long first_init_pfn = pgdat->first_deferred_pfn; 936 + const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 937 + 938 + if (first_init_pfn == ULONG_MAX) { 939 + up_read(&pgdat_init_rwsem); 940 + return 0; 941 + } 942 + 943 + /* Bind memory initialisation thread to a local node if possible */ 944 + if (!cpumask_empty(cpumask)) 945 + set_cpus_allowed_ptr(current, cpumask); 946 + 947 + /* Sanity check boundaries */ 948 + BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 949 + BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 950 + pgdat->first_deferred_pfn = ULONG_MAX; 951 + 952 + /* Only the highest zone is deferred so find it */ 953 + for (zid = 0; zid < MAX_NR_ZONES; zid++) { 954 + zone = pgdat->node_zones + zid; 955 + if (first_init_pfn < zone_end_pfn(zone)) 956 + break; 957 + } 958 + 959 + for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) { 960 + unsigned long pfn, end_pfn; 961 + struct page *page = NULL; 962 + struct page *free_base_page = NULL; 963 + unsigned long free_base_pfn = 0; 964 + int nr_to_free = 0; 965 + 966 + end_pfn = min(walk_end, zone_end_pfn(zone)); 967 + pfn = first_init_pfn; 968 + if (pfn < walk_start) 969 + pfn = walk_start; 970 + if (pfn < zone->zone_start_pfn) 971 + pfn = zone->zone_start_pfn; 972 + 973 + for (; pfn < end_pfn; pfn++) { 974 + if (!pfn_valid_within(pfn)) 975 + goto free_range; 976 + 977 + /* 978 + * Ensure pfn_valid is checked every 979 + * MAX_ORDER_NR_PAGES for memory holes 980 + */ 981 + if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { 982 + if (!pfn_valid(pfn)) { 983 + page = NULL; 984 + goto free_range; 985 + } 986 + } 987 + 988 + if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) { 989 + page = NULL; 990 + goto free_range; 991 + } 992 + 993 + /* Minimise pfn page lookups and scheduler checks */ 994 + if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) { 995 + page++; 996 + } else { 997 + nr_pages += nr_to_free; 998 + deferred_free_range(free_base_page, 999 + free_base_pfn, nr_to_free); 1000 + free_base_page = 
NULL; 1001 + free_base_pfn = nr_to_free = 0; 1002 + 1003 + page = pfn_to_page(pfn); 1004 + cond_resched(); 1005 + } 1006 + 1007 + if (page->flags) { 1008 + VM_BUG_ON(page_zone(page) != zone); 1009 + goto free_range; 1010 + } 1011 + 1012 + __init_single_page(page, pfn, zid, nid); 1013 + if (!free_base_page) { 1014 + free_base_page = page; 1015 + free_base_pfn = pfn; 1016 + nr_to_free = 0; 1017 + } 1018 + nr_to_free++; 1019 + 1020 + /* Where possible, batch up pages for a single free */ 1021 + continue; 1022 + free_range: 1023 + /* Free the current block of pages to allocator */ 1024 + nr_pages += nr_to_free; 1025 + deferred_free_range(free_base_page, free_base_pfn, 1026 + nr_to_free); 1027 + free_base_page = NULL; 1028 + free_base_pfn = nr_to_free = 0; 1029 + } 1030 + 1031 + first_init_pfn = max(end_pfn, first_init_pfn); 1032 + } 1033 + 1034 + /* Sanity check that the next zone really is unpopulated */ 1035 + WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 1036 + 1037 + pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, 1038 + jiffies_to_msecs(jiffies - start)); 1039 + up_read(&pgdat_init_rwsem); 1040 + return 0; 1041 + } 1042 + 1043 + void __init page_alloc_init_late(void) 1044 + { 1045 + int nid; 1046 + 1047 + for_each_node_state(nid, N_MEMORY) { 1048 + down_read(&pgdat_init_rwsem); 1049 + kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 1050 + } 1051 + 1052 + /* Block until all are initialised */ 1053 + down_write(&pgdat_init_rwsem); 1054 + up_write(&pgdat_init_rwsem); 1055 + } 1056 + #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 983 1057 984 1058 #ifdef CONFIG_CMA 985 1059 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ ··· 4510 4150 zone->nr_migrate_reserve_block = reserve; 4511 4151 4512 4152 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 4153 + if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone))) 4154 + return; 4155 + 4513 4156 if (!pfn_valid(pfn)) 4514 4157 continue; 4515 4158 page = pfn_to_page(pfn); ··· 4575 4212 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 4576 4213 unsigned long start_pfn, enum memmap_context context) 4577 4214 { 4578 - struct page *page; 4215 + pg_data_t *pgdat = NODE_DATA(nid); 4579 4216 unsigned long end_pfn = start_pfn + size; 4580 4217 unsigned long pfn; 4581 4218 struct zone *z; 4219 + unsigned long nr_initialised = 0; 4582 4220 4583 4221 if (highest_memmap_pfn < end_pfn - 1) 4584 4222 highest_memmap_pfn = end_pfn - 1; 4585 4223 4586 - z = &NODE_DATA(nid)->node_zones[zone]; 4224 + z = &pgdat->node_zones[zone]; 4587 4225 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4588 4226 /* 4589 4227 * There can be holes in boot-time mem_map[]s ··· 4596 4232 continue; 4597 4233 if (!early_pfn_in_nid(pfn, nid)) 4598 4234 continue; 4235 + if (!update_defer_init(pgdat, pfn, end_pfn, 4236 + &nr_initialised)) 4237 + break; 4599 4238 } 4600 - page = pfn_to_page(pfn); 4601 - set_page_links(page, zone, nid, pfn); 4602 - mminit_verify_page_links(page, zone, nid, pfn); 4603 - init_page_count(page); 4604 - page_mapcount_reset(page); 4605 - page_cpupid_reset_last(page); 4606 - SetPageReserved(page); 4239 + 4607 4240 /* 4608 4241 * Mark the block movable so that blocks are reserved for 4609 4242 * movable at startup. This will force kernel allocations ··· 4615 4254 * check here not to call set_pageblock_migratetype() against 4616 4255 * pfn out of zone. 
4617 4256 */ 4618 - if ((z->zone_start_pfn <= pfn) 4619 - && (pfn < zone_end_pfn(z)) 4620 - && !(pfn & (pageblock_nr_pages - 1))) 4621 - set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4257 + if (!(pfn & (pageblock_nr_pages - 1))) { 4258 + struct page *page = pfn_to_page(pfn); 4622 4259 4623 - INIT_LIST_HEAD(&page->lru); 4624 - #ifdef WANT_PAGE_VIRTUAL 4625 - /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 4626 - if (!is_highmem_idx(zone)) 4627 - set_page_address(page, __va(pfn << PAGE_SHIFT)); 4628 - #endif 4260 + __init_single_page(page, pfn, zone, nid); 4261 + set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4262 + } else { 4263 + __init_single_pfn(pfn, zone, nid); 4264 + } 4629 4265 } 4630 4266 } 4631 4267 ··· 4880 4522 4881 4523 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4882 4524 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 4525 + 4883 4526 /* 4884 4527 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 4885 4528 */ 4886 - int __meminit __early_pfn_to_nid(unsigned long pfn) 4529 + int __meminit __early_pfn_to_nid(unsigned long pfn, 4530 + struct mminit_pfnnid_cache *state) 4887 4531 { 4888 4532 unsigned long start_pfn, end_pfn; 4889 4533 int nid; 4890 - /* 4891 - * NOTE: The following SMP-unsafe globals are only used early in boot 4892 - * when the kernel is running single-threaded. 4893 - */ 4894 - static unsigned long __meminitdata last_start_pfn, last_end_pfn; 4895 - static int __meminitdata last_nid; 4896 4534 4897 - if (last_start_pfn <= pfn && pfn < last_end_pfn) 4898 - return last_nid; 4535 + if (state->last_start <= pfn && pfn < state->last_end) 4536 + return state->last_nid; 4899 4537 4900 4538 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 4901 4539 if (nid != -1) { 4902 - last_start_pfn = start_pfn; 4903 - last_end_pfn = end_pfn; 4904 - last_nid = nid; 4540 + state->last_start = start_pfn; 4541 + state->last_end = end_pfn; 4542 + state->last_nid = nid; 4905 4543 } 4906 4544 4907 4545 return nid; 4908 4546 } 4909 4547 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 4910 - 4911 - int __meminit early_pfn_to_nid(unsigned long pfn) 4912 - { 4913 - int nid; 4914 - 4915 - nid = __early_pfn_to_nid(pfn); 4916 - if (nid >= 0) 4917 - return nid; 4918 - /* just returns 0 */ 4919 - return 0; 4920 - } 4921 - 4922 - #ifdef CONFIG_NODES_SPAN_OTHER_NODES 4923 - bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 4924 - { 4925 - int nid; 4926 - 4927 - nid = __early_pfn_to_nid(pfn); 4928 - if (nid >= 0 && nid != node) 4929 - return false; 4930 - return true; 4931 - } 4932 - #endif 4933 4548 4934 4549 /** 4935 4550 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range ··· 5421 5090 /* pg_data_t should be reset to zero when it's allocated */ 5422 5091 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); 5423 5092 5093 + reset_deferred_meminit(pgdat); 5424 5094 pgdat->node_id = nid; 5425 5095 pgdat->node_start_pfn = node_start_pfn; 5426 5096 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
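The page_alloc.c changes above add CONFIG_DEFERRED_STRUCT_PAGE_INIT: early boot initialises the low zones plus roughly 2G of the highest zone per node, records first_deferred_pfn, and page_alloc_init_late() later runs one pgdatinit kthread per node to initialise the rest. Both early_pfn_to_nid() and the deferred path use struct mminit_pfnnid_cache to remember the last pfn range that was looked up, so consecutive pfns in the same range skip the search. Below is a minimal userspace sketch of that one-entry cache; the range table is illustrative, not real node layout.

#include <stdio.h>

struct range { unsigned long start, end; int nid; };

static const struct range node_ranges[] = {
    { 0,      0x8000,  0 },
    { 0x8000, 0x10000, 1 },
};

struct pfnnid_cache { unsigned long last_start, last_end; int last_nid; };

static int pfn_to_nid(unsigned long pfn, struct pfnnid_cache *state)
{
    unsigned int i;

    if (state->last_start <= pfn && pfn < state->last_end)
        return state->last_nid;         /* cache hit: same range as last time */

    for (i = 0; i < sizeof(node_ranges) / sizeof(node_ranges[0]); i++) {
        if (node_ranges[i].start <= pfn && pfn < node_ranges[i].end) {
            state->last_start = node_ranges[i].start;
            state->last_end = node_ranges[i].end;
            state->last_nid = node_ranges[i].nid;
            return state->last_nid;
        }
    }
    return -1;                          /* pfn not covered by any range */
}

int main(void)
{
    struct pfnnid_cache cache = { 0, 0, -1 };
    unsigned long pfn;

    for (pfn = 0x7ffe; pfn < 0x8002; pfn++)
        printf("pfn %#lx -> nid %d\n", pfn, pfn_to_nid(pfn, &cache));
    return 0;
}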
-1
scripts/gdb/linux/dmesg.py
··· 12 12 # 13 13 14 14 import gdb 15 - import string 16 15 17 16 from linux import utils 18 17
+92
scripts/gdb/linux/lists.py
··· 1 + # 2 + # gdb helper commands and functions for Linux kernel debugging 3 + # 4 + # list tools 5 + # 6 + # Copyright (c) Thiebaud Weksteen, 2015 7 + # 8 + # Authors: 9 + # Thiebaud Weksteen <thiebaud@weksteen.fr> 10 + # 11 + # This work is licensed under the terms of the GNU GPL version 2. 12 + # 13 + 14 + import gdb 15 + 16 + from linux import utils 17 + 18 + list_head = utils.CachedType("struct list_head") 19 + 20 + 21 + def list_check(head): 22 + nb = 0 23 + if (head.type == list_head.get_type().pointer()): 24 + head = head.dereference() 25 + elif (head.type != list_head.get_type()): 26 + raise gdb.GdbError('argument must be of type (struct list_head [*])') 27 + c = head 28 + try: 29 + gdb.write("Starting with: {}\n".format(c)) 30 + except gdb.MemoryError: 31 + gdb.write('head is not accessible\n') 32 + return 33 + while True: 34 + p = c['prev'].dereference() 35 + n = c['next'].dereference() 36 + try: 37 + if p['next'] != c.address: 38 + gdb.write('prev.next != current: ' 39 + 'current@{current_addr}={current} ' 40 + 'prev@{p_addr}={p}\n'.format( 41 + current_addr=c.address, 42 + current=c, 43 + p_addr=p.address, 44 + p=p, 45 + )) 46 + return 47 + except gdb.MemoryError: 48 + gdb.write('prev is not accessible: ' 49 + 'current@{current_addr}={current}\n'.format( 50 + current_addr=c.address, 51 + current=c 52 + )) 53 + return 54 + try: 55 + if n['prev'] != c.address: 56 + gdb.write('next.prev != current: ' 57 + 'current@{current_addr}={current} ' 58 + 'next@{n_addr}={n}\n'.format( 59 + current_addr=c.address, 60 + current=c, 61 + n_addr=n.address, 62 + n=n, 63 + )) 64 + return 65 + except gdb.MemoryError: 66 + gdb.write('next is not accessible: ' 67 + 'current@{current_addr}={current}\n'.format( 68 + current_addr=c.address, 69 + current=c 70 + )) 71 + return 72 + c = n 73 + nb += 1 74 + if c == head: 75 + gdb.write("list is consistent: {} node(s)\n".format(nb)) 76 + return 77 + 78 + 79 + class LxListChk(gdb.Command): 80 + """Verify a list consistency""" 81 + 82 + def __init__(self): 83 + super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA, 84 + gdb.COMPLETE_EXPRESSION) 85 + 86 + def invoke(self, arg, from_tty): 87 + argv = gdb.string_to_argv(arg) 88 + if len(argv) != 1: 89 + raise gdb.GdbError("lx-list-check takes one argument") 90 + list_check(gdb.parse_and_eval(argv[0])) 91 + 92 + LxListChk()
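The new lx-list-check command walks a struct list_head and reports either the first inconsistent prev/next link or the total node count. A typical invocation from a kernel-aware gdb session might look like the following; the argument is just an example of a kernel list, the count is a placeholder, and the messages come from the command's own format strings.

(gdb) lx-list-check init_task.tasks
Starting with: {next = ..., prev = ...}
list is consistent: <N> node(s)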
+4 -5
scripts/gdb/linux/symbols.py
··· 14 14 import gdb 15 15 import os 16 16 import re 17 - import string 18 17 19 - from linux import modules, utils 18 + from linux import modules 20 19 21 20 22 21 if hasattr(gdb, 'Breakpoint'): ··· 96 97 return "" 97 98 attrs = sect_attrs['attrs'] 98 99 section_name_to_address = { 99 - attrs[n]['name'].string() : attrs[n]['address'] 100 + attrs[n]['name'].string(): attrs[n]['address'] 100 101 for n in range(int(sect_attrs['nsections']))} 101 102 args = [] 102 103 for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]: ··· 123 124 addr=module_addr, 124 125 sections=self._section_arguments(module)) 125 126 gdb.execute(cmdline, to_string=True) 126 - if not module_name in self.loaded_modules: 127 + if module_name not in self.loaded_modules: 127 128 self.loaded_modules.append(module_name) 128 129 else: 129 130 gdb.write("no module object found for '{0}'\n".format(module_name)) ··· 163 164 self.load_all_symbols() 164 165 165 166 if hasattr(gdb, 'Breakpoint'): 166 - if not self.breakpoint is None: 167 + if self.breakpoint is not None: 167 168 self.breakpoint.delete() 168 169 self.breakpoint = None 169 170 self.breakpoint = LoadModuleBreakpoint(
+18 -2
scripts/gdb/linux/tasks.py
··· 18 18 19 19 task_type = utils.CachedType("struct task_struct") 20 20 21 + 21 22 def task_lists(): 22 - global task_type 23 23 task_ptr_type = task_type.get_type().pointer() 24 24 init_task = gdb.parse_and_eval("init_task").address 25 25 t = g = init_task ··· 37 37 task_ptr_type, "tasks") 38 38 if t == init_task: 39 39 return 40 + 40 41 41 42 def get_task_by_pid(pid): 42 43 for task in task_lists(): ··· 66 65 LxTaskByPidFunc() 67 66 68 67 68 + class LxPs(gdb.Command): 69 + """Dump Linux tasks.""" 70 + 71 + def __init__(self): 72 + super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA) 73 + 74 + def invoke(self, arg, from_tty): 75 + for task in task_lists(): 76 + gdb.write("{address} {pid} {comm}\n".format( 77 + address=task, 78 + pid=task["pid"], 79 + comm=task["comm"].string())) 80 + 81 + LxPs() 82 + 83 + 69 84 thread_info_type = utils.CachedType("struct thread_info") 70 85 71 86 ia64_task_size = None 72 87 73 88 74 89 def get_thread_info(task): 75 - global thread_info_type 76 90 thread_info_ptr_type = thread_info_type.get_type().pointer() 77 91 if utils.is_target_arch("ia64"): 78 92 global ia64_task_size
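The lx-ps command added above iterates task_lists() and prints one line per task: the task_struct address, the pid, and the comm, exactly as in its format string. Illustrative usage (the output values are placeholders):

(gdb) lx-ps
<task_struct address> <pid> <comm>
...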
+2 -2
scripts/gdb/linux/utils.py
··· 83 83 elif "big endian" in endian: 84 84 target_endianness = BIG_ENDIAN 85 85 else: 86 - raise gdb.GdgError("unknown endianness '{0}'".format(str(endian))) 86 + raise gdb.GdbError("unknown endianness '{0}'".format(str(endian))) 87 87 return target_endianness 88 88 89 89 ··· 151 151 gdbserver_type = GDBSERVER_QEMU 152 152 elif probe_kgdb(): 153 153 gdbserver_type = GDBSERVER_KGDB 154 - if not gdbserver_type is None and hasattr(gdb, 'events'): 154 + if gdbserver_type is not None and hasattr(gdb, 'events'): 155 155 gdb.events.exited.connect(exit_handler) 156 156 return gdbserver_type
+1
scripts/gdb/vmlinux-gdb.py
··· 28 28 import linux.dmesg 29 29 import linux.tasks 30 30 import linux.cpus 31 + import linux.lists
+1 -1
sound/core/memalloc.c
··· 124 124 dmab->addr = 0; 125 125 126 126 if (dev->of_node) 127 - pool = of_get_named_gen_pool(dev->of_node, "iram", 0); 127 + pool = of_gen_pool_get(dev->of_node, "iram", 0); 128 128 129 129 if (!pool) 130 130 return;