···
                 mmu_vmalloc_psize = MMU_PAGE_64K;
                 if (mmu_linear_psize == MMU_PAGE_4K)
                         mmu_linear_psize = MMU_PAGE_64K;
+                if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+                        /*
+                         * Don't use 64k pages for ioremap on pSeries, since
+                         * that would stop us accessing the HEA ethernet.
+                         */
+                        if (!machine_is(pseries))
+                                mmu_io_psize = MMU_PAGE_64K;
+                } else
                         mmu_ci_restrictions = 1;
         }
 #endif /* CONFIG_PPC_64K_PAGES */
+6-2
arch/powerpc/sysdev/bestcomm/bestcomm.c
···
         int i, tasknum = -1;
         struct bcom_task *tsk;

+        /* Don't try to do anything if bestcomm init failed */
+        if (!bcom_eng)
+                return NULL;
+
         /* Get and reserve a task num */
         spin_lock(&bcom_eng->lock);
···
 }

 static struct of_device_id mpc52xx_bcom_of_match[] = {
-        { .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", },
-        { .type = "dma-controller", .compatible = "mpc5200-bestcomm", },
+        { .compatible = "fsl,mpc5200-bestcomm", },
+        { .compatible = "mpc5200-bestcomm", },
         {},
 };
+1-1
arch/powerpc/sysdev/ipic.c
···
 {
         int rc;

-        if (!primary_ipic->regs)
+        if (!primary_ipic || !primary_ipic->regs)
                 return -ENODEV;
         printk(KERN_DEBUG "Registering ipic with sysfs...\n");
arch/sparc64/mm/init.c

···
 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 unsigned long sparc64_kern_sec_context __read_mostly;

-int bigkernel = 0;
+int num_kernel_image_mappings;

 #ifdef CONFIG_DEBUG_DCFLUSH
 atomic_t dcpage_flushes = ATOMIC_INIT(0);
···
 static void __init remap_kernel(void)
 {
         unsigned long phys_page, tte_vaddr, tte_data;
-        int tlb_ent = sparc64_highest_locked_tlbent();
+        int i, tlb_ent = sparc64_highest_locked_tlbent();

         tte_vaddr = (unsigned long) KERNBASE;
         phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
···

         /* Now lock us into the TLBs via Hypervisor or OBP. */
         if (tlb_type == hypervisor) {
-                hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-                hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-                if (bigkernel) {
-                        tte_vaddr += 0x400000;
-                        tte_data += 0x400000;
+                for (i = 0; i < num_kernel_image_mappings; i++) {
                         hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
                         hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+                        tte_vaddr += 0x400000;
+                        tte_data += 0x400000;
                 }
         } else {
-                prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-                prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-                if (bigkernel) {
-                        tlb_ent -= 1;
-                        prom_dtlb_load(tlb_ent,
-                                       tte_data + 0x400000,
-                                       tte_vaddr + 0x400000);
-                        prom_itlb_load(tlb_ent,
-                                       tte_data + 0x400000,
-                                       tte_vaddr + 0x400000);
+                for (i = 0; i < num_kernel_image_mappings; i++) {
+                        prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+                        prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+                        tte_vaddr += 0x400000;
+                        tte_data += 0x400000;
                 }
-                sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+                sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
         }
         if (tlb_type == cheetah_plus) {
                 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
···
         shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

         real_end = (unsigned long)_end;
-        if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
-                bigkernel = 1;
-        if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
-                prom_printf("paging_init: Kernel > 8MB, too large.\n");
-                prom_halt();
-        }
+        num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+        printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+               num_kernel_image_mappings);

         /* Set kernel pgd to upper alias so physical page computations
          * work.
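As a side note, here is a minimal user-space sketch of the sizing arithmetic the paging_init() hunk above introduces; DIV_ROUND_UP is written out to match the kernel macro, and the 10 MB image size is only an example:

#include <stdio.h>

/* Same rounding-up division the kernel's DIV_ROUND_UP() macro performs. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long kernbase = 0x400000UL;             /* example KERNBASE */
        unsigned long real_end = kernbase + (10 << 20);  /* pretend 10 MB image */

        /* One locked TLB entry per 4 MB (1 << 22) of kernel image. */
        int num_kernel_image_mappings =
                DIV_ROUND_UP(real_end - kernbase, 1 << 22);

        printf("Kernel: Using %d locked TLB entries for main kernel image.\n",
               num_kernel_image_mappings);   /* prints 3 for a 10 MB image */
        return 0;
}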
+3-3
arch/x86/mm/ioremap.c
···
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                 enum ioremap_mode mode)
 {
         unsigned long pfn, offset, last_addr, vaddr;
···
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
         return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);

-void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
         return __ioremap(phys_addr, size, IOR_MODE_CACHED);
 }
include/asm-sparc64/spitfire.h

···
                                SPITFIRE_HIGHEST_LOCKED_TLBENT : \
                                CHEETAH_HIGHEST_LOCKED_TLBENT)

+extern int num_kernel_image_mappings;
+
 /* The data cache is write through, so this just invalidates the
  * specified line.
  */
+3-3
include/asm-x86/io_32.h
···
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);

 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
         return ioremap_nocache(offset, size);
 }
+3-3
include/asm-x86/io_64.h
···
  * it's useful if some control registers are in such an area and write combining
  * or read caching is not desirable:
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);

 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
         return ioremap_nocache(offset, size);
 }
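A small stand-alone sketch of why these prototypes move to resource_size_t: on a 32-bit build with 64-bit resources, an `unsigned long` parameter silently truncates a BAR address above 4 GiB. The typedefs below only mimic that configuration for illustration; they are not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for a 32-bit build with 64-bit resources (e.g. PAE). */
typedef uint64_t resource_size_t;   /* models the kernel's 64-bit resource type */
typedef uint32_t old_ulong_t;       /* models 'unsigned long' on a 32-bit build */

int main(void)
{
        /* A PCI BAR placed above 4 GiB, as pci_resource_start() may return. */
        resource_size_t bar = 0x100000000ULL + 0x1000;

        old_ulong_t truncated = (old_ulong_t)bar;   /* old ioremap() parameter */
        resource_size_t kept  = bar;                /* new ioremap() parameter */

        printf("old prototype would map 0x%08x, new prototype maps 0x%llx\n",
               (unsigned)truncated, (unsigned long long)kept);
        return 0;
}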
kernel/time/timekeeping.c

···

         tick_clock_notify();

+        /*
+         * We're holding xtime lock and waking up klogd would deadlock
+         * us on enqueue.  So no printing!
         printk(KERN_INFO "Time: %s clocksource has been installed.\n",
                clock->name);
+         */
 }
 #else
 static inline void change_clocksource(void) { }
+1-1
lib/iomap.c
···
  * */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-        unsigned long start = pci_resource_start(dev, bar);
+        resource_size_t start = pci_resource_start(dev, bar);
         unsigned long len = pci_resource_len(dev, bar);
         unsigned long flags = pci_resource_flags(dev, bar);
net/ipv4/fib_trie.c

···
         return rcu_dereference(ret);
 }

+/* Same as rcu_assign_pointer
+ * but that macro() assumes that value is a pointer.
+ */
 static inline void node_set_parent(struct node *node, struct tnode *ptr)
 {
+        smp_wmb();
+        node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }

 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
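For illustration only, a rough user-space analog of the publish ordering that comment refers to, expressed with a C11 release store; the struct definitions and the 0x1 tag bit are simplified stand-ins, not the kernel's types:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct tnode { int children; };          /* simplified stand-in */

struct node {
        _Atomic uintptr_t parent;        /* tagged pointer, like node->parent */
};

static void node_set_parent(struct node *node, struct tnode *ptr)
{
        /* Release store: prior writes to *ptr become visible before the
         * tagged parent value does; same intent as the smp_wmb() plus
         * plain store in the hunk above. */
        atomic_store_explicit(&node->parent,
                              (uintptr_t)ptr | 0x1u,
                              memory_order_release);
}

int main(void)
{
        static struct tnode tn = { .children = 4 };
        struct node n = { 0 };

        node_set_parent(&n, &tn);
        printf("parent word: %#lx\n", (unsigned long)atomic_load(&n.parent));
        return 0;
}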
+1-1
net/ipv4/ip_fragment.c
···

         IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

-        net = skb->dev->nd_net;
+        net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net;
         /* Start by cleaning up the memory. */
         if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
                 ip_evictor(net);
+2-2
net/ipv4/tcp.c
···
                 if (!(psize -= copy))
                         goto out;

-                if (skb->len < mss_now || (flags & MSG_OOB))
+                if (skb->len < size_goal || (flags & MSG_OOB))
                         continue;

                 if (forced_push(tp)) {
···
                 if ((seglen -= copy) == 0 && iovlen == 0)
                         goto out;

-                if (skb->len < mss_now || (flags & MSG_OOB))
+                if (skb->len < size_goal || (flags & MSG_OOB))
                         continue;

                 if (forced_push(tp)) {
-2
net/ipv6/ndisc.c
···
         u8 *opt;
         int rd_len;
         int err;
-        int hlen;
         u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;

         dev = skb->dev;
···
                 return;
         }

-        hlen = 0;

         skb_reserve(buff, LL_RESERVED_SPACE(dev));
         ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
+7-6
net/sched/sch_htb.c
···
  */
 static psched_time_t htb_do_events(struct htb_sched *q, int level)
 {
-        int i;
-
-        for (i = 0; i < 500; i++) {
+        /* don't run for longer than 2 jiffies; 2 is used instead of
+           1 to simplify things when jiffy is going to be incremented
+           too soon */
+        unsigned long stop_at = jiffies + 2;
+        while (time_before(jiffies, stop_at)) {
                 struct htb_class *cl;
                 long diff;
                 struct rb_node *p = rb_first(&q->wait_pq[level]);
···
                 if (cl->cmode != HTB_CAN_SEND)
                         htb_add_to_wait_tree(q, cl, diff);
         }
-        if (net_ratelimit())
-                printk(KERN_WARNING "htb: too many events !\n");
-        return q->now + PSCHED_TICKS_PER_SEC / 10;
+        /* too much load - let's continue on next jiffie */
+        return q->now + PSCHED_TICKS_PER_SEC / HZ;
 }

 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
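The sch_htb hunk replaces a fixed 500-iteration cap with a time-bounded loop. Below is a user-space analog of that bounded-work pattern, using clock_gettime() where the kernel uses jiffies; the 2 ms budget and process_one_event() are made up for illustration:

#include <stdio.h>
#include <time.h>

/* Hypothetical unit of work; stands in for processing one htb event. */
static int process_one_event(void)
{
        static int remaining = 100000;
        return remaining-- > 0;          /* non-zero while work is pending */
}

static long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
        /* Bound how long one invocation may run, like "jiffies + 2" above. */
        const long long budget_ns = 2 * 1000 * 1000;   /* 2 ms */
        long long stop_at = now_ns() + budget_ns;
        long processed = 0;

        while (now_ns() < stop_at) {
                if (!process_one_event())
                        break;           /* ran out of work early */
                processed++;
        }
        /* Too much load: the caller would reschedule and continue later. */
        printf("processed %ld events within the budget\n", processed);
        return 0;
}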