arch/powerpc/mm/hash_utils_64.c

@@ -351,9 +351,14 @@
 		mmu_vmalloc_psize = MMU_PAGE_64K;
 		if (mmu_linear_psize == MMU_PAGE_4K)
 			mmu_linear_psize = MMU_PAGE_64K;
-		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
-			mmu_io_psize = MMU_PAGE_64K;
-		else
+		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+			/*
+			 * Don't use 64k pages for ioremap on pSeries, since
+			 * that would stop us accessing the HEA ethernet.
+			 */
+			if (!machine_is(pseries))
+				mmu_io_psize = MMU_PAGE_64K;
+		} else
 			mmu_ci_restrictions = 1;
 	}
 #endif /* CONFIG_PPC_64K_PAGES */
arch/powerpc/sysdev/bestcomm/bestcomm.c (+6, -2)
@@ -52,6 +52,10 @@
 	int i, tasknum = -1;
 	struct bcom_task *tsk;
 
+	/* Don't try to do anything if bestcomm init failed */
+	if (!bcom_eng)
+		return NULL;
+
 	/* Get and reserve a task num */
 	spin_lock(&bcom_eng->lock);
 
@@ -488,8 +484,8 @@
 }
 
 static struct of_device_id mpc52xx_bcom_of_match[] = {
-	{ .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", },
-	{ .type = "dma-controller", .compatible = "mpc5200-bestcomm", },
+	{ .compatible = "fsl,mpc5200-bestcomm", },
+	{ .compatible = "mpc5200-bestcomm", },
 	{},
 };
 
arch/powerpc/sysdev/ipic.c (+1, -1)
@@ -906,7 +906,7 @@
 {
 	int rc;
 
-	if (!primary_ipic->regs)
+	if (!primary_ipic || !primary_ipic->regs)
 		return -ENODEV;
 	printk(KERN_DEBUG "Registering ipic with sysfs...\n");
 
arch/sparc64/mm/init.c

@@ -166,7 +166,7 @@
 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 unsigned long sparc64_kern_sec_context __read_mostly;
 
-int bigkernel = 0;
+int num_kernel_image_mappings;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 atomic_t dcpage_flushes = ATOMIC_INIT(0);
@@ -572,7 +572,7 @@
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
-	int tlb_ent = sparc64_highest_locked_tlbent();
+	int i, tlb_ent = sparc64_highest_locked_tlbent();
 
 	tte_vaddr = (unsigned long) KERNBASE;
 	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -582,27 +582,20 @@
 
 	/* Now lock us into the TLBs via Hypervisor or OBP. */
 	if (tlb_type == hypervisor) {
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-		if (bigkernel) {
-			tte_vaddr += 0x400000;
-			tte_data += 0x400000;
+		for (i = 0; i < num_kernel_image_mappings; i++) {
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
 	} else {
-		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-		if (bigkernel) {
-			tlb_ent -= 1;
-			prom_dtlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
-			prom_itlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
+		for (i = 0; i < num_kernel_image_mappings; i++) {
+			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
-		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
 	}
 	if (tlb_type == cheetah_plus) {
 		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
@@ -1345,12 +1352,9 @@
 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
 	real_end = (unsigned long)_end;
-	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
-		bigkernel = 1;
-	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
-		prom_printf("paging_init: Kernel > 8MB, too large.\n");
-		prom_halt();
-	}
+	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+	       num_kernel_image_mappings);
 
 	/* Set kernel pgd to upper alias so physical page computations
 	 * work.
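Note: the new num_kernel_image_mappings calculation rounds the kernel image size up to whole 4MB locked-TLB mappings, which is what removes the old hard 8MB limit. A standalone sketch of the arithmetic (userspace, with the image sizes made up for illustration):

	#include <stdio.h>

	/* Same rounding rule as the kernel's DIV_ROUND_UP macro. */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Hypothetical image sizes: just under, exactly at, and
		 * well past the old 4MB/8MB boundaries.
		 */
		unsigned long sizes[] = { 0x3fffff, 0x400000, 0x400001, 0xa00000 };
		int i;

		for (i = 0; i < 4; i++)
			printf("%#lx bytes -> %lu locked TLB entries\n",
			       sizes[i], DIV_ROUND_UP(sizes[i], 1UL << 22));
		return 0;
	}

A 10MB image now simply takes three locked entries instead of hitting the old prom_halt() path.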
arch/x86/mm/ioremap.c (+3, -3)
@@ -106,7 +106,7 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 			       enum ioremap_mode mode)
 {
 	unsigned long pfn, offset, last_addr, vaddr;
@@ -193,13 +193,13 @@
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
-void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
 }
include/asm-sparc64/spitfire.h

@@ -63,6 +63,8 @@
 		 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
 		 CHEETAH_HIGHEST_LOCKED_TLBENT)
 
+extern int num_kernel_image_mappings;
+
 /* The data cache is write through, so this just invalidates the
  * specified line.
  */
include/asm-x86/io_32.h (+3, -3)
@@ -114,13 +114,13 @@
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }
include/asm-x86/io_64.h (+3, -3)
@@ -158,13 +158,13 @@
  * it's useful if some control registers are in such an area and write combining
  * or read caching is not desirable:
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }
kernel/time/timekeeping.c

@@ -191,8 +191,12 @@
 
 	tick_clock_notify();
 
+	/*
+	 * We're holding xtime lock and waking up klogd would deadlock
+	 * us on enqueue.  So no printing!
 	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
 	       clock->name);
+	 */
 }
 #else
 static inline void change_clocksource(void) { }
lib/iomap.c (+1, -1)
@@ -256,7 +256,7 @@
  * */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
 	unsigned long len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
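Note: the unsigned long -> resource_size_t conversions in ioremap.c, io_32.h, io_64.h and pci_iomap() above all fix the same class of bug: on a 32-bit kernel with a 64-bit physical address space (e.g. PAE), an unsigned long silently truncates a resource that sits above 4GB. A userspace sketch of the truncation, with fixed-width stand-ins for the kernel types and a made-up BAR address:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical PCI BAR placed above the 4GB line;
		 * resource_size_t is 64-bit even on 32-bit PAE builds.
		 */
		uint64_t bar = 0x1e0000000ULL;

		/* What the old 'unsigned long' prototypes kept on 32-bit. */
		uint32_t truncated = (uint32_t)bar;

		printf("real BAR:  %#llx\n", (unsigned long long)bar);
		printf("truncated: %#x (maps the wrong region)\n", truncated);
		return 0;
	}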
net/atm/lec.c

@@ -1250,6 +1250,10 @@
 	struct proc_dir_entry *p;
 
 	p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
+	if (!p) {
+		printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n");
+		return -ENOMEM;
+	}
 #endif
 
 	register_atm_ioctl(&lane_ioctl_ops);
net/ipv4/fib_trie.c (+5, -2)
@@ -177,10 +177,13 @@
 	return rcu_dereference(ret);
 }
 
+/* Same as rcu_assign_pointer
+ * but that macro() assumes that value is a pointer.
+ */
 static inline void node_set_parent(struct node *node, struct tnode *ptr)
 {
-	rcu_assign_pointer(node->parent,
-			   (unsigned long)ptr | NODE_TYPE(node));
+	smp_wmb();
+	node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
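Note: node->parent is an unsigned long carrying a pointer plus a type tag in its low bits, which is why rcu_assign_pointer() (written for pointer-typed lvalues) is open-coded here as smp_wmb() plus a plain store. A compilable sketch of the same publish ordering, with a C11 fence standing in for the kernel barrier and made-up names:

	#include <stdatomic.h>

	struct tnode;			/* opaque in this sketch */

	struct node {
		unsigned long parent;	/* pointer | low type bit */
	};

	/* Hypothetical userspace stand-in for the kernel's smp_wmb(). */
	static inline void wmb_sketch(void)
	{
		atomic_thread_fence(memory_order_release);
	}

	static void set_parent_sketch(struct node *n, struct tnode *ptr,
				      unsigned long type_bit)
	{
		/* Order the initialization of *ptr before publishing it,
		 * exactly the guarantee rcu_assign_pointer() provides.
		 */
		wmb_sketch();
		n->parent = (unsigned long)ptr | type_bit;
	}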
net/ipv4/ip_fragment.c (+1, -1)
@@ -568,7 +568,7 @@
 
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
 
-	net = skb->dev->nd_net;
+	net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net;
 	/* Start by cleaning up the memory. */
 	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
 		ip_evictor(net);
net/ipv4/tcp.c (+2, -2)
@@ -735,7 +735,7 @@
 		if (!(psize -= copy))
 			goto out;
 
-		if (skb->len < mss_now || (flags & MSG_OOB))
+		if (skb->len < size_goal || (flags & MSG_OOB))
 			continue;
 
 		if (forced_push(tp)) {
@@ -981,7 +981,7 @@
 			if ((seglen -= copy) == 0 && iovlen == 0)
 				goto out;
 
-			if (skb->len < mss_now || (flags & MSG_OOB))
+			if (skb->len < size_goal || (flags & MSG_OOB))
 				continue;
 
 			if (forced_push(tp)) {
net/sched/sch_htb.c

@@ -711,9 +711,11 @@
  */
 static psched_time_t htb_do_events(struct htb_sched *q, int level)
 {
-	int i;
-
-	for (i = 0; i < 500; i++) {
+	/* don't run for longer than 2 jiffies; 2 is used instead of
+	   1 to simplify things when jiffy is going to be incremented
+	   too soon */
+	unsigned long stop_at = jiffies + 2;
+	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
 		long diff;
 		struct rb_node *p = rb_first(&q->wait_pq[level]);
@@ -733,9 +731,8 @@
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-	if (net_ratelimit())
-		printk(KERN_WARNING "htb: too many events !\n");
-	return q->now + PSCHED_TICKS_PER_SEC / 10;
+	/* too much load - let's continue on next jiffie */
+	return q->now + PSCHED_TICKS_PER_SEC / HZ;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
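Note: bounding the loop with jiffies + 2 is safe even at the edge of a counter wrap because time_before() compares via signed subtraction rather than a plain '<'. A userspace sketch of that comparison (the macro is re-implemented here for illustration):

	#include <stdio.h>

	/* Same wraparound-safe trick as the kernel's time_before(). */
	#define time_before(a, b) ((long)((a) - (b)) < 0)

	int main(void)
	{
		unsigned long jiffies = ~0UL - 1;	/* about to wrap */
		unsigned long stop_at = jiffies + 2;	/* wraps past zero */

		/* A naive '<' gets the wrong answer across the wrap;
		 * time_before() still reports that we are before stop_at.
		 */
		printf("naive: %d  time_before: %d\n",
		       jiffies < stop_at, time_before(jiffies, stop_at));
		return 0;
	}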
net/socket.c (+3, -4)
@@ -909,11 +909,10 @@
 		if (!dlci_ioctl_hook)
 			request_module("dlci");
 
-		if (dlci_ioctl_hook) {
-			mutex_lock(&dlci_ioctl_mutex);
+		mutex_lock(&dlci_ioctl_mutex);
+		if (dlci_ioctl_hook)
 			err = dlci_ioctl_hook(cmd, argp);
-			mutex_unlock(&dlci_ioctl_mutex);
-		}
+		mutex_unlock(&dlci_ioctl_mutex);
 		break;
 	default:
 		err = sock->ops->ioctl(sock, cmd, arg);
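Note: the reordering closes a module-unload race. The old code tested dlci_ioctl_hook before taking the mutex, so the hook could be unregistered in the window between the check and the locked call, and the re-read of the pointer inside the lock could then be NULL. The fixed shape, restated as a hypothetical helper for clarity:

	static int dlci_ioctl_call_sketch(unsigned int cmd, void __user *argp)
	{
		int err = -ENOPKG;

		/* Test and call under the same mutex the unregister path
		 * takes, so the hook cannot vanish between check and call.
		 */
		mutex_lock(&dlci_ioctl_mutex);
		if (dlci_ioctl_hook)
			err = dlci_ioctl_hook(cmd, argp);
		mutex_unlock(&dlci_ioctl_mutex);
		return err;
	}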
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c (-2)
@@ -237,14 +237,12 @@
 
 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
 {
-#ifdef RDMA_TRANSPORT_IWARP
 	if ((RDMA_TRANSPORT_IWARP ==
 	     rdma_node_get_transport(xprt->sc_cm_id->
				     device->node_type))
 	    && sge_count > 1)
 		return 1;
 	else
-#endif
 		return min_t(int, sge_count, xprt->sc_max_sge);
 }
 
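Note: the deleted #ifdef was dead weight rather than a configuration knob. RDMA_TRANSPORT_IWARP is an enumerator, not a macro, and the C preprocessor only sees macros, so the guard was never taken and the iWARP branch was silently compiled out. A minimal standalone demonstration of the pitfall (the enumerator name is made up to avoid the real header):

	#include <stdio.h>

	enum transport { TRANSPORT_IB, TRANSPORT_IWARP_SKETCH };

	int main(void)
	{
	#ifdef TRANSPORT_IWARP_SKETCH	/* enumerators are invisible to cpp */
		printf("guarded branch compiled in\n");
	#else
		printf("guarded branch silently dropped\n");
	#endif
		return 0;
	}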