Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Change u64/s64 to a long long integer type

Convert arch/powerpc/ over to long long based u64:

-#ifdef __powerpc64__
-# include <asm-generic/int-l64.h>
-#else
-# include <asm-generic/int-ll64.h>
-#endif
+#include <asm-generic/int-ll64.h>

This will avoid recurring spurious warnings in core kernel code that
come when people test on their own hardware. (i.e. x86 in ~98% of the
cases) This is what x86 uses and it generally helps keep 64-bit code
32-bit clean too.

[Adjusted to not impact user mode (from paulus) - sfr]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Ingo Molnar and committed by
Benjamin Herrenschmidt
fe333321 66c721e1

+70 -66
+1 -1
arch/powerpc/include/asm/rtas.h
··· 18 18 */ 19 19 20 20 #define RTAS_UNKNOWN_SERVICE (-1) 21 - #define RTAS_INSTANTIATE_MAX (1UL<<30) /* Don't instantiate rtas at/above this value */ 21 + #define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */ 22 22 23 23 /* Buffer size for ppc_rtas system call. */ 24 24 #define RTAS_RMOBUF_MAX (64 * 1024)
+6 -1
arch/powerpc/include/asm/types.h
··· 1 1 #ifndef _ASM_POWERPC_TYPES_H 2 2 #define _ASM_POWERPC_TYPES_H 3 3 4 - #ifdef __powerpc64__ 4 + /* 5 + * This is here because we used to use l64 for 64bit powerpc 6 + * and we don't want to impact user mode with our change to ll64 7 + * in the kernel. 8 + */ 9 + #if defined(__powerpc64__) && !defined(__KERNEL__) 5 10 # include <asm-generic/int-l64.h> 6 11 #else 7 12 # include <asm-generic/int-ll64.h>
+2 -2
arch/powerpc/kernel/dma-iommu.c
··· 79 79 "Warning: IOMMU offset too big for device mask\n"); 80 80 if (tbl) 81 81 printk(KERN_INFO 82 - "mask: 0x%08lx, table offset: 0x%08lx\n", 82 + "mask: 0x%08llx, table offset: 0x%08lx\n", 83 83 mask, tbl->it_offset); 84 84 else 85 - printk(KERN_INFO "mask: 0x%08lx, table unavailable\n", 85 + printk(KERN_INFO "mask: 0x%08llx, table unavailable\n", 86 86 mask); 87 87 return 0; 88 88 } else
+6 -6
arch/powerpc/kernel/iommu.c
··· 239 239 if (printk_ratelimit()) { 240 240 printk(KERN_INFO "iommu_free: invalid entry\n"); 241 241 printk(KERN_INFO "\tentry = 0x%lx\n", entry); 242 - printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr); 243 - printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl); 244 - printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno); 245 - printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size); 246 - printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset); 247 - printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index); 242 + printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr); 243 + printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); 244 + printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); 245 + printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); 246 + printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); 247 + printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); 248 248 WARN_ON(1); 249 249 } 250 250 return;
+5 -5
arch/powerpc/kernel/lparcfg.c
··· 240 240 if (rc) 241 241 return; 242 242 243 - seq_printf(m, "partition_entitled_capacity=%ld\n", 243 + seq_printf(m, "partition_entitled_capacity=%lld\n", 244 244 ppp_data.entitlement); 245 245 seq_printf(m, "group=%d\n", ppp_data.group_num); 246 246 seq_printf(m, "system_active_processors=%d\n", ··· 265 265 ppp_data.unallocated_weight); 266 266 seq_printf(m, "capacity_weight=%d\n", ppp_data.weight); 267 267 seq_printf(m, "capped=%d\n", ppp_data.capped); 268 - seq_printf(m, "unallocated_capacity=%ld\n", 268 + seq_printf(m, "unallocated_capacity=%lld\n", 269 269 ppp_data.unallocated_entitlement); 270 270 } 271 271 ··· 509 509 } else 510 510 return -EINVAL; 511 511 512 - pr_debug("%s: current_entitled = %lu, current_weight = %u\n", 512 + pr_debug("%s: current_entitled = %llu, current_weight = %u\n", 513 513 __func__, ppp_data.entitlement, ppp_data.weight); 514 514 515 - pr_debug("%s: new_entitled = %lu, new_weight = %u\n", 515 + pr_debug("%s: new_entitled = %llu, new_weight = %u\n", 516 516 __func__, new_entitled, new_weight); 517 517 518 518 retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight); ··· 558 558 pr_debug("%s: current_entitled = %lu, current_weight = %u\n", 559 559 __func__, mpp_data.entitled_mem, mpp_data.mem_weight); 560 560 561 - pr_debug("%s: new_entitled = %lu, new_weight = %u\n", 561 + pr_debug("%s: new_entitled = %llu, new_weight = %u\n", 562 562 __func__, new_entitled, new_weight); 563 563 564 564 rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
+3 -3
arch/powerpc/kernel/setup_64.c
··· 434 434 printk("Starting Linux PPC64 %s\n", init_utsname()->version); 435 435 436 436 printk("-----------------------------------------------------\n"); 437 - printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 438 - printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); 437 + printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); 438 + printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size()); 439 439 if (ppc64_caches.dline_size != 0x80) 440 440 printk("ppc64_caches.dcache_line_size = 0x%x\n", 441 441 ppc64_caches.dline_size); ··· 493 493 * bringup, we need to get at them in real mode. This means they 494 494 * must also be within the RMO region. 495 495 */ 496 - limit = min(0x10000000UL, lmb.rmo_size); 496 + limit = min(0x10000000ULL, lmb.rmo_size); 497 497 498 498 for_each_possible_cpu(i) { 499 499 unsigned long sp;
+2 -2
arch/powerpc/mm/stab.c
··· 251 251 252 252 paca[cpu].stab_addr = newstab; 253 253 paca[cpu].stab_real = virt_to_abs(newstab); 254 - printk(KERN_INFO "Segment table for CPU %d at 0x%lx " 255 - "virtual, 0x%lx absolute\n", 254 + printk(KERN_INFO "Segment table for CPU %d at 0x%llx " 255 + "virtual, 0x%llx absolute\n", 256 256 cpu, paca[cpu].stab_addr, paca[cpu].stab_real); 257 257 } 258 258 }
+3 -3
arch/powerpc/oprofile/op_model_pa6t.c
··· 132 132 for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) { 133 133 /* counters are 40 bit. Move to cputable at some point? */ 134 134 reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count; 135 - pr_debug("reset_value for pmc%u inited to 0x%lx\n", 135 + pr_debug("reset_value for pmc%u inited to 0x%llx\n", 136 136 pmc, reset_value[pmc]); 137 137 } 138 138 ··· 177 177 178 178 oprofile_running = 1; 179 179 180 - pr_debug("start on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0); 180 + pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0); 181 181 182 182 return 0; 183 183 } ··· 193 193 194 194 oprofile_running = 0; 195 195 196 - pr_debug("stop on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0); 196 + pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0); 197 197 } 198 198 199 199 /* handle the perfmon overflow vector */
+1 -1
arch/powerpc/platforms/cell/beat_interrupt.c
··· 99 99 err = beat_downcount_of_interrupt(irq_plug); 100 100 if (err != 0) { 101 101 if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ 102 - panic("Failed to downcount IRQ! Error = %16lx", err); 102 + panic("Failed to downcount IRQ! Error = %16llx", err); 103 103 104 104 printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); 105 105 }
+2 -2
arch/powerpc/platforms/cell/celleb_scc_epci.c
··· 405 405 hose->cfg_addr = ioremap(r.start, (r.end - r.start + 1)); 406 406 if (!hose->cfg_addr) 407 407 goto error; 408 - pr_debug("EPCI: cfg_addr map 0x%016lx->0x%016lx + 0x%016lx\n", 408 + pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n", 409 409 r.start, (unsigned long)hose->cfg_addr, (r.end - r.start + 1)); 410 410 411 411 if (of_address_to_resource(node, 2, &r)) ··· 413 413 hose->cfg_data = ioremap(r.start, (r.end - r.start + 1)); 414 414 if (!hose->cfg_data) 415 415 goto error; 416 - pr_debug("EPCI: cfg_data map 0x%016lx->0x%016lx + 0x%016lx\n", 416 + pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n", 417 417 r.start, (unsigned long)hose->cfg_data, (r.end - r.start + 1)); 418 418 419 419 hose->ops = &celleb_epci_ops;
+2 -2
arch/powerpc/platforms/cell/iommu.c
··· 855 855 */ 856 856 if (np && size < lmb_end_of_DRAM()) { 857 857 printk(KERN_WARNING "iommu: force-enabled, dma window" 858 - " (%ldMB) smaller than total memory (%ldMB)\n", 858 + " (%ldMB) smaller than total memory (%lldMB)\n", 859 859 size >> 20, lmb_end_of_DRAM() >> 20); 860 860 return -ENODEV; 861 861 } ··· 985 985 addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base; 986 986 archdata->dma_data = (void *)addr; 987 987 988 - dev_dbg(dev, "iommu: fixed addr = %lx\n", addr); 988 + dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); 989 989 } 990 990 991 991 static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+4 -4
arch/powerpc/platforms/cell/ras.c
··· 38 38 /* Todo: do some nicer parsing of bits and based on them go down 39 39 * to other sub-units FIRs and not only IIC 40 40 */ 41 - printk(KERN_ERR "Global Checkstop FIR : 0x%016lx\n", 41 + printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n", 42 42 in_be64(&pregs->checkstop_fir)); 43 - printk(KERN_ERR "Global Recoverable FIR : 0x%016lx\n", 43 + printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n", 44 44 in_be64(&pregs->checkstop_fir)); 45 - printk(KERN_ERR "Global MachineCheck FIR : 0x%016lx\n", 45 + printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n", 46 46 in_be64(&pregs->spec_att_mchk_fir)); 47 47 48 48 if (iregs == NULL) 49 49 return; 50 - printk(KERN_ERR "IOC FIR : 0x%016lx\n", 50 + printk(KERN_ERR "IOC FIR : 0x%016llx\n", 51 51 in_be64(&iregs->ioc_fir)); 52 52 53 53 }
+2 -2
arch/powerpc/platforms/cell/spu_base.c
··· 151 151 { 152 152 struct spu_priv2 __iomem *priv2 = spu->priv2; 153 153 154 - pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n", 154 + pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n", 155 155 __func__, slbe, slb->vsid, slb->esid); 156 156 157 157 out_be64(&priv2->slb_index_W, slbe); ··· 221 221 { 222 222 int ret; 223 223 224 - pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea); 224 + pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea); 225 225 226 226 /* 227 227 * Handle kernel space hash faults immediately. User hash
+1 -1
arch/powerpc/platforms/cell/spu_callbacks.c
··· 54 54 long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); 55 55 56 56 if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { 57 - pr_debug("%s: invalid syscall #%ld", __func__, s->nr_ret); 57 + pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret); 58 58 return -ENOSYS; 59 59 } 60 60
+2 -2
arch/powerpc/platforms/iseries/iommu.c
··· 66 66 67 67 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce); 68 68 if (rc) 69 - panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", 69 + panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n", 70 70 rc); 71 71 index++; 72 72 uaddr += TCE_PAGE_SIZE; ··· 81 81 while (npages--) { 82 82 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); 83 83 if (rc) 84 - panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", 84 + panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n", 85 85 rc); 86 86 index++; 87 87 }
+17 -18
arch/powerpc/platforms/pseries/iommu.c
··· 127 127 } 128 128 129 129 if (rc && printk_ratelimit()) { 130 - printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc); 131 - printk("\tindex = 0x%lx\n", (u64)tbl->it_index); 132 - printk("\ttcenum = 0x%lx\n", (u64)tcenum); 133 - printk("\ttce val = 0x%lx\n", tce ); 130 + printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); 131 + printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 132 + printk("\ttcenum = 0x%llx\n", (u64)tcenum); 133 + printk("\ttce val = 0x%llx\n", tce ); 134 134 show_stack(current, (unsigned long *)__get_SP()); 135 135 } 136 136 ··· 210 210 } 211 211 212 212 if (rc && printk_ratelimit()) { 213 - printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc); 214 - printk("\tindex = 0x%lx\n", (u64)tbl->it_index); 215 - printk("\tnpages = 0x%lx\n", (u64)npages); 216 - printk("\ttce[0] val = 0x%lx\n", tcep[0]); 213 + printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); 214 + printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 215 + printk("\tnpages = 0x%llx\n", (u64)npages); 216 + printk("\ttce[0] val = 0x%llx\n", tcep[0]); 217 217 show_stack(current, (unsigned long *)__get_SP()); 218 218 } 219 219 return ret; ··· 227 227 rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0); 228 228 229 229 if (rc && printk_ratelimit()) { 230 - printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc); 231 - printk("\tindex = 0x%lx\n", (u64)tbl->it_index); 232 - printk("\ttcenum = 0x%lx\n", (u64)tcenum); 230 + printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); 231 + printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 232 + printk("\ttcenum = 0x%llx\n", (u64)tcenum); 233 233 show_stack(current, (unsigned long *)__get_SP()); 234 234 } 235 235 ··· 246 246 247 247 if (rc && printk_ratelimit()) { 248 248 printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n"); 249 - printk("\trc = %ld\n", rc); 250 - printk("\tindex = 0x%lx\n", (u64)tbl->it_index); 251 - printk("\tnpages = 0x%lx\n", (u64)npages); 249 + printk("\trc = %lld\n", rc); 250 + printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 251 + printk("\tnpages = 0x%llx\n", (u64)npages); 252 252 show_stack(current, (unsigned long *)__get_SP()); 253 253 } 254 254 } ··· 261 261 rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret); 262 262 263 263 if (rc && printk_ratelimit()) { 264 - printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%ld\n", 265 - rc); 266 - printk("\tindex = 0x%lx\n", (u64)tbl->it_index); 267 - printk("\ttcenum = 0x%lx\n", (u64)tcenum); 264 + printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc); 265 + printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 266 + printk("\ttcenum = 0x%llx\n", (u64)tcenum); 268 267 show_stack(current, (unsigned long *)__get_SP()); 269 268
+1 -1
arch/powerpc/sysdev/mpic.c
··· 435 435 addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); 436 436 } 437 437 438 - printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%lx\n", 438 + printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", 439 439 PCI_SLOT(devfn), PCI_FUNC(devfn), 440 440 flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr); 441 441
+3 -3
drivers/net/pasemi_mac.c
··· 712 712 rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 713 713 ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); 714 714 715 - printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n", 715 + printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n", 716 716 macrx, *chan->status); 717 717 718 718 printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", ··· 730 730 731 731 cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); 732 732 733 - printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\ 734 - "tx status 0x%016lx\n", mactx, *chan->status); 733 + printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\ 734 + "tx status 0x%016llx\n", mactx, *chan->status); 735 735 736 736 printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); 737 737 }
+1 -1
drivers/pcmcia/electra_cf.c
··· 297 297 goto fail3; 298 298 } 299 299 300 - dev_info(device, "at mem 0x%lx io 0x%lx irq %d\n", 300 + dev_info(device, "at mem 0x%lx io 0x%llx irq %d\n", 301 301 cf->mem_phys, io.start, cf->irq); 302 302 303 303 cf->active = 1;
+6 -6
drivers/scsi/ibmvscsi/ibmvscsi.c
··· 1061 1061 } 1062 1062 1063 1063 sdev_printk(KERN_INFO, cmd->device, 1064 - "aborting command. lun 0x%lx, tag 0x%lx\n", 1064 + "aborting command. lun 0x%llx, tag 0x%llx\n", 1065 1065 (((u64) lun) << 48), (u64) found_evt); 1066 1066 1067 1067 wait_for_completion(&evt->comp); ··· 1082 1082 if (rsp_rc) { 1083 1083 if (printk_ratelimit()) 1084 1084 sdev_printk(KERN_WARNING, cmd->device, 1085 - "abort code %d for task tag 0x%lx\n", 1085 + "abort code %d for task tag 0x%llx\n", 1086 1086 rsp_rc, tsk_mgmt->task_tag); 1087 1087 return FAILED; 1088 1088 } ··· 1102 1102 1103 1103 if (found_evt == NULL) { 1104 1104 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1105 - sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n", 1105 + sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n", 1106 1106 tsk_mgmt->task_tag); 1107 1107 return SUCCESS; 1108 1108 } 1109 1109 1110 - sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n", 1110 + sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n", 1111 1111 tsk_mgmt->task_tag); 1112 1112 1113 1113 cmd->result = (DID_ABORT << 16); ··· 1182 1182 return FAILED; 1183 1183 } 1184 1184 1185 - sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n", 1185 + sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n", 1186 1186 (((u64) lun) << 48)); 1187 1187 1188 1188 wait_for_completion(&evt->comp); ··· 1203 1203 if (rsp_rc) { 1204 1204 if (printk_ratelimit()) 1205 1205 sdev_printk(KERN_WARNING, cmd->device, 1206 - "reset code %d for task tag 0x%lx\n", 1206 + "reset code %d for task tag 0x%llx\n", 1207 1207 rsp_rc, tsk_mgmt->task_tag); 1208 1208 return FAILED; 1209 1209 }