Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
[SPARC64]: Fix inline directive in pci_iommu.c
[SPARC64]: Fix arg passing to compat_sys_ipc().
[SPARC]: Fix section mismatch warnings in pci.c and pcic.c
[SUNRPC]: Make sure on-stack cmsg buffer is properly aligned.
[SPARC]: avoid CHILD_MAX and OPEN_MAX constants
[SPARC64]: Fix SBUS IOMMU allocation code.

+274 -361
+2 -2
arch/sparc/kernel/pcic.c
··· 601 601 /* 602 602 * Normally called from {do_}pci_scan_bus... 603 603 */ 604 - void __init pcibios_fixup_bus(struct pci_bus *bus) 604 + void __devinit pcibios_fixup_bus(struct pci_bus *bus) 605 605 { 606 606 struct pci_dev *dev; 607 607 int i, has_io, has_mem; ··· 842 842 /* 843 843 * Other archs parse arguments here. 844 844 */ 845 - char * __init pcibios_setup(char *str) 845 + char * __devinit pcibios_setup(char *str) 846 846 { 847 847 return str; 848 848 }
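
The __init → __devinit change works because pcibios_fixup_bus() and pcibios_setup() can be reached from the PCI probe path well after boot (e.g. for a hot-plugged bus), so they must not live in memory that is discarded along with .init.text; modpost reports exactly that kind of cross-section reference. A rough, user-space-compilable sketch of the pattern being warned about (the macro stand-in and function names are illustrative, not the kernel's):

#define __init __attribute__((section(".init.text")))   /* stand-in for <linux/init.h> */

static int __init boot_time_setup(void)          /* discarded once boot finishes */
{
        return 0;
}

int probe_after_boot(void)                        /* may run much later */
{
        /* A reference from ordinary .text into .init.text is what modpost
         * flags as a section mismatch; the hunks above resolve it by
         * relaxing the callee's annotation from __init to __devinit. */
        return boot_time_setup();
}
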
+2 -2
arch/sparc/kernel/sys_sunos.c
··· 910 910 ret = ARG_MAX; 911 911 break; 912 912 case _SC_CHILD_MAX: 913 - ret = -1; /* no limit */ 913 + ret = current->signal->rlim[RLIMIT_NPROC].rlim_cur; 914 914 break; 915 915 case _SC_CLK_TCK: 916 916 ret = HZ; ··· 919 919 ret = NGROUPS_MAX; 920 920 break; 921 921 case _SC_OPEN_MAX: 922 - ret = OPEN_MAX; 922 + ret = current->signal->rlim[RLIMIT_NOFILE].rlim_cur; 923 923 break; 924 924 case _SC_JOB_CONTROL: 925 925 ret = 1; /* yes, we do support job control */
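
Both sysconf cases now report the calling task's actual resource limits rather than the fixed OPEN_MAX and CHILD_MAX constants, which need not match the per-process rlimits that really apply. A small user-space analogue of the same idea (plain getrlimit(), nothing kernel-specific):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit nofile, nproc;

        getrlimit(RLIMIT_NOFILE, &nofile);      /* what _SC_OPEN_MAX should report */
        getrlimit(RLIMIT_NPROC, &nproc);        /* what _SC_CHILD_MAX should report */

        printf("open max:  %llu\n", (unsigned long long)nofile.rlim_cur);
        printf("child max: %llu\n", (unsigned long long)nproc.rlim_cur);
        return 0;
}
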
+2 -2
arch/sparc64/kernel/pci.c
··· 327 327 328 328 subsys_initcall(pcibios_init); 329 329 330 - void pcibios_fixup_bus(struct pci_bus *pbus) 330 + void __devinit pcibios_fixup_bus(struct pci_bus *pbus) 331 331 { 332 332 struct pci_pbm_info *pbm = pbus->sysdata; 333 333 ··· 405 405 } 406 406 EXPORT_SYMBOL(pcibios_bus_to_resource); 407 407 408 - char * __init pcibios_setup(char *str) 408 + char * __devinit pcibios_setup(char *str) 409 409 { 410 410 return str; 411 411 }
+1 -1
arch/sparc64/kernel/pci_iommu.c
··· 64 64 #define IOPTE_IS_DUMMY(iommu, iopte) \ 65 65 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) 66 66 67 - static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) 67 + static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) 68 68 { 69 69 unsigned long val = iopte_val(*iopte); 70 70
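
The pci_iommu.c hunk only reorders declaration specifiers: GCC accepts inline after the return type, but the preferred form groups all specifiers before the type, and newer GCCs warn about the old placement. Illustrative only:

static void inline old_ordering(void) { }       /* accepted, but newer GCCs warn */
static inline void new_ordering(void) { }       /* preferred form, as in the hunk above */
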
+248 -344
arch/sparc64/kernel/sbus.c
··· 24 24 25 25 #include "iommu_common.h" 26 26 27 - /* These should be allocated on an SMP_CACHE_BYTES 28 - * aligned boundary for optimal performance. 29 - * 30 - * On SYSIO, using an 8K page size we have 1GB of SBUS 31 - * DMA space mapped. We divide this space into equally 32 - * sized clusters. We allocate a DMA mapping from the 33 - * cluster that matches the order of the allocation, or 34 - * if the order is greater than the number of clusters, 35 - * we try to allocate from the last cluster. 36 - */ 37 - 38 - #define NCLUSTERS 8UL 39 - #define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL) 40 - #define CLUSTER_SIZE (ONE_GIG / NCLUSTERS) 41 - #define CLUSTER_MASK (CLUSTER_SIZE - 1) 42 - #define CLUSTER_NPAGES (CLUSTER_SIZE >> IO_PAGE_SHIFT) 43 27 #define MAP_BASE ((u32)0xc0000000) 44 28 29 + struct sbus_iommu_arena { 30 + unsigned long *map; 31 + unsigned int hint; 32 + unsigned int limit; 33 + }; 34 + 45 35 struct sbus_iommu { 46 - /*0x00*/spinlock_t lock; 36 + spinlock_t lock; 47 37 48 - /*0x08*/iopte_t *page_table; 49 - /*0x10*/unsigned long strbuf_regs; 50 - /*0x18*/unsigned long iommu_regs; 51 - /*0x20*/unsigned long sbus_control_reg; 38 + struct sbus_iommu_arena arena; 52 39 53 - /*0x28*/volatile unsigned long strbuf_flushflag; 40 + iopte_t *page_table; 41 + unsigned long strbuf_regs; 42 + unsigned long iommu_regs; 43 + unsigned long sbus_control_reg; 54 44 55 - /* If NCLUSTERS is ever decresed to 4 or lower, 56 - * you must increase the size of the type of 57 - * these counters. You have been duly warned. -DaveM 58 - */ 59 - /*0x30*/struct { 60 - u16 next; 61 - u16 flush; 62 - } alloc_info[NCLUSTERS]; 63 - 64 - /* The lowest used consistent mapping entry. Since 65 - * we allocate consistent maps out of cluster 0 this 66 - * is relative to the beginning of closter 0. 67 - */ 68 - /*0x50*/u32 lowest_consistent_map; 45 + volatile unsigned long strbuf_flushflag; 69 46 }; 70 47 71 48 /* Offsets from iommu_regs */ ··· 67 90 upa_writeq(0, tag); 68 91 tag += 8UL; 69 92 } 70 - upa_readq(iommu->sbus_control_reg); 71 - 72 - for (entry = 0; entry < NCLUSTERS; entry++) { 73 - iommu->alloc_info[entry].flush = 74 - iommu->alloc_info[entry].next; 75 - } 76 - } 77 - 78 - static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages) 79 - { 80 - while (npages--) 81 - upa_writeq(base + (npages << IO_PAGE_SHIFT), 82 - iommu->iommu_regs + IOMMU_FLUSH); 83 93 upa_readq(iommu->sbus_control_reg); 84 94 } 85 95 ··· 120 156 base, npages); 121 157 } 122 158 123 - static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages) 159 + /* Based largely upon the ppc64 iommu allocator. 
*/ 160 + static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages) 124 161 { 125 - iopte_t *iopte, *limit, *first, *cluster; 126 - unsigned long cnum, ent, nent, flush_point, found; 162 + struct sbus_iommu_arena *arena = &iommu->arena; 163 + unsigned long n, i, start, end, limit; 164 + int pass; 127 165 128 - cnum = 0; 129 - nent = 1; 130 - while ((1UL << cnum) < npages) 131 - cnum++; 132 - if(cnum >= NCLUSTERS) { 133 - nent = 1UL << (cnum - NCLUSTERS); 134 - cnum = NCLUSTERS - 1; 135 - } 136 - iopte = iommu->page_table + (cnum * CLUSTER_NPAGES); 166 + limit = arena->limit; 167 + start = arena->hint; 168 + pass = 0; 137 169 138 - if (cnum == 0) 139 - limit = (iommu->page_table + 140 - iommu->lowest_consistent_map); 141 - else 142 - limit = (iopte + CLUSTER_NPAGES); 143 - 144 - iopte += ((ent = iommu->alloc_info[cnum].next) << cnum); 145 - flush_point = iommu->alloc_info[cnum].flush; 146 - 147 - first = iopte; 148 - cluster = NULL; 149 - found = 0; 150 - for (;;) { 151 - if (iopte_val(*iopte) == 0UL) { 152 - found++; 153 - if (!cluster) 154 - cluster = iopte; 155 - } else { 156 - /* Used cluster in the way */ 157 - cluster = NULL; 158 - found = 0; 159 - } 160 - 161 - if (found == nent) 162 - break; 163 - 164 - iopte += (1 << cnum); 165 - ent++; 166 - if (iopte >= limit) { 167 - iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES)); 168 - ent = 0; 169 - 170 - /* Multiple cluster allocations must not wrap */ 171 - cluster = NULL; 172 - found = 0; 173 - } 174 - if (ent == flush_point) 170 + again: 171 + n = find_next_zero_bit(arena->map, limit, start); 172 + end = n + npages; 173 + if (unlikely(end >= limit)) { 174 + if (likely(pass < 1)) { 175 + limit = start; 176 + start = 0; 175 177 __iommu_flushall(iommu); 176 - if (iopte == first) 177 - goto bad; 178 - } 179 - 180 - /* ent/iopte points to the last cluster entry we're going to use, 181 - * so save our place for the next allocation. 182 - */ 183 - if ((iopte + (1 << cnum)) >= limit) 184 - ent = 0; 185 - else 186 - ent = ent + 1; 187 - iommu->alloc_info[cnum].next = ent; 188 - if (ent == flush_point) 189 - __iommu_flushall(iommu); 190 - 191 - /* I've got your streaming cluster right here buddy boy... */ 192 - return cluster; 193 - 194 - bad: 195 - printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n", 196 - npages); 197 - return NULL; 198 - } 199 - 200 - static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages) 201 - { 202 - unsigned long cnum, ent, nent; 203 - iopte_t *iopte; 204 - 205 - cnum = 0; 206 - nent = 1; 207 - while ((1UL << cnum) < npages) 208 - cnum++; 209 - if(cnum >= NCLUSTERS) { 210 - nent = 1UL << (cnum - NCLUSTERS); 211 - cnum = NCLUSTERS - 1; 212 - } 213 - ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum); 214 - iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT); 215 - do { 216 - iopte_val(*iopte) = 0UL; 217 - iopte += 1 << cnum; 218 - } while(--nent); 219 - 220 - /* If the global flush might not have caught this entry, 221 - * adjust the flush point such that we will flush before 222 - * ever trying to reuse it. 223 - */ 224 - #define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y))) 225 - if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush)) 226 - iommu->alloc_info[cnum].flush = ent; 227 - #undef between 228 - } 229 - 230 - /* We allocate consistent mappings from the end of cluster zero. 
*/ 231 - static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages) 232 - { 233 - iopte_t *iopte; 234 - 235 - iopte = iommu->page_table + (1 * CLUSTER_NPAGES); 236 - while (iopte > iommu->page_table) { 237 - iopte--; 238 - if (!(iopte_val(*iopte) & IOPTE_VALID)) { 239 - unsigned long tmp = npages; 240 - 241 - while (--tmp) { 242 - iopte--; 243 - if (iopte_val(*iopte) & IOPTE_VALID) 244 - break; 245 - } 246 - if (tmp == 0) { 247 - u32 entry = (iopte - iommu->page_table); 248 - 249 - if (entry < iommu->lowest_consistent_map) 250 - iommu->lowest_consistent_map = entry; 251 - return iopte; 252 - } 178 + pass++; 179 + goto again; 180 + } else { 181 + /* Scanned the whole thing, give up. */ 182 + return -1; 253 183 } 254 184 } 255 - return NULL; 256 - } 257 185 258 - static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages) 259 - { 260 - iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT); 261 - 262 - if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) { 263 - iopte_t *walk = iopte + npages; 264 - iopte_t *limit; 265 - 266 - limit = iommu->page_table + CLUSTER_NPAGES; 267 - while (walk < limit) { 268 - if (iopte_val(*walk) != 0UL) 269 - break; 270 - walk++; 186 + for (i = n; i < end; i++) { 187 + if (test_bit(i, arena->map)) { 188 + start = i + 1; 189 + goto again; 271 190 } 272 - iommu->lowest_consistent_map = 273 - (walk - iommu->page_table); 274 191 } 275 192 276 - while (npages--) 277 - *iopte++ = __iopte(0UL); 193 + for (i = n; i < end; i++) 194 + __set_bit(i, arena->map); 195 + 196 + arena->hint = end; 197 + 198 + return n; 199 + } 200 + 201 + static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages) 202 + { 203 + unsigned long i; 204 + 205 + for (i = base; i < (base + npages); i++) 206 + __clear_bit(i, arena->map); 207 + } 208 + 209 + static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize) 210 + { 211 + unsigned long tsbbase, order, sz, num_tsb_entries; 212 + 213 + num_tsb_entries = tsbsize / sizeof(iopte_t); 214 + 215 + /* Setup initial software IOMMU state. */ 216 + spin_lock_init(&iommu->lock); 217 + 218 + /* Allocate and initialize the free area map. */ 219 + sz = num_tsb_entries / 8; 220 + sz = (sz + 7UL) & ~7UL; 221 + iommu->arena.map = kzalloc(sz, GFP_KERNEL); 222 + if (!iommu->arena.map) { 223 + prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); 224 + prom_halt(); 225 + } 226 + iommu->arena.limit = num_tsb_entries; 227 + 228 + /* Now allocate and setup the IOMMU page table itself. 
*/ 229 + order = get_order(tsbsize); 230 + tsbbase = __get_free_pages(GFP_KERNEL, order); 231 + if (!tsbbase) { 232 + prom_printf("IOMMU: Error, gfp(tsb) failed.\n"); 233 + prom_halt(); 234 + } 235 + iommu->page_table = (iopte_t *)tsbbase; 236 + memset(iommu->page_table, 0, tsbsize); 237 + } 238 + 239 + static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages) 240 + { 241 + long entry; 242 + 243 + entry = sbus_arena_alloc(iommu, npages); 244 + if (unlikely(entry < 0)) 245 + return NULL; 246 + 247 + return iommu->page_table + entry; 248 + } 249 + 250 + static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages) 251 + { 252 + sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); 278 253 } 279 254 280 255 void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr) 281 256 { 282 - unsigned long order, first_page, flags; 283 257 struct sbus_iommu *iommu; 284 258 iopte_t *iopte; 259 + unsigned long flags, order, first_page; 285 260 void *ret; 286 261 int npages; 287 - 288 - if (size <= 0 || sdev == NULL || dvma_addr == NULL) 289 - return NULL; 290 262 291 263 size = IO_PAGE_ALIGN(size); 292 264 order = get_order(size); 293 265 if (order >= 10) 294 266 return NULL; 267 + 295 268 first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order); 296 269 if (first_page == 0UL) 297 270 return NULL; ··· 237 336 iommu = sdev->bus->iommu; 238 337 239 338 spin_lock_irqsave(&iommu->lock, flags); 240 - iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT); 241 - if (iopte == NULL) { 242 - spin_unlock_irqrestore(&iommu->lock, flags); 339 + iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); 340 + spin_unlock_irqrestore(&iommu->lock, flags); 341 + 342 + if (unlikely(iopte == NULL)) { 243 343 free_pages(first_page, order); 244 344 return NULL; 245 345 } 246 346 247 - /* Ok, we're committed at this point. 
*/ 248 - *dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT); 347 + *dvma_addr = (MAP_BASE + 348 + ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); 249 349 ret = (void *) first_page; 250 350 npages = size >> IO_PAGE_SHIFT; 351 + first_page = __pa(first_page); 251 352 while (npages--) { 252 - *iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE | 253 - (__pa(first_page) & IOPTE_PAGE)); 353 + iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE | 354 + IOPTE_WRITE | 355 + (first_page & IOPTE_PAGE)); 356 + iopte++; 254 357 first_page += IO_PAGE_SIZE; 255 358 } 256 - iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT); 257 - spin_unlock_irqrestore(&iommu->lock, flags); 258 359 259 360 return ret; 260 361 } 261 362 262 363 void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma) 263 364 { 264 - unsigned long order, npages; 265 365 struct sbus_iommu *iommu; 266 - 267 - if (size <= 0 || sdev == NULL || cpu == NULL) 268 - return; 366 + iopte_t *iopte; 367 + unsigned long flags, order, npages; 269 368 270 369 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 271 370 iommu = sdev->bus->iommu; 371 + iopte = iommu->page_table + 372 + ((dvma - MAP_BASE) >> IO_PAGE_SHIFT); 272 373 273 - spin_lock_irq(&iommu->lock); 274 - free_consistent_cluster(iommu, dvma, npages); 275 - iommu_flush(iommu, dvma, npages); 276 - spin_unlock_irq(&iommu->lock); 374 + spin_lock_irqsave(&iommu->lock, flags); 375 + 376 + free_npages(iommu, dvma - MAP_BASE, npages); 377 + 378 + spin_unlock_irqrestore(&iommu->lock, flags); 277 379 278 380 order = get_order(size); 279 381 if (order < 10) 280 382 free_pages((unsigned long)cpu, order); 281 383 } 282 384 283 - dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir) 385 + dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction) 284 386 { 285 - struct sbus_iommu *iommu = sdev->bus->iommu; 286 - unsigned long npages, pbase, flags; 287 - iopte_t *iopte; 288 - u32 dma_base, offset; 289 - unsigned long iopte_bits; 387 + struct sbus_iommu *iommu; 388 + iopte_t *base; 389 + unsigned long flags, npages, oaddr; 390 + unsigned long i, base_paddr; 391 + u32 bus_addr, ret; 392 + unsigned long iopte_protection; 290 393 291 - if (dir == SBUS_DMA_NONE) 394 + iommu = sdev->bus->iommu; 395 + 396 + if (unlikely(direction == SBUS_DMA_NONE)) 292 397 BUG(); 293 398 294 - pbase = (unsigned long) ptr; 295 - offset = (u32) (pbase & ~IO_PAGE_MASK); 296 - size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK)); 297 - pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK); 399 + oaddr = (unsigned long)ptr; 400 + npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 401 + npages >>= IO_PAGE_SHIFT; 298 402 299 403 spin_lock_irqsave(&iommu->lock, flags); 300 - npages = size >> IO_PAGE_SHIFT; 301 - iopte = alloc_streaming_cluster(iommu, npages); 302 - if (iopte == NULL) 303 - goto bad; 304 - dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT); 305 - npages = size >> IO_PAGE_SHIFT; 306 - iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE; 307 - if (dir != SBUS_DMA_TODEVICE) 308 - iopte_bits |= IOPTE_WRITE; 309 - while (npages--) { 310 - *iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE)); 311 - pbase += IO_PAGE_SIZE; 312 - } 313 - npages = size >> IO_PAGE_SHIFT; 404 + base = alloc_npages(iommu, npages); 314 405 spin_unlock_irqrestore(&iommu->lock, flags); 315 406 316 - return (dma_base | offset); 407 + if (unlikely(!base)) 408 + BUG(); 317 409 318 - bad: 319 - 
spin_unlock_irqrestore(&iommu->lock, flags); 320 - BUG(); 321 - return 0; 410 + bus_addr = (MAP_BASE + 411 + ((base - iommu->page_table) << IO_PAGE_SHIFT)); 412 + ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 413 + base_paddr = __pa(oaddr & IO_PAGE_MASK); 414 + 415 + iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE; 416 + if (direction != SBUS_DMA_TODEVICE) 417 + iopte_protection |= IOPTE_WRITE; 418 + 419 + for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) 420 + iopte_val(*base) = iopte_protection | base_paddr; 421 + 422 + return ret; 322 423 } 323 424 324 - void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction) 425 + void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) 325 426 { 326 427 struct sbus_iommu *iommu = sdev->bus->iommu; 327 - u32 dma_base = dma_addr & IO_PAGE_MASK; 328 - unsigned long flags; 428 + iopte_t *base; 429 + unsigned long flags, npages, i; 329 430 330 - size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base); 431 + if (unlikely(direction == SBUS_DMA_NONE)) 432 + BUG(); 433 + 434 + npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 435 + npages >>= IO_PAGE_SHIFT; 436 + base = iommu->page_table + 437 + ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT); 438 + 439 + bus_addr &= IO_PAGE_MASK; 331 440 332 441 spin_lock_irqsave(&iommu->lock, flags); 333 - free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT); 334 - sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction); 442 + sbus_strbuf_flush(iommu, bus_addr, npages, direction); 443 + for (i = 0; i < npages; i++) 444 + iopte_val(base[i]) = 0UL; 445 + free_npages(iommu, bus_addr - MAP_BASE, npages); 335 446 spin_unlock_irqrestore(&iommu->lock, flags); 336 447 } 337 448 338 449 #define SG_ENT_PHYS_ADDRESS(SG) \ 339 450 (__pa(page_address((SG)->page)) + (SG)->offset) 340 451 341 - static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits) 452 + static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, 453 + int nused, int nelems, unsigned long iopte_protection) 342 454 { 343 455 struct scatterlist *dma_sg = sg; 344 456 struct scatterlist *sg_end = sg + nelems; ··· 376 462 for (;;) { 377 463 unsigned long tmp; 378 464 379 - tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg); 465 + tmp = SG_ENT_PHYS_ADDRESS(sg); 380 466 len = sg->length; 381 467 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) { 382 468 pteval = tmp & IO_PAGE_MASK; ··· 392 478 sg++; 393 479 } 394 480 395 - pteval = ((pteval & IOPTE_PAGE) | iopte_bits); 481 + pteval = iopte_protection | (pteval & IOPTE_PAGE); 396 482 while (len > 0) { 397 483 *iopte++ = __iopte(pteval); 398 484 pteval += IO_PAGE_SIZE; ··· 423 509 } 424 510 } 425 511 426 - int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir) 512 + int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) 427 513 { 428 - struct sbus_iommu *iommu = sdev->bus->iommu; 429 - unsigned long flags, npages; 430 - iopte_t *iopte; 514 + struct sbus_iommu *iommu; 515 + unsigned long flags, npages, iopte_protection; 516 + iopte_t *base; 431 517 u32 dma_base; 432 518 struct scatterlist *sgtmp; 433 519 int used; 434 - unsigned long iopte_bits; 435 - 436 - if (dir == SBUS_DMA_NONE) 437 - BUG(); 438 520 439 521 /* Fast path single entry scatterlists. 
*/ 440 - if (nents == 1) { 441 - sg->dma_address = 522 + if (nelems == 1) { 523 + sglist->dma_address = 442 524 sbus_map_single(sdev, 443 - (page_address(sg->page) + sg->offset), 444 - sg->length, dir); 445 - sg->dma_length = sg->length; 525 + (page_address(sglist->page) + sglist->offset), 526 + sglist->length, direction); 527 + sglist->dma_length = sglist->length; 446 528 return 1; 447 529 } 448 530 449 - npages = prepare_sg(sg, nents); 531 + iommu = sdev->bus->iommu; 532 + 533 + if (unlikely(direction == SBUS_DMA_NONE)) 534 + BUG(); 535 + 536 + npages = prepare_sg(sglist, nelems); 450 537 451 538 spin_lock_irqsave(&iommu->lock, flags); 452 - iopte = alloc_streaming_cluster(iommu, npages); 453 - if (iopte == NULL) 454 - goto bad; 455 - dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT); 539 + base = alloc_npages(iommu, npages); 540 + spin_unlock_irqrestore(&iommu->lock, flags); 541 + 542 + if (unlikely(base == NULL)) 543 + BUG(); 544 + 545 + dma_base = MAP_BASE + 546 + ((base - iommu->page_table) << IO_PAGE_SHIFT); 456 547 457 548 /* Normalize DVMA addresses. */ 458 - sgtmp = sg; 459 - used = nents; 549 + used = nelems; 460 550 551 + sgtmp = sglist; 461 552 while (used && sgtmp->dma_length) { 462 553 sgtmp->dma_address += dma_base; 463 554 sgtmp++; 464 555 used--; 465 556 } 466 - used = nents - used; 557 + used = nelems - used; 467 558 468 - iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE; 469 - if (dir != SBUS_DMA_TODEVICE) 470 - iopte_bits |= IOPTE_WRITE; 559 + iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE; 560 + if (direction != SBUS_DMA_TODEVICE) 561 + iopte_protection |= IOPTE_WRITE; 471 562 472 - fill_sg(iopte, sg, used, nents, iopte_bits); 563 + fill_sg(base, sglist, used, nelems, iopte_protection); 564 + 473 565 #ifdef VERIFY_SG 474 - verify_sglist(sg, nents, iopte, npages); 566 + verify_sglist(sglist, nelems, base, npages); 475 567 #endif 476 - spin_unlock_irqrestore(&iommu->lock, flags); 477 568 478 569 return used; 479 - 480 - bad: 481 - spin_unlock_irqrestore(&iommu->lock, flags); 482 - BUG(); 483 - return 0; 484 570 } 485 571 486 - void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction) 572 + void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) 487 573 { 488 - unsigned long size, flags; 489 574 struct sbus_iommu *iommu; 490 - u32 dvma_base; 491 - int i; 575 + iopte_t *base; 576 + unsigned long flags, i, npages; 577 + u32 bus_addr; 492 578 493 - /* Fast path single entry scatterlists. 
*/ 494 - if (nents == 1) { 495 - sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction); 496 - return; 497 - } 498 - 499 - dvma_base = sg[0].dma_address & IO_PAGE_MASK; 500 - for (i = 0; i < nents; i++) { 501 - if (sg[i].dma_length == 0) 502 - break; 503 - } 504 - i--; 505 - size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base; 579 + if (unlikely(direction == SBUS_DMA_NONE)) 580 + BUG(); 506 581 507 582 iommu = sdev->bus->iommu; 583 + 584 + bus_addr = sglist->dma_address & IO_PAGE_MASK; 585 + 586 + for (i = 1; i < nelems; i++) 587 + if (sglist[i].dma_length == 0) 588 + break; 589 + i--; 590 + npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - 591 + bus_addr) >> IO_PAGE_SHIFT; 592 + 593 + base = iommu->page_table + 594 + ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT); 595 + 508 596 spin_lock_irqsave(&iommu->lock, flags); 509 - free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT); 510 - sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction); 597 + sbus_strbuf_flush(iommu, bus_addr, npages, direction); 598 + for (i = 0; i < npages; i++) 599 + iopte_val(base[i]) = 0UL; 600 + free_npages(iommu, bus_addr - MAP_BASE, npages); 511 601 spin_unlock_irqrestore(&iommu->lock, flags); 512 602 } 513 603 514 - void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction) 604 + void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) 515 605 { 516 - struct sbus_iommu *iommu = sdev->bus->iommu; 517 - unsigned long flags; 606 + struct sbus_iommu *iommu; 607 + unsigned long flags, npages; 518 608 519 - size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK)); 609 + iommu = sdev->bus->iommu; 610 + 611 + npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 612 + npages >>= IO_PAGE_SHIFT; 613 + bus_addr &= IO_PAGE_MASK; 520 614 521 615 spin_lock_irqsave(&iommu->lock, flags); 522 - sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction); 616 + sbus_strbuf_flush(iommu, bus_addr, npages, direction); 523 617 spin_unlock_irqrestore(&iommu->lock, flags); 524 618 } 525 619 ··· 535 613 { 536 614 } 537 615 538 - void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction) 616 + void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) 539 617 { 540 - struct sbus_iommu *iommu = sdev->bus->iommu; 541 - unsigned long flags, size; 542 - u32 base; 543 - int i; 618 + struct sbus_iommu *iommu; 619 + unsigned long flags, npages, i; 620 + u32 bus_addr; 544 621 545 - base = sg[0].dma_address & IO_PAGE_MASK; 546 - for (i = 0; i < nents; i++) { 547 - if (sg[i].dma_length == 0) 622 + iommu = sdev->bus->iommu; 623 + 624 + bus_addr = sglist[0].dma_address & IO_PAGE_MASK; 625 + for (i = 0; i < nelems; i++) { 626 + if (!sglist[i].dma_length) 548 627 break; 549 628 } 550 629 i--; 551 - size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base; 630 + npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) 631 + - bus_addr) >> IO_PAGE_SHIFT; 552 632 553 633 spin_lock_irqsave(&iommu->lock, flags); 554 - sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction); 634 + sbus_strbuf_flush(iommu, bus_addr, npages, direction); 555 635 spin_unlock_irqrestore(&iommu->lock, flags); 556 636 } 557 637 ··· 1028 1104 struct linux_prom64_registers *pr; 1029 1105 struct device_node *dp; 1030 1106 struct sbus_iommu *iommu; 1031 - unsigned 
long regs, tsb_base; 1107 + unsigned long regs; 1032 1108 u64 control; 1033 1109 int i; 1034 1110 ··· 1056 1132 1057 1133 memset(iommu, 0, sizeof(*iommu)); 1058 1134 1059 - /* We start with no consistent mappings. */ 1060 - iommu->lowest_consistent_map = CLUSTER_NPAGES; 1061 - 1062 - for (i = 0; i < NCLUSTERS; i++) { 1063 - iommu->alloc_info[i].flush = 0; 1064 - iommu->alloc_info[i].next = 0; 1065 - } 1066 - 1067 1135 /* Setup spinlock. */ 1068 1136 spin_lock_init(&iommu->lock); 1069 1137 ··· 1075 1159 sbus->portid, regs); 1076 1160 1077 1161 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ 1162 + sbus_iommu_table_init(iommu, IO_TSB_SIZE); 1163 + 1078 1164 control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL); 1079 1165 control = ((7UL << 16UL) | 1080 1166 (0UL << 2UL) | 1081 1167 (1UL << 1UL) | 1082 1168 (1UL << 0UL)); 1083 - 1084 - /* Using the above configuration we need 1MB iommu page 1085 - * table (128K ioptes * 8 bytes per iopte). This is 1086 - * page order 7 on UltraSparc. 1087 - */ 1088 - tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE)); 1089 - if (tsb_base == 0UL) { 1090 - prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n"); 1091 - prom_halt(); 1092 - } 1093 - 1094 - iommu->page_table = (iopte_t *) tsb_base; 1095 - memset(iommu->page_table, 0, IO_TSB_SIZE); 1096 - 1097 1169 upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL); 1098 1170 1099 1171 /* Clean out any cruft in the IOMMU using ··· 1099 1195 upa_readq(iommu->sbus_control_reg); 1100 1196 1101 1197 /* Give the TSB to SYSIO. */ 1102 - upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE); 1198 + upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE); 1103 1199 1104 1200 /* Setup streaming buffer, DE=1 SB_EN=1 */ 1105 1201 control = (1UL << 1UL) | (1UL << 0UL);
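
The sbus.c rewrite is the bulk of the diff: the fixed-size cluster allocator, together with its lowest_consistent_map and alloc_info bookkeeping, is replaced by a single bitmap arena based, as the new comment says, on the ppc64 iommu allocator. A free run of npages is searched for from a moving hint; if the search reaches the end of the table it wraps around once, flushing the IOMMU before rescanning, and only then fails. A stand-alone sketch of that search loop, with simplified names and open-coded bit helpers instead of the kernel's find_next_zero_bit()/test_bit() (illustrative only, not the kernel code):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define ARENA_BITS      1024UL
#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)

struct arena {
        unsigned long map[ARENA_BITS / BITS_PER_LONG];
        unsigned long hint;             /* next search starts here */
        unsigned long limit;            /* number of valid entries */
};

static bool arena_test_bit(const unsigned long *map, unsigned long n)
{
        return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1UL;
}

static void arena_set_bit(unsigned long *map, unsigned long n)
{
        map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

/* Return the first index of a free run of npages, or -1 on failure. */
static long arena_alloc(struct arena *arena, unsigned long npages)
{
        unsigned long n, i, start = arena->hint;
        int pass = 0;

again:
        for (n = start; n + npages <= arena->limit; n++) {
                for (i = 0; i < npages; i++)
                        if (arena_test_bit(arena->map, n + i))
                                break;
                if (i == npages) {                      /* free run found */
                        for (i = 0; i < npages; i++)
                                arena_set_bit(arena->map, n + i);
                        arena->hint = n + npages;
                        return (long)n;
                }
                n += i;                                 /* skip past the used bit */
        }
        if (pass++ == 0) {
                /* Wrap around once; the kernel flushes the IOMMU here
                 * before rescanning, then gives up on the second miss. */
                start = 0;
                goto again;
        }
        return -1;
}

int main(void)
{
        struct arena a = { .limit = ARENA_BITS };

        printf("first 4-page run at %ld\n", arena_alloc(&a, 4));
        printf("next  2-page run at %ld\n", arena_alloc(&a, 2));
        return 0;
}
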
-1
arch/sparc64/kernel/sys32.S
··· 91 91 SIGN1(sys32_mkdir, sys_mkdir, %o1) 92 92 SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5) 93 93 SIGN1(sys32_sysfs, compat_sys_sysfs, %o0) 94 - SIGN3(sys32_ipc, compat_sys_ipc, %o1, %o2, %o3) 95 94 SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1) 96 95 SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1) 97 96 SIGN1(sys32_prctl, sys_prctl, %o0)
+2 -2
arch/sparc64/kernel/sys_sunos32.c
··· 871 871 ret = ARG_MAX; 872 872 break; 873 873 case _SC_CHILD_MAX: 874 - ret = -1; /* no limit */ 874 + ret = current->signal->rlim[RLIMIT_NPROC].rlim_cur; 875 875 break; 876 876 case _SC_CLK_TCK: 877 877 ret = HZ; ··· 880 880 ret = NGROUPS_MAX; 881 881 break; 882 882 case _SC_OPEN_MAX: 883 - ret = OPEN_MAX; 883 + ret = current->signal->rlim[RLIMIT_NOFILE].rlim_cur; 884 884 break; 885 885 case _SC_JOB_CONTROL: 886 886 ret = 1; /* yes, we do support job control */
+1 -1
arch/sparc64/kernel/systbls.S
··· 62 62 /*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir 63 63 .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 64 64 /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, compat_sys_sysinfo 65 - .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex 65 + .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex 66 66 /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid 67 67 .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 68 68 /*230*/ .word sys32_select, compat_sys_time, sys32_splice, compat_sys_stime, compat_sys_statfs64
+4 -2
arch/sparc64/solaris/misc.c
··· 363 363 { 364 364 switch (id) { 365 365 case SOLARIS_CONFIG_NGROUPS: return NGROUPS_MAX; 366 - case SOLARIS_CONFIG_CHILD_MAX: return -1; /* no limit */ 367 - case SOLARIS_CONFIG_OPEN_FILES: return OPEN_MAX; 366 + case SOLARIS_CONFIG_CHILD_MAX: 367 + return current->signal->rlim[RLIMIT_NPROC].rlim_cur; 368 + case SOLARIS_CONFIG_OPEN_FILES: 369 + return current->signal->rlim[RLIMIT_NOFILE].rlim_cur; 368 370 case SOLARIS_CONFIG_POSIX_VER: return 199309; 369 371 case SOLARIS_CONFIG_PAGESIZE: return PAGE_SIZE; 370 372 case SOLARIS_CONFIG_XOPEN_VER: return 3;
+12 -4
net/sunrpc/svcsock.c
··· 452 452 struct in_pktinfo pkti; 453 453 struct in6_pktinfo pkti6; 454 454 }; 455 + #define SVC_PKTINFO_SPACE \ 456 + CMSG_SPACE(sizeof(union svc_pktinfo_u)) 455 457 456 458 static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) 457 459 { ··· 493 491 struct svc_sock *svsk = rqstp->rq_sock; 494 492 struct socket *sock = svsk->sk_sock; 495 493 int slen; 496 - char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))]; 497 - struct cmsghdr *cmh = (struct cmsghdr *)buffer; 494 + union { 495 + struct cmsghdr hdr; 496 + long all[SVC_PKTINFO_SPACE / sizeof(long)]; 497 + } buffer; 498 + struct cmsghdr *cmh = &buffer.hdr; 498 499 int len = 0; 499 500 int result; 500 501 int size; ··· 750 745 struct svc_sock *svsk = rqstp->rq_sock; 751 746 struct svc_serv *serv = svsk->sk_server; 752 747 struct sk_buff *skb; 753 - char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))]; 754 - struct cmsghdr *cmh = (struct cmsghdr *)buffer; 748 + union { 749 + struct cmsghdr hdr; 750 + long all[SVC_PKTINFO_SPACE / sizeof(long)]; 751 + } buffer; 752 + struct cmsghdr *cmh = &buffer.hdr; 755 753 int err, len; 756 754 struct msghdr msg = { 757 755 .msg_name = svc_addr(rqstp),
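
The svcsock.c change addresses alignment: a bare char[] on the stack carries no particular alignment, yet CMSG_FIRSTHDR() hands back a struct cmsghdr pointer into that buffer, and on strict-alignment machines such as SPARC dereferencing it can fault. Putting the buffer in a union with struct cmsghdr (padded out in longs, as the patch does) guarantees suitable alignment. The same idiom in ordinary user-space code (illustrative; uses the libc cmsg macros, not the kernel's svc helpers):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
        union {
                struct cmsghdr hdr;                             /* forces alignment */
                char buf[CMSG_SPACE(sizeof(struct in_pktinfo))];
        } control;
        struct msghdr msg;
        struct cmsghdr *cmh;

        memset(&control, 0, sizeof(control));
        memset(&msg, 0, sizeof(msg));
        msg.msg_control = &control;
        msg.msg_controllen = sizeof(control);

        cmh = CMSG_FIRSTHDR(&msg);
        cmh->cmsg_level = IPPROTO_IP;
        cmh->cmsg_type = IP_PKTINFO;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));

        printf("control buffer at %p, suitably aligned for cmsghdr\n",
               (void *)&control);
        return 0;
}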