Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge with ../linux-2.6-smp

authored by

Russell King and committed by
Russell King
99a0616b a8396883

+156 -11
+123
arch/arm/kernel/smp.c
··· 502 502 { 503 503 return -EINVAL; 504 504 } 505 + 506 + static int 507 + on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait, 508 + cpumask_t mask) 509 + { 510 + int ret = 0; 511 + 512 + preempt_disable(); 513 + 514 + ret = smp_call_function_on_cpu(func, info, retry, wait, mask); 515 + if (cpu_isset(smp_processor_id(), mask)) 516 + func(info); 517 + 518 + preempt_enable(); 519 + 520 + return ret; 521 + } 522 + 523 + /**********************************************************************/ 524 + 525 + /* 526 + * TLB operations 527 + */ 528 + struct tlb_args { 529 + struct vm_area_struct *ta_vma; 530 + unsigned long ta_start; 531 + unsigned long ta_end; 532 + }; 533 + 534 + static inline void ipi_flush_tlb_all(void *ignored) 535 + { 536 + local_flush_tlb_all(); 537 + } 538 + 539 + static inline void ipi_flush_tlb_mm(void *arg) 540 + { 541 + struct mm_struct *mm = (struct mm_struct *)arg; 542 + 543 + local_flush_tlb_mm(mm); 544 + } 545 + 546 + static inline void ipi_flush_tlb_page(void *arg) 547 + { 548 + struct tlb_args *ta = (struct tlb_args *)arg; 549 + 550 + local_flush_tlb_page(ta->ta_vma, ta->ta_start); 551 + } 552 + 553 + static inline void ipi_flush_tlb_kernel_page(void *arg) 554 + { 555 + struct tlb_args *ta = (struct tlb_args *)arg; 556 + 557 + local_flush_tlb_kernel_page(ta->ta_start); 558 + } 559 + 560 + static inline void ipi_flush_tlb_range(void *arg) 561 + { 562 + struct tlb_args *ta = (struct tlb_args *)arg; 563 + 564 + local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 565 + } 566 + 567 + static inline void ipi_flush_tlb_kernel_range(void *arg) 568 + { 569 + struct tlb_args *ta = (struct tlb_args *)arg; 570 + 571 + local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); 572 + } 573 + 574 + void flush_tlb_all(void) 575 + { 576 + on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1); 577 + } 578 + 579 + void flush_tlb_mm(struct mm_struct *mm) 580 + { 581 + cpumask_t mask = mm->cpu_vm_mask; 582 + 583 + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask); 584 + } 585 + 586 + void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 587 + { 588 + cpumask_t mask = vma->vm_mm->cpu_vm_mask; 589 + struct tlb_args ta; 590 + 591 + ta.ta_vma = vma; 592 + ta.ta_start = uaddr; 593 + 594 + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask); 595 + } 596 + 597 + void flush_tlb_kernel_page(unsigned long kaddr) 598 + { 599 + struct tlb_args ta; 600 + 601 + ta.ta_start = kaddr; 602 + 603 + on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1); 604 + } 605 + 606 + void flush_tlb_range(struct vm_area_struct *vma, 607 + unsigned long start, unsigned long end) 608 + { 609 + cpumask_t mask = vma->vm_mm->cpu_vm_mask; 610 + struct tlb_args ta; 611 + 612 + ta.ta_vma = vma; 613 + ta.ta_start = start; 614 + ta.ta_end = end; 615 + 616 + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask); 617 + } 618 + 619 + void flush_tlb_kernel_range(unsigned long start, unsigned long end) 620 + { 621 + struct tlb_args ta; 622 + 623 + ta.ta_start = start; 624 + ta.ta_end = end; 625 + 626 + on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1); 627 + }
+1 -1
arch/arm/mm/init.c
··· 437 437 memtable_init(mi); 438 438 if (mdesc->map_io) 439 439 mdesc->map_io(); 440 - flush_tlb_all(); 440 + local_flush_tlb_all(); 441 441 442 442 /* 443 443 * initialise the zones within each node
+1 -1
arch/arm/mm/mm-armv.c
··· 682 682 } 683 683 684 684 flush_cache_all(); 685 - flush_tlb_all(); 685 + local_flush_tlb_all(); 686 686 687 687 top_pmd = pmd_off_k(0xffff0000); 688 688 }
+9 -3
include/asm-arm/system.h
··· 290 290 }) 291 291 292 292 #ifdef CONFIG_SMP 293 - #error SMP not supported 294 293 295 294 #define smp_mb() mb() 296 295 #define smp_rmb() rmb() ··· 303 304 #define smp_wmb() barrier() 304 305 #define smp_read_barrier_depends() do { } while(0) 305 306 307 + #endif /* CONFIG_SMP */ 308 + 306 309 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) 307 310 /* 308 311 * On the StrongARM, "swp" is terminally broken since it bypasses the ··· 317 316 * 318 317 * We choose (1) since its the "easiest" to achieve here and is not 319 318 * dependent on the processor type. 319 + * 320 + * NOTE that this solution won't work on an SMP system, so explicitly 321 + * forbid it here. 320 322 */ 323 + #ifdef CONFIG_SMP 324 + #error SMP is not supported on SA1100/SA110 325 + #else 321 326 #define swp_is_buggy 327 + #endif 322 328 #endif 323 329 324 330 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) ··· 368 360 369 361 return ret; 370 362 } 371 - 372 - #endif /* CONFIG_SMP */ 373 363 374 364 #endif /* __ASSEMBLY__ */ 375 365
+22 -6
include/asm-arm/tlbflush.h
··· 235 235 236 236 #define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) 237 237 238 - static inline void flush_tlb_all(void) 238 + static inline void local_flush_tlb_all(void) 239 239 { 240 240 const int zero = 0; 241 241 const unsigned int __tlb_flag = __cpu_tlb_flags; ··· 253 253 asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); 254 254 } 255 255 256 - static inline void flush_tlb_mm(struct mm_struct *mm) 256 + static inline void local_flush_tlb_mm(struct mm_struct *mm) 257 257 { 258 258 const int zero = 0; 259 259 const int asid = ASID(mm); ··· 282 282 } 283 283 284 284 static inline void 285 - flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 285 + local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 286 286 { 287 287 const int zero = 0; 288 288 const unsigned int __tlb_flag = __cpu_tlb_flags; ··· 313 313 asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr)); 314 314 } 315 315 316 - static inline void flush_tlb_kernel_page(unsigned long kaddr) 316 + static inline void local_flush_tlb_kernel_page(unsigned long kaddr) 317 317 { 318 318 const int zero = 0; 319 319 const unsigned int __tlb_flag = __cpu_tlb_flags; ··· 384 384 /* 385 385 * Convert calls to our calling convention. 386 386 */ 387 - #define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) 388 - #define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) 387 + #define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) 388 + #define local_flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) 389 + 390 + #ifndef CONFIG_SMP 391 + #define flush_tlb_all local_flush_tlb_all 392 + #define flush_tlb_mm local_flush_tlb_mm 393 + #define flush_tlb_page local_flush_tlb_page 394 + #define flush_tlb_kernel_page local_flush_tlb_kernel_page 395 + #define flush_tlb_range local_flush_tlb_range 396 + #define flush_tlb_kernel_range local_flush_tlb_kernel_range 397 + #else 398 + extern void flush_tlb_all(void); 399 + extern void flush_tlb_mm(struct mm_struct *mm); 400 + extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); 401 + extern void flush_tlb_kernel_page(unsigned long kaddr); 402 + extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); 403 + extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 404 + #endif 389 405 390 406 /* 391 407 * if PG_dcache_dirty is set for the page, we need to ensure that any