[PATCH] Fix up page_mkclean_one(): virtual caches, s390

- add flush_cache_page() for all those virtually indexed cache
  architectures (see the annotated sketch below).

- handle s390.
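
For readers skimming the diff below, the write-protect sequence in
page_mkclean_one() ends up looking roughly like this (a sketch mirroring
the second hunk; the comments are added here for explanation and are not
part of the patch):

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		/*
		 * On virtually indexed caches this writes back the
		 * user-space cache lines for the page before the
		 * mapping is changed; on physically indexed caches
		 * it is a no-op.
		 */
		flush_cache_page(vma, address, pte_pfn(*pte));

		/* Atomically fetch the old PTE and flush the TLB entry. */
		entry = ptep_clear_flush(vma, address, pte);

		/* Re-install the PTE clean and write-protected. */
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}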

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Peter Zijlstra, committed by Linus Torvalds (c2fda5fe, e21654a7)

+13 -10
mm/rmap.c
···
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
-	pte_t *pte, entry;
+	pte_t *pte;
 	spinlock_t *ptl;
 	int ret = 0;
···
 	if (!pte)
 		goto out;
 
-	if (!pte_dirty(*pte) && !pte_write(*pte))
-		goto unlock;
+	if (pte_dirty(*pte) || pte_write(*pte)) {
+		pte_t entry;
 
-	entry = ptep_get_and_clear(mm, address, pte);
-	entry = pte_mkclean(entry);
-	entry = pte_wrprotect(entry);
-	ptep_establish(vma, address, pte, entry);
-	lazy_mmu_prot_update(entry);
-	ret = 1;
+		flush_cache_page(vma, address, pte_pfn(*pte));
+		entry = ptep_clear_flush(vma, address, pte);
+		entry = pte_wrprotect(entry);
+		entry = pte_mkclean(entry);
+		set_pte_at(mm, address, pte, entry);
+		lazy_mmu_prot_update(entry);
+		ret = 1;
+	}
 
- unlock:
 	pte_unmap_unlock(pte, ptl);
 out:
 	return ret;
···
 		if (mapping)
 			ret = page_mkclean_file(mapping, page);
 	}
+	if (page_test_and_clear_dirty(page))
+		ret = 1;
 
 	return ret;
 }
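
On the s390 side: s390 keeps the hardware dirty ("changed") bit in the
per-page storage key rather than in the PTE, so cleaning and
write-protecting the PTE alone would miss it; the new
page_test_and_clear_dirty() call in page_mkclean() picks that state up
as well.  On every other architecture the helper falls back to the
generic definition, so the added branch costs nothing there.  As a
reference sketch (quoted from memory of the asm-generic header of that
era, so treat it as illustrative rather than authoritative):

	/* include/asm-generic/pgtable.h */
	#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
	#define page_test_and_clear_dirty(page)	(0)
	#endif

s390 defines __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY and supplies a
storage-key based implementation, which is why page_mkclean() folds the
result into ret.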