b1f279cd00bfd96afb039e917a0692ce440beaeb
[~shefty/rdma-dev.git] / arch / sparc / mm / tlb.c
1 /* arch/sparc64/mm/tlb.c
2  *
3  * Copyright (C) 2004 David S. Miller <davem@redhat.com>
4  */
5
6 #include <linux/kernel.h>
7 #include <linux/init.h>
8 #include <linux/percpu.h>
9 #include <linux/mm.h>
10 #include <linux/swap.h>
11 #include <linux/preempt.h>
12
13 #include <asm/pgtable.h>
14 #include <asm/pgalloc.h>
15 #include <asm/tlbflush.h>
16 #include <asm/cacheflush.h>
17 #include <asm/mmu_context.h>
18 #include <asm/tlb.h>
19
20 /* Heavily inspired by the ppc64 code.  */
21
/* Per-CPU staging area: virtual addresses whose TLB entries are pending
 * invalidation, accumulated by tlb_batch_add() and drained in one shot
 * by flush_tlb_pending().
 */
22 static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
23
24 void flush_tlb_pending(void)
25 {
26         struct tlb_batch *tb = &get_cpu_var(tlb_batch);
27
28         if (tb->tlb_nr) {
29                 flush_tsb_user(tb);
30
31                 if (CTX_VALID(tb->mm->context)) {
32 #ifdef CONFIG_SMP
33                         smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
34                                               &tb->vaddrs[0]);
35 #else
36                         __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
37                                             tb->tlb_nr, &tb->vaddrs[0]);
38 #endif
39                 }
40                 tb->tlb_nr = 0;
41         }
42
43         put_cpu_var(tlb_batch);
44 }
45
46 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
47                    pte_t *ptep, pte_t orig, int fullmm)
48 {
49         struct tlb_batch *tb = &get_cpu_var(tlb_batch);
50         unsigned long nr;
51
52         vaddr &= PAGE_MASK;
53         if (pte_exec(orig))
54                 vaddr |= 0x1UL;
55
56         if (tlb_type != hypervisor &&
57             pte_dirty(orig)) {
58                 unsigned long paddr, pfn = pte_pfn(orig);
59                 struct address_space *mapping;
60                 struct page *page;
61
62                 if (!pfn_valid(pfn))
63                         goto no_cache_flush;
64
65                 page = pfn_to_page(pfn);
66                 if (PageReserved(page))
67                         goto no_cache_flush;
68
69                 /* A real file page? */
70                 mapping = page_mapping(page);
71                 if (!mapping)
72                         goto no_cache_flush;
73
74                 paddr = (unsigned long) page_address(page);
75                 if ((paddr ^ vaddr) & (1 << 13))
76                         flush_dcache_page_all(mm, page);
77         }
78
79 no_cache_flush:
80
81         if (fullmm) {
82                 put_cpu_var(tlb_batch);
83                 return;
84         }
85
86         nr = tb->tlb_nr;
87
88         if (unlikely(nr != 0 && mm != tb->mm)) {
89                 flush_tlb_pending();
90                 nr = 0;
91         }
92
93         if (nr == 0)
94                 tb->mm = mm;
95
96         tb->vaddrs[nr] = vaddr;
97         tb->tlb_nr = ++nr;
98         if (nr >= TLB_BATCH_NR)
99                 flush_tlb_pending();
100
101         put_cpu_var(tlb_batch);
102 }