]> git.openfabrics.org - ~shefty/rdma-dev.git/blob - arch/x86/mm/ioremap.c
x86: create a non-zero sized bm_pte only when needed
[~shefty/rdma-dev.git] / arch / x86 / mm / ioremap.c
1 /*
2  * Re-map IO memory to kernel address space so that we can access it.
3  * This is needed for high PCI addresses that aren't mapped in the
4  * 640k-1MB IO memory area on PC's
5  *
6  * (C) Copyright 1995 1996 Linus Torvalds
7  */
8
9 #include <linux/bootmem.h>
10 #include <linux/init.h>
11 #include <linux/io.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mmiotrace.h>
16
17 #include <asm/cacheflush.h>
18 #include <asm/e820.h>
19 #include <asm/fixmap.h>
20 #include <asm/pgtable.h>
21 #include <asm/tlbflush.h>
22 #include <asm/pgalloc.h>
23 #include <asm/pat.h>
24
25 static inline int phys_addr_valid(resource_size_t addr)
26 {
27 #ifdef CONFIG_PHYS_ADDR_T_64BIT
28         return !(addr >> boot_cpu_data.x86_phys_bits);
29 #else
30         return 1;
31 #endif
32 }
33
34 #ifdef CONFIG_X86_64
35
36 unsigned long __phys_addr(unsigned long x)
37 {
38         if (x >= __START_KERNEL_map) {
39                 x -= __START_KERNEL_map;
40                 VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
41                 x += phys_base;
42         } else {
43                 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
44                 x -= PAGE_OFFSET;
45                 VIRTUAL_BUG_ON(!phys_addr_valid(x));
46         }
47         return x;
48 }
49 EXPORT_SYMBOL(__phys_addr);
50
51 bool __virt_addr_valid(unsigned long x)
52 {
53         if (x >= __START_KERNEL_map) {
54                 x -= __START_KERNEL_map;
55                 if (x >= KERNEL_IMAGE_SIZE)
56                         return false;
57                 x += phys_base;
58         } else {
59                 if (x < PAGE_OFFSET)
60                         return false;
61                 x -= PAGE_OFFSET;
62                 if (!phys_addr_valid(x))
63                         return false;
64         }
65
66         return pfn_valid(x >> PAGE_SHIFT);
67 }
68 EXPORT_SYMBOL(__virt_addr_valid);
69
70 #else
71
72 #ifdef CONFIG_DEBUG_VIRTUAL
73 unsigned long __phys_addr(unsigned long x)
74 {
75         /* VMALLOC_* aren't constants  */
76         VIRTUAL_BUG_ON(x < PAGE_OFFSET);
77         VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
78         return x - PAGE_OFFSET;
79 }
80 EXPORT_SYMBOL(__phys_addr);
81 #endif
82
83 bool __virt_addr_valid(unsigned long x)
84 {
85         if (x < PAGE_OFFSET)
86                 return false;
87         if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
88                 return false;
89         if (x >= FIXADDR_START)
90                 return false;
91         return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
92 }
93 EXPORT_SYMBOL(__virt_addr_valid);
94
95 #endif
96
/*
 * page_is_ram - check whether a page frame is usable RAM per the e820 map
 * @pagenr: page frame number to test
 *
 * Returns 1 if @pagenr lies entirely within an E820_RAM region and is
 * not one of the BIOS-owned special cases below, 0 otherwise.
 */
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		/*
		 * Round the region start up and truncate the end down to
		 * page frames, so a partially-covered page does not count
		 * as RAM.
		 */
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;


		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
133
134 /*
135  * Fix up the linear direct mapping of the kernel to avoid cache attribute
136  * conflicts.
137  */
138 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
139                                unsigned long prot_val)
140 {
141         unsigned long nrpages = size >> PAGE_SHIFT;
142         int err;
143
144         switch (prot_val) {
145         case _PAGE_CACHE_UC:
146         default:
147                 err = _set_memory_uc(vaddr, nrpages);
148                 break;
149         case _PAGE_CACHE_WC:
150                 err = _set_memory_wc(vaddr, nrpages);
151                 break;
152         case _PAGE_CACHE_WB:
153                 err = _set_memory_wb(vaddr, nrpages);
154                 break;
155         }
156
157         return err;
158 }
159
160 /*
161  * Remap an arbitrary physical address space into the kernel virtual
162  * address space. Needed when the kernel wants to access high addresses
163  * directly.
164  *
165  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
166  * have to convert them into an offset in a page-aligned mapping, but the
167  * caller shouldn't need to know that small detail.
168  */
169 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
170                 unsigned long size, unsigned long prot_val, void *caller)
171 {
172         unsigned long pfn, offset, vaddr;
173         resource_size_t last_addr;
174         const resource_size_t unaligned_phys_addr = phys_addr;
175         const unsigned long unaligned_size = size;
176         struct vm_struct *area;
177         unsigned long new_prot_val;
178         pgprot_t prot;
179         int retval;
180         void __iomem *ret_addr;
181
182         /* Don't allow wraparound or zero size */
183         last_addr = phys_addr + size - 1;
184         if (!size || last_addr < phys_addr)
185                 return NULL;
186
187         if (!phys_addr_valid(phys_addr)) {
188                 printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
189                        (unsigned long long)phys_addr);
190                 WARN_ON_ONCE(1);
191                 return NULL;
192         }
193
194         /*
195          * Don't remap the low PCI/ISA area, it's always mapped..
196          */
197         if (is_ISA_range(phys_addr, last_addr))
198                 return (__force void __iomem *)phys_to_virt(phys_addr);
199
200         /*
201          * Check if the request spans more than any BAR in the iomem resource
202          * tree.
203          */
204         WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
205                   KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
206
207         /*
208          * Don't allow anybody to remap normal RAM that we're using..
209          */
210         for (pfn = phys_addr >> PAGE_SHIFT;
211                                 (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
212                                 pfn++) {
213
214                 int is_ram = page_is_ram(pfn);
215
216                 if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
217                         return NULL;
218                 WARN_ON_ONCE(is_ram);
219         }
220
221         /*
222          * Mappings have to be page-aligned
223          */
224         offset = phys_addr & ~PAGE_MASK;
225         phys_addr &= PAGE_MASK;
226         size = PAGE_ALIGN(last_addr+1) - phys_addr;
227
228         retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
229                                                 prot_val, &new_prot_val);
230         if (retval) {
231                 pr_debug("Warning: reserve_memtype returned %d\n", retval);
232                 return NULL;
233         }
234
235         if (prot_val != new_prot_val) {
236                 /*
237                  * Do not fallback to certain memory types with certain
238                  * requested type:
239                  * - request is uc-, return cannot be write-back
240                  * - request is uc-, return cannot be write-combine
241                  * - request is write-combine, return cannot be write-back
242                  */
243                 if ((prot_val == _PAGE_CACHE_UC_MINUS &&
244                      (new_prot_val == _PAGE_CACHE_WB ||
245                       new_prot_val == _PAGE_CACHE_WC)) ||
246                     (prot_val == _PAGE_CACHE_WC &&
247                      new_prot_val == _PAGE_CACHE_WB)) {
248                         pr_debug(
249                 "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
250                                 (unsigned long long)phys_addr,
251                                 (unsigned long long)(phys_addr + size),
252                                 prot_val, new_prot_val);
253                         free_memtype(phys_addr, phys_addr + size);
254                         return NULL;
255                 }
256                 prot_val = new_prot_val;
257         }
258
259         switch (prot_val) {
260         case _PAGE_CACHE_UC:
261         default:
262                 prot = PAGE_KERNEL_IO_NOCACHE;
263                 break;
264         case _PAGE_CACHE_UC_MINUS:
265                 prot = PAGE_KERNEL_IO_UC_MINUS;
266                 break;
267         case _PAGE_CACHE_WC:
268                 prot = PAGE_KERNEL_IO_WC;
269                 break;
270         case _PAGE_CACHE_WB:
271                 prot = PAGE_KERNEL_IO;
272                 break;
273         }
274
275         /*
276          * Ok, go for it..
277          */
278         area = get_vm_area_caller(size, VM_IOREMAP, caller);
279         if (!area)
280                 return NULL;
281         area->phys_addr = phys_addr;
282         vaddr = (unsigned long) area->addr;
283         if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
284                 free_memtype(phys_addr, phys_addr + size);
285                 free_vm_area(area);
286                 return NULL;
287         }
288
289         if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
290                 free_memtype(phys_addr, phys_addr + size);
291                 vunmap(area->addr);
292                 return NULL;
293         }
294
295         ret_addr = (void __iomem *) (vaddr + offset);
296         mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
297
298         return ret_addr;
299 }
300
301 /**
302  * ioremap_nocache     -   map bus memory into CPU space
303  * @offset:    bus address of the memory
304  * @size:      size of the resource to map
305  *
306  * ioremap_nocache performs a platform specific sequence of operations to
307  * make bus memory CPU accessible via the readb/readw/readl/writeb/
308  * writew/writel functions and the other mmio helpers. The returned
309  * address is not guaranteed to be usable directly as a virtual
310  * address.
311  *
312  * This version of ioremap ensures that the memory is marked uncachable
313  * on the CPU as well as honouring existing caching rules from things like
314  * the PCI bus. Note that there are other caches and buffers on many
315  * busses. In particular driver authors should read up on PCI writes
316  *
317  * It's useful if some control registers are in such an area and
318  * write combining or read caching is not desirable:
319  *
320  * Must be freed with iounmap.
321  */
322 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
323 {
324         /*
325          * Ideally, this should be:
326          *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
327          *
328          * Till we fix all X drivers to use ioremap_wc(), we will use
329          * UC MINUS.
330          */
331         unsigned long val = _PAGE_CACHE_UC_MINUS;
332
333         return __ioremap_caller(phys_addr, size, val,
334                                 __builtin_return_address(0));
335 }
336 EXPORT_SYMBOL(ioremap_nocache);
337
338 /**
339  * ioremap_wc   -       map memory into CPU space write combined
340  * @offset:     bus address of the memory
341  * @size:       size of the resource to map
342  *
343  * This version of ioremap ensures that the memory is marked write combining.
344  * Write combining allows faster writes to some hardware devices.
345  *
346  * Must be freed with iounmap.
347  */
348 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
349 {
350         if (pat_enabled)
351                 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
352                                         __builtin_return_address(0));
353         else
354                 return ioremap_nocache(phys_addr, size);
355 }
356 EXPORT_SYMBOL(ioremap_wc);
357
/*
 * ioremap_cache - map bus memory into CPU space with write-back caching
 *
 * Like ioremap_nocache() but requests a cacheable (_PAGE_CACHE_WB)
 * mapping.  Must be freed with iounmap().
 */
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
364
365 static void __iomem *ioremap_default(resource_size_t phys_addr,
366                                         unsigned long size)
367 {
368         unsigned long flags;
369         void __iomem *ret;
370         int err;
371
372         /*
373          * - WB for WB-able memory and no other conflicting mappings
374          * - UC_MINUS for non-WB-able memory with no other conflicting mappings
375          * - Inherit from confliting mappings otherwise
376          */
377         err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
378         if (err < 0)
379                 return NULL;
380
381         ret = __ioremap_caller(phys_addr, size, flags,
382                                __builtin_return_address(0));
383
384         free_memtype(phys_addr, phys_addr + size);
385         return ret;
386 }
387
/*
 * ioremap_prot - remap with an explicit caching attribute.
 *
 * Only the _PAGE_CACHE_MASK bits of @prot_val are honoured; all other
 * protection bits are chosen by __ioremap_caller().
 */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
395
/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	/* Addresses at or below high_memory were never ioremapped. */
	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	/* Strip the sub-page offset __ioremap_caller() added. */
	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Release the PAT memtype reservation taken by __ioremap_caller(). */
	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
449
450 /*
451  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
452  * access
453  */
454 void *xlate_dev_mem_ptr(unsigned long phys)
455 {
456         void *addr;
457         unsigned long start = phys & PAGE_MASK;
458
459         /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
460         if (page_is_ram(start >> PAGE_SHIFT))
461                 return __va(phys);
462
463         addr = (void __force *)ioremap_default(start, PAGE_SIZE);
464         if (addr)
465                 addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
466
467         return addr;
468 }
469
470 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
471 {
472         if (page_is_ram(phys >> PAGE_SHIFT))
473                 return;
474
475         iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
476         return;
477 }
478
/*
 * Set via the "early_ioremap_debug" boot parameter: enables verbose
 * logging (and stack dumps) from the early ioremap machinery below.
 */
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
488
/* Set by early_ioremap_reset() once the real fixmap helpers work. */
static __initdata int after_paging_init;

/*
 * Boot-time pte page for the FIX_BTMAP fixmap slots.  The array-size
 * expression evaluates (with __FIXADDR_TOP temporarily pinned so the
 * result is a compile-time constant) whether FIX_DBGP_BASE and
 * FIX_BTMAP_BEGIN fall in different pmds: if so, bm_pte gets a full
 * page of ptes of its own; otherwise it is zero-sized and
 * early_ioremap_init() instead points bm_ptep at the pte page already
 * installed in that pmd.
 */
#define __FIXADDR_TOP (-PAGE_SIZE)
static pte_t bm_pte[(__fix_to_virt(FIX_DBGP_BASE)
		     ^ __fix_to_virt(FIX_BTMAP_BEGIN)) >> PMD_SHIFT
		    ? PAGE_SIZE / sizeof(pte_t) : 0] __page_aligned_bss;
#undef __FIXADDR_TOP
static __initdata pte_t *bm_ptep;
496
497 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
498 {
499         /* Don't assume we're using swapper_pg_dir at this point */
500         pgd_t *base = __va(read_cr3());
501         pgd_t *pgd = &base[pgd_index(addr)];
502         pud_t *pud = pud_offset(pgd, addr);
503         pmd_t *pmd = pmd_offset(pud, addr);
504
505         return pmd;
506 }
507
508 static inline pte_t * __init early_ioremap_pte(unsigned long addr)
509 {
510         if (!sizeof(bm_pte))
511                 return &bm_ptep[pte_index(addr)];
512         return &bm_pte[pte_index(addr)];
513 }
514
515 static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
516
/*
 * early_ioremap_init - set up the boot-time fixmap pte page.
 *
 * Records the virtual address of every early-ioremap slot and makes
 * sure the pmd covering FIX_BTMAP_BEGIN has a pte page: either our own
 * bm_pte[] (when it was given a non-zero size), or the pte page some
 * earlier fixmap user already installed, remembered via bm_ptep.
 */
void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	if (sizeof(bm_pte)) {
		memset(bm_pte, 0, sizeof(bm_pte));
		pmd_populate_kernel(&init_mm, pmd, bm_pte);
	} else {
		/* bm_pte is zero-sized: reuse the pte page that is
		 * already installed in this pmd. */
		bm_ptep = pte_offset_kernel(pmd, 0);
		if (early_ioremap_debug)
			printk(KERN_INFO "bm_ptep=%p\n", bm_ptep);
	}

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
556
/*
 * Called once the kernel page tables are live: from here on
 * early_set_fixmap()/early_clear_fixmap() route through the regular
 * fixmap helpers instead of the boot-time pte page.
 */
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}
561
562 static void __init __early_set_fixmap(enum fixed_addresses idx,
563                                    unsigned long phys, pgprot_t flags)
564 {
565         unsigned long addr = __fix_to_virt(idx);
566         pte_t *pte;
567
568         if (idx >= __end_of_fixed_addresses) {
569                 BUG();
570                 return;
571         }
572         pte = early_ioremap_pte(addr);
573
574         if (pgprot_val(flags))
575                 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
576         else
577                 pte_clear(&init_mm, addr, pte);
578         __flush_tlb_one(addr);
579 }
580
581 static inline void __init early_set_fixmap(enum fixed_addresses idx,
582                                            unsigned long phys, pgprot_t prot)
583 {
584         if (after_paging_init)
585                 __set_fixmap(idx, phys, prot);
586         else
587                 __early_set_fixmap(idx, phys, prot);
588 }
589
590 static inline void __init early_clear_fixmap(enum fixed_addresses idx)
591 {
592         if (after_paging_init)
593                 clear_fixmap(idx);
594         else
595                 __early_set_fixmap(idx, 0, __pgprot(0));
596 }
597
/* Per-slot bookkeeping: the cookie handed to the caller and the size
 * it was mapped with (checked again at early_iounmap() time). */
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
600
601 static int __init check_early_ioremap_leak(void)
602 {
603         int count = 0;
604         int i;
605
606         for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
607                 if (prev_map[i])
608                         count++;
609
610         if (!count)
611                 return 0;
612         WARN(1, KERN_WARNING
613                "Debug warning: early ioremap leak of %d areas detected.\n",
614                 count);
615         printk(KERN_WARNING
616                 "please boot with early_ioremap_debug and report the dmesg.\n");
617
618         return 1;
619 }
620 late_initcall(check_early_ioremap_leak);
621
622 static void __init __iomem *
623 __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
624 {
625         unsigned long offset, last_addr;
626         unsigned int nrpages;
627         enum fixed_addresses idx0, idx;
628         int i, slot;
629
630         WARN_ON(system_state != SYSTEM_BOOTING);
631
632         slot = -1;
633         for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
634                 if (!prev_map[i]) {
635                         slot = i;
636                         break;
637                 }
638         }
639
640         if (slot < 0) {
641                 printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
642                          phys_addr, size);
643                 WARN_ON(1);
644                 return NULL;
645         }
646
647         if (early_ioremap_debug) {
648                 printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
649                        phys_addr, size, slot);
650                 dump_stack();
651         }
652
653         /* Don't allow wraparound or zero size */
654         last_addr = phys_addr + size - 1;
655         if (!size || last_addr < phys_addr) {
656                 WARN_ON(1);
657                 return NULL;
658         }
659
660         prev_size[slot] = size;
661         /*
662          * Mappings have to be page-aligned
663          */
664         offset = phys_addr & ~PAGE_MASK;
665         phys_addr &= PAGE_MASK;
666         size = PAGE_ALIGN(last_addr + 1) - phys_addr;
667
668         /*
669          * Mappings have to fit in the FIX_BTMAP area.
670          */
671         nrpages = size >> PAGE_SHIFT;
672         if (nrpages > NR_FIX_BTMAPS) {
673                 WARN_ON(1);
674                 return NULL;
675         }
676
677         /*
678          * Ok, go for it..
679          */
680         idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
681         idx = idx0;
682         while (nrpages > 0) {
683                 early_set_fixmap(idx, phys_addr, prot);
684                 phys_addr += PAGE_SIZE;
685                 --idx;
686                 --nrpages;
687         }
688         if (early_ioremap_debug)
689                 printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
690
691         prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
692         return prev_map[slot];
693 }
694
/* Remap an IO device during boot (PAGE_KERNEL_IO protection).
 * Must be released with early_iounmap(). */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}
700
/* Remap normal memory during boot (PAGE_KERNEL protection).
 * Must be released with early_iounmap(). */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
706
707 void __init early_iounmap(void __iomem *addr, unsigned long size)
708 {
709         unsigned long virt_addr;
710         unsigned long offset;
711         unsigned int nrpages;
712         enum fixed_addresses idx;
713         int i, slot;
714
715         slot = -1;
716         for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
717                 if (prev_map[i] == addr) {
718                         slot = i;
719                         break;
720                 }
721         }
722
723         if (slot < 0) {
724                 printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
725                          addr, size);
726                 WARN_ON(1);
727                 return;
728         }
729
730         if (prev_size[slot] != size) {
731                 printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
732                          addr, size, slot, prev_size[slot]);
733                 WARN_ON(1);
734                 return;
735         }
736
737         if (early_ioremap_debug) {
738                 printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
739                        size, slot);
740                 dump_stack();
741         }
742
743         virt_addr = (unsigned long)addr;
744         if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
745                 WARN_ON(1);
746                 return;
747         }
748         offset = virt_addr & ~PAGE_MASK;
749         nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
750
751         idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
752         while (nrpages > 0) {
753                 early_clear_fixmap(idx);
754                 --idx;
755                 --nrpages;
756         }
757         prev_map[slot] = NULL;
758 }