git.openfabrics.org - ~shefty/rdma-dev.git/commitdiff
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab...
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 27 Jul 2009 19:16:57 +0000 (12:16 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 27 Jul 2009 19:16:57 +0000 (12:16 -0700)
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6: (34 commits)
  V4L/DVB (12303): cx23885: check pointers before dereferencing in dprintk macro
  V4L/DVB (12302): cx23885-417: fix broken IOCTL handling
  V4L/DVB (12300): bttv: fix regression: tvaudio must be loaded before tuner
  V4L/DVB (12291): b2c2: fix frontends compiled into kernel
  V4L/DVB (12286): sn9c20x: reorder includes to be like other drivers
  V4L/DVB (12284): gspca - jpeg subdrivers: Check the result of kmalloc(jpeg header).
  V4L/DVB (12283): gspca - sn9c20x: New subdriver for sn9c201 and sn9c202 bridges.
  V4L/DVB (12282): gspca - main: Support for vidioc_g_chip_ident and vidioc_g/s_register.
  V4L/DVB (12269): af9013: auto-detect parameters in case of garbage given by app
  V4L/DVB (12267): gspca - sonixj: Bad sensor init of non ov76xx sensors.
  V4L/DVB (12265): em28xx: fix tuning problem in HVR-900 (R1)
  V4L/DVB (12263): em28xx: set demod profile for Pinnacle Hybrid Pro 320e
  V4L/DVB (12262): em28xx: Make sure the tuner is initialized if generic empia USB id was used
  V4L/DVB (12261): em28xx: set GPIO properly for Pinnacle Hybrid Pro analog support
  V4L/DVB (12260): em28xx: make support work for the Pinnacle Hybrid Pro (eb1a:2881)
  V4L/DVB (12258): em28xx: fix typo in mt352 init sequence for Terratec Cinergy T XS USB
  V4L/DVB (12257): em28xx: make tuning work for Terratec Cinergy T XS USB (mt352 variant)
  V4L/DVB (12245): em28xx: add support for mt9m001 webcams
  V4L/DVB (12244): em28xx: adjust vinmode/vinctl based on the stream input format
  V4L/DVB (12243): em28xx: allow specifying sensor xtal frequency
  ...

60 files changed:
arch/alpha/include/asm/tlb.h
arch/arm/include/asm/tlb.h
arch/avr32/include/asm/pgalloc.h
arch/cris/include/asm/pgalloc.h
arch/frv/include/asm/pgalloc.h
arch/frv/include/asm/pgtable.h
arch/ia64/include/asm/pgalloc.h
arch/ia64/include/asm/tlb.h
arch/m32r/include/asm/pgalloc.h
arch/m68k/include/asm/motorola_pgalloc.h
arch/m68k/include/asm/sun3_pgalloc.h
arch/microblaze/include/asm/pgalloc.h
arch/mips/include/asm/pgalloc.h
arch/mn10300/include/asm/pgalloc.h
arch/parisc/include/asm/tlb.h
arch/powerpc/include/asm/pgalloc-32.h
arch/powerpc/include/asm/pgalloc-64.h
arch/powerpc/include/asm/pgalloc.h
arch/powerpc/mm/hugetlbpage.c
arch/s390/include/asm/tlb.h
arch/s390/kernel/early.c
arch/s390/kernel/smp.c
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/power/swsusp.c
arch/s390/power/swsusp_asm64.S
arch/sh/include/asm/pgalloc.h
arch/sh/include/asm/tlb.h
arch/sparc/include/asm/pgalloc_32.h
arch/sparc/include/asm/tlb_64.h
arch/um/include/asm/pgalloc.h
arch/um/include/asm/tlb.h
arch/x86/include/asm/pgalloc.h
arch/x86/mm/pgtable.c
arch/xtensa/include/asm/tlb.h
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-linear.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/dm.h
drivers/s390/crypto/ap_bus.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/ext3/dir.c
fs/ext3/inode.c
fs/jbd/journal.c
fs/jbd/transaction.c
fs/jfs/acl.c
include/asm-generic/4level-fixup.h
include/asm-generic/pgtable-nopmd.h
include/asm-generic/pgtable-nopud.h
include/asm-generic/tlb.h
include/linux/device-mapper.h
include/linux/ext3_fs.h
kernel/kthread.c
kernel/module.c
mm/memory.c

index c13636575fbab4fb14c17b75d53058b0f9181e1c..42866759f3fabd022c07c431a01bc9f45f6d91f4 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <asm-generic/tlb.h>
 
-#define __pte_free_tlb(tlb, pte)                       pte_free((tlb)->mm, pte)
-#define __pmd_free_tlb(tlb, pmd)                       pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte, address)              pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, address)              pmd_free((tlb)->mm, pmd)
  
 #endif
index 321c83e43a1e7a2cc9d8d770417004a5c206be9a..f41a6f57cd1223aef8d00f450bc1b68365623b19 100644 (file)
@@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 }
 
 #define tlb_remove_page(tlb,page)      free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep)                pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp)                pmd_free((tlb)->mm, pmdp)
+#define pte_free_tlb(tlb, ptep, addr)  pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr)  pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm)         do { } while (0)
 
index 64082132394312a4c861abdc6b110f58517a7a40..92ecd8446ef80b6fe06c62bce45fe93a40f344bb 100644 (file)
@@ -83,7 +83,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        quicklist_free_page(QUICK_PT, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb,pte)                                \
+#define __pte_free_tlb(tlb,pte,addr)                   \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
        tlb_remove_page((tlb), pte);                    \
index a1ba761d0573637ffb79ff4f9d6624d644dad9ce..6da975db112fdabff1125ed0f3ecb38df892caae 100644 (file)
@@ -47,7 +47,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        __free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte)                                \
+#define __pte_free_tlb(tlb,pte,address)                        \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
        tlb_remove_page((tlb), pte);                    \
index 971e6addb0095a4175d86b68715937e918d2f474..416d19a632f228a30f1133df32fc1629fa359731 100644 (file)
@@ -49,7 +49,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        __free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte)                                \
+#define __pte_free_tlb(tlb,pte,address)                        \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
        tlb_remove_page((tlb),(pte));                   \
@@ -62,7 +62,7 @@ do {                                                  \
  */
 #define pmd_alloc_one(mm, addr)                ({ BUG(); ((pmd_t *) 2); })
 #define pmd_free(mm, x)                        do { } while (0)
-#define __pmd_free_tlb(tlb,x)          do { } while (0)
+#define __pmd_free_tlb(tlb,x,a)                do { } while (0)
 
 #endif /* CONFIG_MMU */
 
index 33233011b1c1147ff46d2aaac3522150d72630c7..22c60692b5513c7c6f66d49947a16e574ba31835 100644 (file)
@@ -225,7 +225,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
  */
 #define pud_alloc_one(mm, address)             NULL
 #define pud_free(mm, x)                                do { } while (0)
-#define __pud_free_tlb(tlb, x)               do { } while (0)
+#define __pud_free_tlb(tlb, x, address)                do { } while (0)
 
 /*
  * The "pud_xxx()" functions here are trivial for a folded two-level
index b9ac1a6fc21694a55e2771e471ab8cc90ab997cf..96a8d927db2851c9c17d8b48f6b47da66f9dd572 100644 (file)
@@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
        quicklist_free(0, NULL, pud);
 }
-#define __pud_free_tlb(tlb, pud)     pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, address)      pud_free((tlb)->mm, pud)
 #endif /* CONFIG_PGTABLE_4 */
 
 static inline void
@@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        quicklist_free(0, NULL, pmd);
 }
 
-#define __pmd_free_tlb(tlb, pmd)     pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, address)      pmd_free((tlb)->mm, pmd)
 
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
@@ -117,6 +117,6 @@ static inline void check_pgt_cache(void)
        quicklist_trim(0, NULL, 25, 16);
 }
 
-#define __pte_free_tlb(tlb, pte)     pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address)      pte_free((tlb)->mm, pte)
 
 #endif                         /* _ASM_IA64_PGALLOC_H */
index 20d8a39680c212af2e455327fb5782680af8c772..85d965cb19a0835ace573551d667053fcc8fa260 100644 (file)
@@ -236,22 +236,22 @@ do {                                                      \
        __tlb_remove_tlb_entry(tlb, ptep, addr);        \
 } while (0)
 
-#define pte_free_tlb(tlb, ptep)                                \
+#define pte_free_tlb(tlb, ptep, address)               \
 do {                                                   \
        tlb->need_flush = 1;                            \
-       __pte_free_tlb(tlb, ptep);                      \
+       __pte_free_tlb(tlb, ptep, address);             \
 } while (0)
 
-#define pmd_free_tlb(tlb, ptep)                                \
+#define pmd_free_tlb(tlb, ptep, address)               \
 do {                                                   \
        tlb->need_flush = 1;                            \
-       __pmd_free_tlb(tlb, ptep);                      \
+       __pmd_free_tlb(tlb, ptep, address);             \
 } while (0)
 
-#define pud_free_tlb(tlb, pudp)                                \
+#define pud_free_tlb(tlb, pudp, address)               \
 do {                                                   \
        tlb->need_flush = 1;                            \
-       __pud_free_tlb(tlb, pudp);                      \
+       __pud_free_tlb(tlb, pudp, address);             \
 } while (0)
 
 #endif /* _ASM_IA64_TLB_H */
index f11a2b909cdbf8ab3bc1a11195b0160173c2a58b..0fc7361989797d9a8c6fb6cc2a823367f75236e0 100644 (file)
@@ -58,7 +58,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        __free_page(pte);
 }
 
-#define __pte_free_tlb(tlb, pte)       pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
 
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
@@ -68,7 +68,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 
 #define pmd_alloc_one(mm, addr)                ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, x)                        do { } while (0)
-#define __pmd_free_tlb(tlb, x)         do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr)   do { } while (0)
 #define pgd_populate(mm, pmd, pte)     BUG()
 
 #define check_pgt_cache()      do { } while (0)
index d08bf6261df88889f0e39091f8356580920208bb..15ee4c74a9f0621c6ca4d45380e6267475edf8e5 100644 (file)
@@ -54,7 +54,8 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
        __free_page(page);
 }
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+                                 unsigned long address)
 {
        pgtable_page_dtor(page);
        cache_page(kmap(page));
@@ -73,7 +74,8 @@ static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
        return free_pointer_table(pmd);
 }
 
-static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+                                unsigned long address)
 {
        return free_pointer_table(pmd);
 }
index d4c83f14381652402d35f4b3cb5fb5967f97a6de..48d80d5a666f80b64d5b62e318197b1752e29733 100644 (file)
@@ -32,7 +32,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
         __free_page(page);
 }
 
-#define __pte_free_tlb(tlb,pte)                                \
+#define __pte_free_tlb(tlb,pte,addr)                   \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
        tlb_remove_page((tlb), pte);                    \
@@ -80,7 +80,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
  * inside the pgd, so has no extra memory associated with it.
  */
 #define pmd_free(mm, x)                        do { } while (0)
-#define __pmd_free_tlb(tlb, x)         do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr)   do { } while (0)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
index 59a757e46ba552ad57736103ea42c3378e83af90..b0131da1387bb131cd9e7666519eedb9591302e3 100644 (file)
@@ -180,7 +180,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
        __free_page(ptepage);
 }
 
-#define __pte_free_tlb(tlb, pte)       pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
 
 #define pmd_populate(mm, pmd, pte)     (pmd_val(*(pmd)) = page_address(pte))
 
@@ -193,7 +193,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
  */
 #define pmd_alloc_one(mm, address)     ({ BUG(); ((pmd_t *)2); })
 /*#define pmd_free(mm, x)                      do { } while (0)*/
-#define __pmd_free_tlb(tlb, x)         do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr)   do { } while (0)
 #define pgd_populate(mm, pmd, pte)     BUG()
 
 extern int do_check_pgt_cache(int, int);
index 1275831dda29b65ae90d2046f781a747cb4a3df0..f705735feefcb063eb31d7d35e89e430da081fda 100644 (file)
@@ -98,7 +98,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        __free_pages(pte, PTE_ORDER);
 }
 
-#define __pte_free_tlb(tlb,pte)                                \
+#define __pte_free_tlb(tlb,pte,address)                        \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
        tlb_remove_page((tlb), pte);                    \
@@ -111,7 +111,7 @@ do {                                                        \
  * inside the pgd, so has no extra memory associated with it.
  */
 #define pmd_free(mm, x)                        do { } while (0)
-#define __pmd_free_tlb(tlb, x)         do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr)   do { } while (0)
 
 #endif
 
@@ -132,7 +132,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
-#define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x)
+#define __pmd_free_tlb(tlb, x, addr)   pmd_free((tlb)->mm, x)
 
 #endif
 
index ec057e1bd4cf3e72538d4b8c5ae3a83b4c887694..a19f11327cd87ce9dd2c578c1ecf25bf655d083f 100644 (file)
@@ -51,6 +51,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
 }
 
 
-#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
+#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
 
 #endif /* _ASM_PGALLOC_H */
index 383b1db310ee71e18789112587f9ba3e818bdd25..07924903989eed2a8ce2d93cb349332918e1db6e 100644 (file)
@@ -21,7 +21,7 @@ do {  if (!(tlb)->fullmm)     \
 
 #include <asm-generic/tlb.h>
 
-#define __pmd_free_tlb(tlb, pmd)       pmd_free((tlb)->mm, pmd)
-#define __pte_free_tlb(tlb, pte)       pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
 
 #endif
index 0815eb40acae214b8947a4da26d565bfaf4fb8a7..c9500d666a1de37687346d7b7cfae5dd2f81ac7a 100644 (file)
@@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  */
 /* #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); }) */
 #define pmd_free(mm, x)                do { } while (0)
-#define __pmd_free_tlb(tlb,x)          do { } while (0)
+#define __pmd_free_tlb(tlb,x,a)                do { } while (0)
 /* #define pgd_populate(mm, pmd, pte)      BUG() */
 
 #ifndef CONFIG_BOOKE
index afda2bdd860f8bc8eb20dbb915701fface743ce6..e6f069c4f713a7b2c7ce76e3da593693d2fc1d79 100644 (file)
@@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf)
                kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
-#define __pmd_free_tlb(tlb, pmd)       \
+#define __pmd_free_tlb(tlb, pmd,addr)                \
        pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
                PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
 #ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud)       \
+#define __pud_free_tlb(tlb, pud, addr)               \
        pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
                PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
 #endif /* CONFIG_PPC_64K_PAGES */
index 5d8480265a77561bfa48cb2b82eb2fca9b4a4fd2..1730e5e298d61b07df858de4e32078c7ff4201e4 100644 (file)
@@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage)    \
+#define __pte_free_tlb(tlb,ptepage,address)            \
 do { \
        pgtable_page_dtor(ptepage); \
        pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-               PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+                                       PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
 } while (0)
 #else
-#define __pte_free_tlb(tlb, pte)     pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, address)      pte_free((tlb)->mm, (pte))
 #endif
 
 
index 9920d6a7cf290cc02339da13d337fa6eddbf021a..c46ef2ffa3d95673febbbe9be0d167a5879ec04f 100644 (file)
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
-       pmd_free_tlb(tlb, pmd);
+       pmd_free_tlb(tlb, pmd, start);
 }
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
-       pud_free_tlb(tlb, pud);
+       pud_free_tlb(tlb, pud, start);
 }
 
 /*
index 3d8a96d39d9d6a9a999c0f493911af00e204b605..81150b0536890b7d664052a33f22d397bb05b852 100644 (file)
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
  */
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+                               unsigned long address)
 {
        if (!tlb->fullmm) {
                tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
  * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
  * to avoid the double free of the pmd in this case.
  */
-static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+                               unsigned long address)
 {
 #ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
  * as the pgd. pud_free_tlb checks the asce_limit against 4TB
  * to avoid the double free of the pud in this case.
  */
-static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+                               unsigned long address)
 {
 #ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 42))
index f9b144049dc983ef16419a415feadb14e2c0374b..8d15314381e04640bbf227ff045b9d9a782f22cf 100644 (file)
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void)
                machine_flags |= MACHINE_FLAG_VM;
 }
 
-static void early_pgm_check_handler(void)
+static __init void early_pgm_check_handler(void)
 {
        unsigned long addr;
        const struct exception_table_entry *fixup;
@@ -222,7 +222,7 @@ static void early_pgm_check_handler(void)
        S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
 }
 
-void setup_lowcore_early(void)
+static noinline __init void setup_lowcore_early(void)
 {
        psw_t psw;
 
index 2270730f535451f647aeea1e51abdc36ac8af542..be2cae083406206c949330ee1a267fb7136a969e 100644 (file)
@@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                lowcore->extended_save_area_addr = (u32) save_area;
-#else
-       if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
-               BUG();
 #endif
        set_prefix((u32)(unsigned long) lowcore);
        local_mcck_enable();
        local_irq_enable();
+#ifdef CONFIG_64BIT
+       if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
+               BUG();
+#endif
        for_each_possible_cpu(cpu)
                if (cpu != smp_processor_id())
                        smp_create_idle(cpu);
index 79dbfee831ec00cf05e0fef2c8b993f1da911697..49106c6e6f88336ab9e3d641e97ba25b8179df04 100644 (file)
@@ -88,10 +88,17 @@ __kernel_clock_gettime:
        llilh   %r4,0x0100
        sar     %a4,%r4
        lghi    %r4,0
+       epsw    %r5,0
        sacf    512                             /* Magic ectg instruction */
        .insn   ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
-       sacf    0
-       sar     %a4,%r2
+       tml     %r5,0x4000
+       jo      11f
+       tml     %r5,0x8000
+       jno     10f
+       sacf    256
+       j       11f
+10:    sacf    0
+11:    sar     %a4,%r2
        algr    %r1,%r0                         /* r1 = cputime as TOD value */
        mghi    %r1,1000                        /* convert to nanoseconds */
        srlg    %r1,%r1,12                      /* r1 = cputime in nanosec */
index e6a4fe9f5f247154eab2ba960bf2c376ffdd9a27..bd1f5c6b0b8c57f12e96ec41f7f8f7538c222855 100644 (file)
@@ -7,24 +7,36 @@
  *
  */
 
+#include <asm/system.h>
 
-/*
- * save CPU registers before creating a hibernation image and before
- * restoring the memory state from it
- */
 void save_processor_state(void)
 {
-       /* implentation contained in the
-        * swsusp_arch_suspend function
+       /* swsusp_arch_suspend() actually saves all cpu register contents.
+        * Machine checks must be disabled since swsusp_arch_suspend() stores
+        * register contents to their lowcore save areas. That's the same
+        * place where register contents on machine checks would be saved.
+        * To avoid register corruption disable machine checks.
+        * We must also disable machine checks in the new psw mask for
+        * program checks, since swsusp_arch_suspend() may generate program
+        * checks. Disabling machine checks for all other new psw masks is
+        * just paranoia.
         */
+       local_mcck_disable();
+       /* Disable lowcore protection */
+       __ctl_clear_bit(0,28);
+       S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
+       S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
+       S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
+       S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
 }
 
-/*
- * restore the contents of CPU registers
- */
 void restore_processor_state(void)
 {
-       /* implentation contained in the
-        * swsusp_arch_resume function
-        */
+       S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
+       S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
+       S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
+       S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
+       /* Enable lowcore protection */
+       __ctl_set_bit(0,28);
+       local_mcck_enable();
 }
index 76d688da32fa3d65ee22d80041daa0baca8565bb..b26df5c5933e1d682f0dfb8f13239269b36a8bf8 100644 (file)
@@ -32,19 +32,14 @@ swsusp_arch_suspend:
        /* Deactivate DAT */
        stnsm   __SF_EMPTY(%r15),0xfb
 
-       /* Switch off lowcore protection */
-       stctg   %c0,%c0,__SF_EMPTY(%r15)
-       ni      __SF_EMPTY+4(%r15),0xef
-       lctlg   %c0,%c0,__SF_EMPTY(%r15)
-
        /* Store prefix register on stack */
        stpx    __SF_EMPTY(%r15)
 
-       /* Setup base register for lowcore (absolute 0) */
-       llgf    %r1,__SF_EMPTY(%r15)
+       /* Save prefix register contents for lowcore */
+       llgf    %r4,__SF_EMPTY(%r15)
 
        /* Get pointer to save area */
-       aghi    %r1,0x1000
+       lghi    %r1,0x1000
 
        /* Store registers */
        mvc     0x318(4,%r1),__SF_EMPTY(%r15)   /* move prefix to lowcore */
@@ -79,17 +74,15 @@ swsusp_arch_suspend:
        xc      __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
        spx     __SF_EMPTY(%r15)
 
-       /* Setup lowcore */
-       brasl   %r14,setup_lowcore_early
+       lghi    %r2,0
+       lghi    %r3,2*PAGE_SIZE
+       lghi    %r5,2*PAGE_SIZE
+1:     mvcle   %r2,%r4,0
+       jo      1b
 
        /* Save image */
        brasl   %r14,swsusp_save
 
-       /* Switch on lowcore protection */
-       stctg   %c0,%c0,__SF_EMPTY(%r15)
-       oi      __SF_EMPTY+4(%r15),0x10
-       lctlg   %c0,%c0,__SF_EMPTY(%r15)
-
        /* Restore prefix register and return */
        lghi    %r1,0x1000
        spx     0x318(%r1)
@@ -117,11 +110,6 @@ swsusp_arch_resume:
        /* Deactivate DAT */
        stnsm   __SF_EMPTY(%r15),0xfb
 
-       /* Switch off lowcore protection */
-       stctg   %c0,%c0,__SF_EMPTY(%r15)
-       ni      __SF_EMPTY+4(%r15),0xef
-       lctlg   %c0,%c0,__SF_EMPTY(%r15)
-
        /* Set prefix page to zero */
        xc      __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
        spx     __SF_EMPTY(%r15)
@@ -175,7 +163,7 @@ swsusp_arch_resume:
        /* Load old stack */
        lg      %r15,0x2f8(%r13)
 
-       /* Pointer to save arae */
+       /* Pointer to save area */
        lghi    %r13,0x1000
 
 #ifdef CONFIG_SMP
@@ -187,11 +175,6 @@ swsusp_arch_resume:
        /* Restore prefix register */
        spx     0x318(%r13)
 
-       /* Switch on lowcore protection */
-       stctg   %c0,%c0,__SF_EMPTY(%r15)
-       oi      __SF_EMPTY+4(%r15),0x10
-       lctlg   %c0,%c0,__SF_EMPTY(%r15)
-
        /* Activate DAT */
        stosm   __SF_EMPTY(%r15),0x04
 
index 84dd2db7104c74f4f57eb4f1c7d0bfc1f4159869..89a482750a5b0f267f6360cfb1cf259db6bde269 100644 (file)
@@ -73,7 +73,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        quicklist_free_page(QUICK_PT, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb,pte)                                \
+#define __pte_free_tlb(tlb,pte,addr)                   \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
        tlb_remove_page((tlb), (pte));                  \
@@ -85,7 +85,7 @@ do {                                                  \
  */
 
 #define pmd_free(mm, x)                        do { } while (0)
-#define __pmd_free_tlb(tlb,x)          do { } while (0)
+#define __pmd_free_tlb(tlb,x,addr)     do { } while (0)
 
 static inline void check_pgt_cache(void)
 {
index 9c16f737074afb62b552170227fdf4571c66912d..da8fe7ab87283904adac91d7fae1a7bea65af497 100644 (file)
@@ -91,9 +91,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 }
 
 #define tlb_remove_page(tlb,page)      free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep)                pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp)                pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, pudp)                pud_free((tlb)->mm, pudp)
+#define pte_free_tlb(tlb, ptep, addr)  pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr)  pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)
 
 #define tlb_migrate_finish(mm)         do { } while (0)
 
index 681582d2696911ce7262bfa821dc1dbff5f41d94..ca2b34456c4b00c5e5854f630286e0cfe192cc96 100644 (file)
@@ -44,8 +44,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
 BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
 #define free_pmd_fast(pmd)     BTFIXUP_CALL(free_pmd_fast)(pmd)
 
-#define pmd_free(mm, pmd)      free_pmd_fast(pmd)
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define pmd_free(mm, pmd)              free_pmd_fast(pmd)
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
 
 BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
 #define pmd_populate(MM, PMD, PTE)        BTFIXUP_CALL(pmd_populate)(PMD, PTE)
@@ -62,7 +62,7 @@ BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
 #define pte_free_kernel(mm, pte)       BTFIXUP_CALL(free_pte_fast)(pte)
 
 BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
-#define pte_free(mm, pte)      BTFIXUP_CALL(pte_free)(pte)
-#define __pte_free_tlb(tlb, pte)       pte_free((tlb)->mm, pte)
+#define pte_free(mm, pte)              BTFIXUP_CALL(pte_free)(pte)
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
 
 #endif /* _SPARC_PGALLOC_H */
index ee38e731bfa635587afd73bac991fa69fad8ebec..dca406b9b6fc5b9b6400732d6abe3a9f1e9e528d 100644 (file)
@@ -100,9 +100,9 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
 }
 
 #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
-#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
-#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
+#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
+#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
+#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 #define tlb_start_vma(tlb, vma) do { } while (0)
index 718984359f8c8876d84a2a902fa0b6fa6b4cb933..32c8ce4e15153ee6d42fb72fd7579df0400f9e69 100644 (file)
@@ -40,7 +40,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        __free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte)                                \
+#define __pte_free_tlb(tlb,pte, address)               \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
        tlb_remove_page((tlb),(pte));                   \
@@ -53,7 +53,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        free_page((unsigned long)pmd);
 }
 
-#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
+#define __pmd_free_tlb(tlb,x, address)   tlb_remove_page((tlb),virt_to_page(x))
 #endif
 
 #define check_pgt_cache()      do { } while (0)
index 5240fa1c5e0860debec1fcf56daf1e3db30970ec..660caedac9eb70bb3314beb5b1e87a3d3e876bef 100644 (file)
@@ -116,11 +116,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
-#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
+#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
 
-#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
+#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
 
-#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
+#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
index dd14c54ac718f465ebccf0b486b421f3d3937c6b..0e8c2a0fd9222d4b75793fb664591de22461e7d7 100644 (file)
@@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
        __free_page(pte);
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+                                 unsigned long address)
+{
+       ___pte_free_tlb(tlb, pte);
+}
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
                                       pmd_t *pmd, pte_t *pte)
@@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        free_page((unsigned long)pmd);
 }
 
-extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+                                 unsigned long adddress)
+{
+       ___pmd_free_tlb(tlb, pmd);
+}
 
 #ifdef CONFIG_X86_PAE
 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
@@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
        free_page((unsigned long)pud);
 }
 
-extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+                                 unsigned long address)
+{
+       ___pud_free_tlb(tlb, pud);
+}
+
 #endif /* PAGETABLE_LEVELS > 3 */
 #endif /* PAGETABLE_LEVELS > 2 */
 
index 8e43bdd45456017cd431c2cf9b4678c5b7e9f65d..af8f9650058cfcf59ab46476ac174ec86e41de49 100644 (file)
@@ -25,7 +25,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
        return pte;
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
@@ -33,14 +33,14 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 }
 
 #if PAGETABLE_LEVELS > 2
-void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
 #if PAGETABLE_LEVELS > 3
-void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 {
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
index 31c220faca02ac9cbb3695f735e0221bdc32104a..0d766f9c1083a59cd4a073cb5da0dfc640a06415 100644 (file)
@@ -42,6 +42,6 @@
 
 #include <asm-generic/tlb.h>
 
-#define __pte_free_tlb(tlb, pte)               pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address)      pte_free((tlb)->mm, pte)
 
 #endif /* _XTENSA_TLB_H */
index 529e2ba505c30055440be381d908c52b3113f44d..ed1038164019a1d5935937037c6aae2bd429c666 100644 (file)
@@ -1318,7 +1318,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 {
        struct crypt_config *cc = ti->private;
 
-       return fn(ti, cc->dev, cc->start, data);
+       return fn(ti, cc->dev, cc->start, ti->len, data);
 }
 
 static struct target_type crypt_target = {
index 4e5b843cd4d77fb388920991eefd5540e2399aef..ebe7381f47c8fd5b1d86cd4e603acad20eebe98f 100644 (file)
@@ -324,12 +324,12 @@ static int delay_iterate_devices(struct dm_target *ti,
        struct delay_c *dc = ti->private;
        int ret = 0;
 
-       ret = fn(ti, dc->dev_read, dc->start_read, data);
+       ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
        if (ret)
                goto out;
 
        if (dc->dev_write)
-               ret = fn(ti, dc->dev_write, dc->start_write, data);
+               ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);
 
 out:
        return ret;
index 9184b6deb8685dc3660e21dd438f6aa8fcd200b3..82f7d6e6b1eab551588ae6b8e917622151c4d4ff 100644 (file)
@@ -139,7 +139,7 @@ static int linear_iterate_devices(struct dm_target *ti,
 {
        struct linear_c *lc = ti->private;
 
-       return fn(ti, lc->dev, lc->start, data);
+       return fn(ti, lc->dev, lc->start, ti->len, data);
 }
 
 static struct target_type linear_target = {
index c70604a208979b4b1f87e4a9e98f10b5ce00036d..6f0d90d4a541962e0ba289e5e2568da51d6445cb 100644 (file)
@@ -1453,7 +1453,7 @@ static int multipath_iterate_devices(struct dm_target *ti,
 
        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(p, &pg->pgpaths, list) {
-                       ret = fn(ti, p->path.dev, ti->begin, data);
+                       ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
                        if (ret)
                                goto out;
                }
index ce8868c768cce1c411d3ba76d4eae5b9011bee64..9726577cde493f2ca21b7fd28386d1d2a31c8786 100644 (file)
@@ -638,6 +638,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
                spin_lock_irq(&ms->lock);
                bio_list_merge(&ms->writes, &requeue);
                spin_unlock_irq(&ms->lock);
+               delayed_wake(ms);
        }
 
        /*
@@ -1292,7 +1293,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
 
        for (i = 0; !ret && i < ms->nr_mirrors; i++)
                ret = fn(ti, ms->mirror[i].dev,
-                        ms->mirror[i].offset, data);
+                        ms->mirror[i].offset, ti->len, data);
 
        return ret;
 }
index b240e85ae39aa4b444135b0a71823e35b03aac49..4e0e5937e42afc6f35274fc8856c3a8190cd1c20 100644 (file)
@@ -320,10 +320,11 @@ static int stripe_iterate_devices(struct dm_target *ti,
        int ret = 0;
        unsigned i = 0;
 
-       do
+       do {
                ret = fn(ti, sc->stripe[i].dev,
-                        sc->stripe[i].physical_start, data);
-       while (!ret && ++i < sc->stripes);
+                        sc->stripe[i].physical_start,
+                        sc->stripe_width, data);
+       } while (!ret && ++i < sc->stripes);
 
        return ret;
 }
index 2cba557d9e61e10dad844dea12f134b4a4ee0d11..d952b3441913a74b15c09f6ba7ea6237bd1e8aa0 100644 (file)
@@ -346,7 +346,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
  * If possible, this checks an area of a destination device is valid.
  */
 static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
-                               sector_t start, void *data)
+                               sector_t start, sector_t len, void *data)
 {
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
@@ -359,7 +359,7 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
        if (!dev_size)
                return 1;
 
-       if ((start >= dev_size) || (start + ti->len > dev_size)) {
+       if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
@@ -377,11 +377,11 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
                return 0;
        }
 
-       if (ti->len & (logical_block_size_sectors - 1)) {
+       if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %hu of %s",
                       dm_device_name(ti->table->md),
-                      (unsigned long long)ti->len,
+                      (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 0;
        }
@@ -482,7 +482,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
 
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
-                        sector_t start, void *data)
+                        sector_t start, sector_t len, void *data)
 {
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
@@ -830,11 +830,6 @@ unsigned dm_table_get_type(struct dm_table *t)
        return t->type;
 }
 
-bool dm_table_bio_based(struct dm_table *t)
-{
-       return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
-}
-
 bool dm_table_request_based(struct dm_table *t)
 {
        return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
index 9acd54a5cffb3b875c699332c0bfaf597be3d19a..8a311ea0d441faad7b5fdf2d0ce9a34a3d01983d 100644 (file)
@@ -2203,16 +2203,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
                goto out;
        }
 
-       /*
-        * It is enought that blk_queue_ordered() is called only once when
-        * the first bio-based table is bound.
-        *
-        * This setting should be moved to alloc_dev() when request-based dm
-        * supports barrier.
-        */
-       if (!md->map && dm_table_bio_based(table))
-               blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
-
        __unbind(md);
        r = __bind(md, table, &limits);
 
index 23278ae80f08dfc90c8458a3b6a700e4a2a87f8b..a7663eba17e2920ba84d5a7f41fe5fa015964590 100644 (file)
@@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
 int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
-bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
index 727a809636d8a63ce93b1271175c3f520ac4455b..ed3dcdea7fe10cb7b521f7ccdb78a1bb5bfc86cc 100644 (file)
@@ -1145,12 +1145,17 @@ ap_config_timeout(unsigned long ptr)
  */
 static inline void ap_schedule_poll_timer(void)
 {
+       ktime_t hr_time;
        if (ap_using_interrupts() || ap_suspend_flag)
                return;
        if (hrtimer_is_queued(&ap_poll_timer))
                return;
-       hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout),
-                     HRTIMER_MODE_ABS);
+       if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
+               hr_time = ktime_set(0, poll_timeout);
+               hrtimer_forward_now(&ap_poll_timer, hr_time);
+               hrtimer_restart(&ap_poll_timer);
+       }
+       return;
 }
 
 /**
index 9bb5c8750736e73813ac2f5e50edae28932016e3..fc44d316d0bb4702eaa8d083cf9ed0eb746304ca 100644 (file)
@@ -2452,10 +2452,10 @@ try_mount_again:
                tcon->local_lease = volume_info->local_lease;
        }
        if (pSesInfo) {
-               if (pSesInfo->capabilities & CAP_LARGE_FILES) {
-                       sb->s_maxbytes = (u64) 1 << 63;
-               else
-                       sb->s_maxbytes = (u64) 1 << 31; /* 2 GB */
+               if (pSesInfo->capabilities & CAP_LARGE_FILES)
+                       sb->s_maxbytes = MAX_LFS_FILESIZE;
+               else
+                       sb->s_maxbytes = MAX_NON_LFS;
        }
 
        /* BB FIXME fix time_gran to be larger for LANMAN sessions */
index 18afe57b24611748e35377e95de5130c53a5a625..82d83839655eed195fa91dd10120e048309bd287 100644 (file)
@@ -212,7 +212,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
  * junction to the new submount (ie to setup the fake directory
  * which represents a DFS referral).
  */
-void
+static void
 cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
 {
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -388,7 +388,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
 }
 
 /* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
-void
+static void
 cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
                       struct cifs_sb_info *cifs_sb, bool adjust_tz)
 {
@@ -513,9 +513,12 @@ int cifs_get_inode_info(struct inode **pinode,
                                        cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
                        if (rc1) {
-                               /* BB EOPNOSUPP disable SERVER_INUM? */
                                cFYI(1, ("GetSrvInodeNum rc %d", rc1));
                                fattr.cf_uniqueid = iunique(sb, ROOT_I);
+                               /* disable serverino if call not supported */
+                               if (rc1 == -EINVAL)
+                                       cifs_sb->mnt_cifs_flags &=
+                                                       ~CIFS_MOUNT_SERVER_INUM;
                        }
                } else {
                        fattr.cf_uniqueid = iunique(sb, ROOT_I);
index 3d724a95882f618672a45ee8f945d9164f80b89f..373fa90c796a0821c23cd441c0a238a1a52cfb69 100644 (file)
@@ -130,8 +130,7 @@ static int ext3_readdir(struct file * filp,
                struct buffer_head *bh = NULL;
 
                map_bh.b_state = 0;
-               err = ext3_get_blocks_handle(NULL, inode, blk, 1,
-                                               &map_bh, 0, 0);
+               err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
                if (err > 0) {
                        pgoff_t index = map_bh.b_blocknr >>
                                        (PAGE_CACHE_SHIFT - inode->i_blkbits);
index 5f51fed5c750870b8c575fdf7dd8e877fe986bfb..b49908a167ae09d366f76ddcef57432f338b5735 100644 (file)
@@ -788,7 +788,7 @@ err_out:
 int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
                sector_t iblock, unsigned long maxblocks,
                struct buffer_head *bh_result,
-               int create, int extend_disksize)
+               int create)
 {
        int err = -EIO;
        int offsets[4];
@@ -911,13 +911,6 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
        if (!err)
                err = ext3_splice_branch(handle, inode, iblock,
                                        partial, indirect_blks, count);
-       /*
-        * i_disksize growing is protected by truncate_mutex.  Don't forget to
-        * protect it if you're about to implement concurrent
-        * ext3_get_block() -bzzz
-       */
-       if (!err && extend_disksize && inode->i_size > ei->i_disksize)
-               ei->i_disksize = inode->i_size;
        mutex_unlock(&ei->truncate_mutex);
        if (err)
                goto cleanup;
@@ -972,7 +965,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock,
        }
 
        ret = ext3_get_blocks_handle(handle, inode, iblock,
-                                       max_blocks, bh_result, create, 0);
+                                       max_blocks, bh_result, create);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
@@ -1005,7 +998,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        err = ext3_get_blocks_handle(handle, inode, block, 1,
-                                       &dummy, create, 1);
+                                       &dummy, create);
        /*
         * ext3_get_blocks_handle() returns number of blocks
         * mapped. 0 in case of a HOLE.
@@ -1193,15 +1186,16 @@ write_begin_failed:
                 * i_size_read because we hold i_mutex.
                 *
                 * Add inode to orphan list in case we crash before truncate
-                * finishes.
+                * finishes. Do this only if ext3_can_truncate() agrees so
+                * that orphan processing code is happy.
                 */
-               if (pos + len > inode->i_size)
+               if (pos + len > inode->i_size && ext3_can_truncate(inode))
                        ext3_orphan_add(handle, inode);
                ext3_journal_stop(handle);
                unlock_page(page);
                page_cache_release(page);
                if (pos + len > inode->i_size)
-                       vmtruncate(inode, inode->i_size);
+                       ext3_truncate(inode);
        }
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
@@ -1287,7 +1281,7 @@ static int ext3_ordered_write_end(struct file *file,
         * There may be allocated blocks outside of i_size because
         * we failed to copy some data. Prepare for truncate.
         */
-       if (pos + len > inode->i_size)
+       if (pos + len > inode->i_size && ext3_can_truncate(inode))
                ext3_orphan_add(handle, inode);
        ret2 = ext3_journal_stop(handle);
        if (!ret)
@@ -1296,7 +1290,7 @@ static int ext3_ordered_write_end(struct file *file,
        page_cache_release(page);
 
        if (pos + len > inode->i_size)
-               vmtruncate(inode, inode->i_size);
+               ext3_truncate(inode);
        return ret ? ret : copied;
 }
 
@@ -1315,14 +1309,14 @@ static int ext3_writeback_write_end(struct file *file,
         * There may be allocated blocks outside of i_size because
         * we failed to copy some data. Prepare for truncate.
         */
-       if (pos + len > inode->i_size)
+       if (pos + len > inode->i_size && ext3_can_truncate(inode))
                ext3_orphan_add(handle, inode);
        ret = ext3_journal_stop(handle);
        unlock_page(page);
        page_cache_release(page);
 
        if (pos + len > inode->i_size)
-               vmtruncate(inode, inode->i_size);
+               ext3_truncate(inode);
        return ret ? ret : copied;
 }
 
@@ -1358,7 +1352,7 @@ static int ext3_journalled_write_end(struct file *file,
         * There may be allocated blocks outside of i_size because
         * we failed to copy some data. Prepare for truncate.
         */
-       if (pos + len > inode->i_size)
+       if (pos + len > inode->i_size && ext3_can_truncate(inode))
                ext3_orphan_add(handle, inode);
        EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
        if (inode->i_size > EXT3_I(inode)->i_disksize) {
@@ -1375,7 +1369,7 @@ static int ext3_journalled_write_end(struct file *file,
        page_cache_release(page);
 
        if (pos + len > inode->i_size)
-               vmtruncate(inode, inode->i_size);
+               ext3_truncate(inode);
        return ret ? ret : copied;
 }
 
index 737f7246a4b5ddc750b4a5198928f4b5afacc193..f96f85092d1cf2bffb671d147470666118b4bc7d 100644 (file)
@@ -287,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
        struct page *new_page;
        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);
+       journal_t *journal = transaction->t_journal;
 
        /*
         * The buffer really shouldn't be locked: only the current committing
@@ -300,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction,
        J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
        new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+       /* keep subsequent assertions sane */
+       new_bh->b_state = 0;
+       init_buffer(new_bh, NULL, NULL);
+       atomic_set(&new_bh->b_count, 1);
+       new_jh = journal_add_journal_head(new_bh);      /* This sleeps */
 
        /*
         * If a new transaction has already done a buffer copy-out, then
@@ -361,14 +367,6 @@ repeat:
                kunmap_atomic(mapped_data, KM_USER0);
        }
 
-       /* keep subsequent assertions sane */
-       new_bh->b_state = 0;
-       init_buffer(new_bh, NULL, NULL);
-       atomic_set(&new_bh->b_count, 1);
-       jbd_unlock_bh_state(bh_in);
-
-       new_jh = journal_add_journal_head(new_bh);      /* This sleeps */
-
        set_bh_page(new_bh, new_page, new_offset);
        new_jh->b_transaction = NULL;
        new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -385,7 +383,11 @@ repeat:
         * copying is moved to the transaction's shadow queue.
         */
        JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-       journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       spin_lock(&journal->j_list_lock);
+       __journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(bh_in);
+
        JBUFFER_TRACE(new_jh, "file as BJ_IO");
        journal_file_buffer(new_jh, transaction, BJ_IO);
 
@@ -848,6 +850,12 @@ static int journal_reset(journal_t *journal)
 
        first = be32_to_cpu(sb->s_first);
        last = be32_to_cpu(sb->s_maxlen);
+       if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
+               printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n",
+                      first, last);
+               journal_fail_superblock(journal);
+               return -EINVAL;
+       }
 
        journal->j_first = first;
        journal->j_last = last;
index 73242ba7c7b1315d9cea08de1f85f1189293dd22..c03ac11f74be1313b8f7d09a24eba88f1d27ceed 100644 (file)
@@ -489,34 +489,15 @@ void journal_unlock_updates (journal_t *journal)
        wake_up(&journal->j_wait_transaction_locked);
 }
 
-/*
- * Report any unexpected dirty buffers which turn up.  Normally those
- * indicate an error, but they can occur if the user is running (say)
- * tune2fs to modify the live filesystem, so we need the option of
- * continuing as gracefully as possible.  #
- *
- * The caller should already hold the journal lock and
- * j_list_lock spinlock: most callers will need those anyway
- * in order to probe the buffer's journaling state safely.
- */
-static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
+static void warn_dirty_buffer(struct buffer_head *bh)
 {
-       int jlist;
-
-       /* If this buffer is one which might reasonably be dirty
-        * --- ie. data, or not part of this journal --- then
-        * we're OK to leave it alone, but otherwise we need to
-        * move the dirty bit to the journal's own internal
-        * JBDDirty bit. */
-       jlist = jh->b_jlist;
+       char b[BDEVNAME_SIZE];
 
-       if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
-           jlist == BJ_Shadow || jlist == BJ_Forget) {
-               struct buffer_head *bh = jh2bh(jh);
-
-               if (test_clear_buffer_dirty(bh))
-                       set_buffer_jbddirty(bh);
-       }
+       printk(KERN_WARNING
+              "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
+              "There's a risk of filesystem corruption in case of system "
+              "crash.\n",
+              bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
 }
 
 /*
@@ -583,14 +564,16 @@ repeat:
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
+                       warn_dirty_buffer(bh);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
-               JBUFFER_TRACE(jh, "Unexpected dirty buffer");
-               jbd_unexpected_dirty_buffer(jh);
+               JBUFFER_TRACE(jh, "Journalling dirty buffer");
+               clear_buffer_dirty(bh);
+               set_buffer_jbddirty(bh);
        }
 
        unlock_buffer(bh);
@@ -826,6 +809,15 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
 
        if (jh->b_transaction == NULL) {
+               /*
+                * Previous journal_forget() could have left the buffer
+                * with jbddirty bit set because it was being committed. When
+                * the commit finished, we've filed the buffer for
+                * checkpointing and marked it dirty. Now we are reallocating
+                * the buffer so the transaction freeing it must have
+                * committed and so it's safe to clear the dirty bit.
+                */
+               clear_buffer_dirty(jh2bh(jh));
                jh->b_transaction = transaction;
 
                /* first access by this transaction */
@@ -1782,8 +1774,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 
        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
+               /*
+                * We don't want to write the buffer anymore, clear the
+                * bit so that we don't confuse checks in
+                * __journal_file_buffer
+                */
+               clear_buffer_dirty(bh);
                __journal_file_buffer(jh, transaction, BJ_Forget);
-               clear_buffer_jbddirty(bh);
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
@@ -2041,12 +2038,17 @@ void __journal_file_buffer(struct journal_head *jh,
        if (jh->b_transaction && jh->b_jlist == jlist)
                return;
 
-       /* The following list of buffer states needs to be consistent
-        * with __jbd_unexpected_dirty_buffer()'s handling of dirty
-        * state. */
-
        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
            jlist == BJ_Shadow || jlist == BJ_Forget) {
+               /*
+                * For metadata buffers, we track dirty bit in buffer_jbddirty
+                * instead of buffer_dirty. We should not see a dirty bit set
+                * here because we clear it in do_get_write_access but e.g.
+                * tune2fs can modify the sb and set the dirty bit at any time
+                * so we try to gracefully handle that.
+                */
+               if (buffer_dirty(bh))
+                       warn_dirty_buffer(bh);
                if (test_clear_buffer_dirty(bh) ||
                    test_clear_buffer_jbddirty(bh))
                        was_dirty = 1;
index 91fa3ad6e8c2dc01144bcf1ab56c250a9c9e54bb..a29c7c3e3fb81a58148c93e0dd56e858bac04939 100644 (file)
@@ -67,10 +67,8 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
                acl = posix_acl_from_xattr(value, size);
        }
        kfree(value);
-       if (!IS_ERR(acl)) {
+       if (!IS_ERR(acl))
                set_cached_acl(inode, type, acl);
-               posix_acl_release(acl);
-       }
        return acl;
 }
 
index 9d40e879f99ea968e18e07f142519c5839392003..77ff547730af53f8da5850f05ee70866b720f4b9 100644 (file)
@@ -27,9 +27,9 @@
 #define pud_page_vaddr(pud)            pgd_page_vaddr(pud)
 
 #undef pud_free_tlb
-#define pud_free_tlb(tlb, x)            do { } while (0)
+#define pud_free_tlb(tlb, x, addr)     do { } while (0)
 #define pud_free(mm, x)                        do { } while (0)
-#define __pud_free_tlb(tlb, x)         do { } while (0)
+#define __pud_free_tlb(tlb, x, addr)   do { } while (0)
 
 #undef  pud_addr_end
 #define pud_addr_end(addr, end)                (end)
index a7cdc48e8b78703156456f8eb2472251e66c8d78..725612b793ce1d55462de9ab2158a795ab3b4faa 100644 (file)
@@ -59,7 +59,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
 }
-#define __pmd_free_tlb(tlb, x)                 do { } while (0)
+#define __pmd_free_tlb(tlb, x, a)              do { } while (0)
 
 #undef  pmd_addr_end
 #define pmd_addr_end(addr, end)                        (end)
index 87cf449a6df380e6be3a11fd24648ef3aefe588b..810431d8351b16c14c3d1954ddc2890866c41658 100644 (file)
@@ -52,7 +52,7 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
  */
 #define pud_alloc_one(mm, address)             NULL
 #define pud_free(mm, x)                                do { } while (0)
-#define __pud_free_tlb(tlb, x)                 do { } while (0)
+#define __pud_free_tlb(tlb, x, a)              do { } while (0)
 
 #undef  pud_addr_end
 #define pud_addr_end(addr, end)                        (end)
index f490e43a90b90a09e1cfe78a9f1215b66d3df74e..e43f9766259f301b2087e83d70a1e563e568edbd 100644 (file)
@@ -123,24 +123,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
-#define pte_free_tlb(tlb, ptep)                                        \
+#define pte_free_tlb(tlb, ptep, address)                       \
        do {                                                    \
                tlb->need_flush = 1;                            \
-               __pte_free_tlb(tlb, ptep);                      \
+               __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
-#define pud_free_tlb(tlb, pudp)                                        \
+#define pud_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                tlb->need_flush = 1;                            \
-               __pud_free_tlb(tlb, pudp);                      \
+               __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
 
-#define pmd_free_tlb(tlb, pmdp)                                        \
+#define pmd_free_tlb(tlb, pmdp, address)                       \
        do {                                                    \
                tlb->need_flush = 1;                            \
-               __pmd_free_tlb(tlb, pmdp);                      \
+               __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
 
 #define tlb_migrate_finish(mm) do {} while (0)
index 0d6310657f32a41cc6f5af7150f13db9c35a51e7..655e7721580a450e93c8467316244a829cd5eace 100644 (file)
@@ -84,7 +84,7 @@ typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
 
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
-                                          sector_t physical_start,
+                                          sector_t start, sector_t len,
                                           void *data);
 
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
@@ -104,7 +104,7 @@ void dm_error(const char *message);
  * Combine device limits.
  */
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
-                        sector_t start, void *data);
+                        sector_t start, sector_t len, void *data);
 
 struct dm_dev {
        struct block_device *bdev;
index 634a5e5aba3e219844fbffadce72b0361c794203..7499b36677985240f73743c9993c0ceadd16bc5b 100644 (file)
@@ -874,7 +874,7 @@ struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
 struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
 int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
        sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
-       int create, int extend_disksize);
+       int create);
 
 extern struct inode *ext3_iget(struct super_block *, unsigned long);
 extern int  ext3_write_inode (struct inode *, int);
index 9b1a7de26979a7d02abc5f2e68025a5acea0bdfd..eb8751aa0418d448f2b46d364074d39bf622ec6b 100644 (file)
@@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind);
  * @k: thread created by kthread_create().
  *
  * Sets kthread_should_stop() for @k to return true, wakes it, and
- * waits for it to exit.  Your threadfn() must not call do_exit()
- * itself if you use this function!  This can also be called after
- * kthread_create() instead of calling wake_up_process(): the thread
- * will exit without calling threadfn().
+ * waits for it to exit. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will exit without
+ * calling threadfn().
+ *
+ * If threadfn() may call do_exit() itself, the caller must ensure
+ * task_struct can't go away.
  *
  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
  * was never called.
index 0a049837008e6b77d416659dfd0588fed37b00b5..fd141140355889abd13630c663e66a2e87267194 100644 (file)
@@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 {
        const unsigned long *crc;
 
-       if (!find_symbol("module_layout", NULL, &crc, true, false))
+       if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
+                        &crc, true, false))
                BUG();
        return check_version(sechdrs, versindex, "module_layout", mod, crc);
 }
index 65216194eb8daec69843ef672d73725eeb37e0ca..aede2ce3aba4fdf1159946cffc7b6acaf8b534d3 100644 (file)
@@ -135,11 +135,12 @@ void pmd_clear_bad(pmd_t *pmd)
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
+static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+                          unsigned long addr)
 {
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
-       pte_free_tlb(tlb, token);
+       pte_free_tlb(tlb, token, addr);
        tlb->mm->nr_ptes--;
 }
 
@@ -157,7 +158,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               free_pte_range(tlb, pmd);
+               free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);
 
        start &= PUD_MASK;
@@ -173,7 +174,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
-       pmd_free_tlb(tlb, pmd);
+       pmd_free_tlb(tlb, pmd, start);
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -206,7 +207,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
-       pud_free_tlb(tlb, pud);
+       pud_free_tlb(tlb, pud, start);
 }
 
 /*